direct-io.hg

changeset 13446:80c5b5914b79

merge with ia64 sn2+machvec tree
author awilliam@xenbuild2.aw
date Wed Dec 20 14:55:02 2006 -0700 (2006-12-20)
parents 46c44b5e6a1b 37309be26861
children a50fd1fed61e
files xen/include/asm-ia64/linux-null/linux/ioport.h xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h xen/include/asm-ia64/linux/asm/machvec.h xen/include/asm-ia64/linux/asm/pci.h
line diff
     1.1 --- a/xen/arch/ia64/linux-xen/Makefile	Wed Dec 20 08:53:42 2006 -0700
     1.2 +++ b/xen/arch/ia64/linux-xen/Makefile	Wed Dec 20 14:55:02 2006 -0700
     1.3 @@ -1,3 +1,5 @@
     1.4 +subdir-y += sn
     1.5 +
     1.6  obj-y += efi.o
     1.7  obj-y += entry.o
     1.8  obj-y += irq_ia64.o
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/xen/arch/ia64/linux-xen/sn/Makefile	Wed Dec 20 14:55:02 2006 -0700
     2.3 @@ -0,0 +1,1 @@
     2.4 +subdir-y += kernel
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/arch/ia64/linux-xen/sn/kernel/Makefile	Wed Dec 20 14:55:02 2006 -0700
     3.3 @@ -0,0 +1,5 @@
     3.4 +obj-y += sn2_smp.o
     3.5 +obj-y += setup.o
     3.6 +obj-y += iomv.o
     3.7 +obj-y += irq.o
     3.8 +obj-y += io_init.o
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/arch/ia64/linux-xen/sn/kernel/README.origin	Wed Dec 20 14:55:02 2006 -0700
     4.3 @@ -0,0 +1,12 @@
     4.4 +# Source files in this directory are near-identical copies of linux-2.6.19
     4.5 +# files:
     4.6 +
     4.7 +# NOTE: ALL changes to these files should be clearly marked
     4.8 +# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
     4.9 +# easily updated to future versions of the corresponding Linux files.
    4.10 +
    4.11 +io_init.c		-> linux/arch/ia64/sn/kernel/io_init.c
    4.12 +iomv.c			-> linux/arch/ia64/sn/kernel/iomv.c
    4.13 +irq.c			-> linux/arch/ia64/sn/kernel/irq.c
    4.14 +setup.c			-> linux/arch/ia64/sn/kernel/setup.c
    4.15 +sn2_smp.c		-> linux/arch/ia64/sn/kernel/sn2/sn2_smp.c
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/arch/ia64/linux-xen/sn/kernel/io_init.c	Wed Dec 20 14:55:02 2006 -0700
     5.3 @@ -0,0 +1,783 @@
     5.4 +/*
     5.5 + * This file is subject to the terms and conditions of the GNU General Public
     5.6 + * License.  See the file "COPYING" in the main directory of this archive
     5.7 + * for more details.
     5.8 + *
     5.9 + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
    5.10 + */
    5.11 +
    5.12 +#include <linux/bootmem.h>
    5.13 +#include <linux/nodemask.h>
    5.14 +#ifdef XEN
    5.15 +#include <linux/init.h>
    5.16 +#endif
    5.17 +#include <asm/sn/types.h>
    5.18 +#include <asm/sn/addrs.h>
    5.19 +#include <asm/sn/sn_feature_sets.h>
    5.20 +#include <asm/sn/geo.h>
    5.21 +#include <asm/sn/io.h>
    5.22 +#include <asm/sn/l1.h>
    5.23 +#include <asm/sn/module.h>
    5.24 +#include <asm/sn/pcibr_provider.h>
    5.25 +#include <asm/sn/pcibus_provider_defs.h>
    5.26 +#ifndef XEN
    5.27 +#include <asm/sn/pcidev.h>
    5.28 +#endif
    5.29 +#include <asm/sn/simulator.h>
    5.30 +#include <asm/sn/sn_sal.h>
    5.31 +#ifndef XEN
    5.32 +#include <asm/sn/tioca_provider.h>
    5.33 +#include <asm/sn/tioce_provider.h>
    5.34 +#endif
    5.35 +#ifdef XEN
    5.36 +#include "asm/sn/hubdev.h"
    5.37 +#include "asm/sn/xwidgetdev.h"
    5.38 +#else
    5.39 +#include "xtalk/hubdev.h"
    5.40 +#include "xtalk/xwidgetdev.h"
    5.41 +#endif
    5.42 +
    5.43 +
    5.44 +extern void sn_init_cpei_timer(void);
    5.45 +extern void register_sn_procfs(void);
    5.46 +#ifdef XEN
    5.47 +extern void sn_irq_lh_init(void);
    5.48 +#endif
    5.49 +
    5.50 +static struct list_head sn_sysdata_list;
    5.51 +
    5.52 +/* sysdata list struct */
    5.53 +struct sysdata_el {
    5.54 +	struct list_head entry;
    5.55 +	void *sysdata;
    5.56 +};
    5.57 +
    5.58 +struct slab_info {
    5.59 +	struct hubdev_info hubdev;
    5.60 +};
    5.61 +
    5.62 +struct brick {
    5.63 +	moduleid_t id;		/* Module ID of this module        */
    5.64 +	struct slab_info slab_info[MAX_SLABS + 1];
    5.65 +};
    5.66 +
    5.67 +int sn_ioif_inited;		/* SN I/O infrastructure initialized? */
    5.68 +
    5.69 +struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES];	/* indexed by asic type */
    5.70 +
    5.71 +#ifndef XEN
    5.72 +static int max_segment_number;		 /* Default highest segment number */
    5.73 +static int max_pcibus_number = 255;	/* Default highest pci bus number */
    5.74 +
    5.75 +/*
    5.76 + * Hooks and struct for unsupported pci providers
    5.77 + */
    5.78 +
    5.79 +static dma_addr_t
    5.80 +sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
    5.81 +{
    5.82 +	return 0;
    5.83 +}
    5.84 +
    5.85 +static void
    5.86 +sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
    5.87 +{
    5.88 +	return;
    5.89 +}
    5.90 +
    5.91 +static void *
    5.92 +sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
    5.93 +{
    5.94 +	return NULL;
    5.95 +}
    5.96 +
    5.97 +static struct sn_pcibus_provider sn_pci_default_provider = {
    5.98 +	.dma_map = sn_default_pci_map,
    5.99 +	.dma_map_consistent = sn_default_pci_map,
   5.100 +	.dma_unmap = sn_default_pci_unmap,
   5.101 +	.bus_fixup = sn_default_pci_bus_fixup,
   5.102 +};
   5.103 +#endif
   5.104 +
   5.105 +/*
   5.106 + * Retrieve the DMA Flush List given nasid, widget, and device.
   5.107 + * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
   5.108 + */
   5.109 +static inline u64
   5.110 +sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
   5.111 +			     u64 address)
   5.112 +{
   5.113 +	struct ia64_sal_retval ret_stuff;
   5.114 +	ret_stuff.status = 0;
   5.115 +	ret_stuff.v0 = 0;
   5.116 +
   5.117 +	SAL_CALL_NOLOCK(ret_stuff,
   5.118 +			(u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
   5.119 +			(u64) nasid, (u64) widget_num,
   5.120 +			(u64) device_num, (u64) address, 0, 0, 0);
   5.121 +	return ret_stuff.status;
   5.122 +}
   5.123 +
   5.124 +/*
   5.125 + * Retrieve the hub device info structure for the given nasid.
   5.126 + */
   5.127 +static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
   5.128 +{
   5.129 +	struct ia64_sal_retval ret_stuff;
   5.130 +	ret_stuff.status = 0;
   5.131 +	ret_stuff.v0 = 0;
   5.132 +
   5.133 +	SAL_CALL_NOLOCK(ret_stuff,
   5.134 +			(u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
   5.135 +			(u64) handle, (u64) address, 0, 0, 0, 0, 0);
   5.136 +	return ret_stuff.v0;
   5.137 +}
   5.138 +
   5.139 +/*
   5.140 + * Retrieve the pci bus information given the bus number.
   5.141 + */
   5.142 +static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
   5.143 +{
   5.144 +	struct ia64_sal_retval ret_stuff;
   5.145 +	ret_stuff.status = 0;
   5.146 +	ret_stuff.v0 = 0;
   5.147 +
   5.148 +	SAL_CALL_NOLOCK(ret_stuff,
   5.149 +			(u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
   5.150 +			(u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
   5.151 +	return ret_stuff.v0;
   5.152 +}
   5.153 +
   5.154 +#ifndef XEN
   5.155 +/*
   5.156 + * Retrieve the pci device information given the bus and device|function number.
   5.157 + */
   5.158 +static inline u64
   5.159 +sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
   5.160 +		    u64 sn_irq_info)
   5.161 +{
   5.162 +	struct ia64_sal_retval ret_stuff;
   5.163 +	ret_stuff.status = 0;
   5.164 +	ret_stuff.v0 = 0;
   5.165 +
   5.166 +	SAL_CALL_NOLOCK(ret_stuff,
   5.167 +			(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
   5.168 +			(u64) segment, (u64) bus_number, (u64) devfn,
   5.169 +			(u64) pci_dev,
   5.170 +			sn_irq_info, 0, 0);
   5.171 +	return ret_stuff.v0;
   5.172 +}
   5.173 +
   5.174 +/*
   5.175 + * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
   5.176 + *			  device.
   5.177 + */
   5.178 +inline struct pcidev_info *
   5.179 +sn_pcidev_info_get(struct pci_dev *dev)
   5.180 +{
   5.181 +	struct pcidev_info *pcidev;
   5.182 +
   5.183 +	list_for_each_entry(pcidev,
   5.184 +			    &(SN_PCI_CONTROLLER(dev)->pcidev_info), pdi_list) {
   5.185 +		if (pcidev->pdi_linux_pcidev == dev) {
   5.186 +			return pcidev;
   5.187 +		}
   5.188 +	}
   5.189 +	return NULL;
   5.190 +}
   5.191 +
   5.192 +/* Older PROM flush WAR
   5.193 + *
   5.194 + * 01/16/06 -- This war will be in place until a new official PROM is released.
   5.195 + * Additionally note that the struct sn_flush_device_war also has to be
   5.196 + * removed from arch/ia64/sn/include/xtalk/hubdev.h
   5.197 + */
   5.198 +static u8 war_implemented = 0;
   5.199 +
   5.200 +static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
   5.201 +			       struct sn_flush_device_common *common)
   5.202 +{
   5.203 +	struct sn_flush_device_war *war_list;
   5.204 +	struct sn_flush_device_war *dev_entry;
   5.205 +	struct ia64_sal_retval isrv = {0,0,0,0};
   5.206 +
   5.207 +	if (!war_implemented) {
   5.208 +		printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
   5.209 +		       "PROM flush WAR\n");
   5.210 +		war_implemented = 1;
   5.211 +	}
   5.212 +
   5.213 +	war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
   5.214 +	if (!war_list)
   5.215 +		BUG();
   5.216 +
   5.217 +	SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
   5.218 +			nasid, widget, __pa(war_list), 0, 0, 0 ,0);
   5.219 +	if (isrv.status)
   5.220 +		panic("sn_device_fixup_war failed: %s\n",
   5.221 +		      ia64_sal_strerror(isrv.status));
   5.222 +
   5.223 +	dev_entry = war_list + device;
   5.224 +	memcpy(common,dev_entry, sizeof(*common));
   5.225 +	kfree(war_list);
   5.226 +
   5.227 +	return isrv.status;
   5.228 +}
   5.229 +
   5.230 +/*
   5.231 + * sn_fixup_ionodes() - This routine initializes the HUB data structure for
   5.232 + *	each node in the system.
   5.233 + */
   5.234 +static void __init sn_fixup_ionodes(void)
   5.235 +{
   5.236 +	struct sn_flush_device_kernel *sn_flush_device_kernel;
   5.237 +	struct sn_flush_device_kernel *dev_entry;
   5.238 +	struct hubdev_info *hubdev;
   5.239 +	u64 status;
   5.240 +	u64 nasid;
   5.241 +	int i, widget, device, size;
   5.242 +
   5.243 +	/*
   5.244 +	 * Get SGI Specific HUB chipset information.
   5.245 +	 * Inform Prom that this kernel can support domain bus numbering.
   5.246 +	 */
   5.247 +	for (i = 0; i < num_cnodes; i++) {
   5.248 +		hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
   5.249 +		nasid = cnodeid_to_nasid(i);
   5.250 +		hubdev->max_segment_number = 0xffffffff;
   5.251 +		hubdev->max_pcibus_number = 0xff;
   5.252 +		status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev));
   5.253 +		if (status)
   5.254 +			continue;
   5.255 +
   5.256 +		/* Save the largest Domain and pcibus numbers found. */
   5.257 +		if (hubdev->max_segment_number) {
   5.258 +			/*
   5.259 +			 * Dealing with a Prom that supports segments.
   5.260 +			 */
   5.261 +			max_segment_number = hubdev->max_segment_number;
   5.262 +			max_pcibus_number = hubdev->max_pcibus_number;
   5.263 +		}
   5.264 +
   5.265 +		/* Attach the error interrupt handlers */
   5.266 +		if (nasid & 1)
   5.267 +			ice_error_init(hubdev);
   5.268 +		else
   5.269 +			hub_error_init(hubdev);
   5.270 +
   5.271 +		for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
   5.272 +			hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
   5.273 +
   5.274 +		if (!hubdev->hdi_flush_nasid_list.widget_p)
   5.275 +			continue;
   5.276 +
   5.277 +		size = (HUB_WIDGET_ID_MAX + 1) *
   5.278 +			sizeof(struct sn_flush_device_kernel *);
   5.279 +		hubdev->hdi_flush_nasid_list.widget_p =
   5.280 +			kzalloc(size, GFP_KERNEL);
   5.281 +		if (!hubdev->hdi_flush_nasid_list.widget_p)
   5.282 +			BUG();
   5.283 +
   5.284 +		for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
   5.285 +			size = DEV_PER_WIDGET *
   5.286 +				sizeof(struct sn_flush_device_kernel);
   5.287 +			sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
   5.288 +			if (!sn_flush_device_kernel)
   5.289 +				BUG();
   5.290 +
   5.291 +			dev_entry = sn_flush_device_kernel;
   5.292 +			for (device = 0; device < DEV_PER_WIDGET;
   5.293 +			     device++,dev_entry++) {
   5.294 +				size = sizeof(struct sn_flush_device_common);
   5.295 +				dev_entry->common = kzalloc(size, GFP_KERNEL);
   5.296 +				if (!dev_entry->common)
   5.297 +					BUG();
   5.298 +
   5.299 +				if (sn_prom_feature_available(
   5.300 +						       PRF_DEVICE_FLUSH_LIST))
   5.301 +					status = sal_get_device_dmaflush_list(
   5.302 +						     nasid, widget, device,
   5.303 +						     (u64)(dev_entry->common));
   5.304 +				else
   5.305 +#ifdef XEN
   5.306 +					BUG();
   5.307 +#else
   5.308 +					status = sn_device_fixup_war(nasid,
   5.309 +						     widget, device,
   5.310 +						     dev_entry->common);
   5.311 +#endif
   5.312 +				if (status != SALRET_OK)
   5.313 +					panic("SAL call failed: %s\n",
   5.314 +					      ia64_sal_strerror(status));
   5.315 +
   5.316 +				spin_lock_init(&dev_entry->sfdl_flush_lock);
   5.317 +			}
   5.318 +
   5.319 +			if (sn_flush_device_kernel)
   5.320 +				hubdev->hdi_flush_nasid_list.widget_p[widget] =
   5.321 +						       sn_flush_device_kernel;
   5.322 +	        }
   5.323 +	}
   5.324 +}
   5.325 +
   5.326 +/*
   5.327 + * sn_pci_window_fixup() - Create a pci_window for each device resource.
   5.328 + *			   Until ACPI support is added, we need this code
   5.329 + *			   to setup pci_windows for use by
   5.330 + *			   pcibios_bus_to_resource(),
   5.331 + *			   pcibios_resource_to_bus(), etc.
   5.332 + */
   5.333 +static void
   5.334 +sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
   5.335 +		    s64 * pci_addrs)
   5.336 +{
   5.337 +	struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
   5.338 +	unsigned int i;
   5.339 +	unsigned int idx;
   5.340 +	unsigned int new_count;
   5.341 +	struct pci_window *new_window;
   5.342 +
   5.343 +	if (count == 0)
   5.344 +		return;
   5.345 +	idx = controller->windows;
   5.346 +	new_count = controller->windows + count;
   5.347 +	new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
   5.348 +	if (new_window == NULL)
   5.349 +		BUG();
   5.350 +	if (controller->window) {
   5.351 +		memcpy(new_window, controller->window,
   5.352 +		       sizeof(struct pci_window) * controller->windows);
   5.353 +		kfree(controller->window);
   5.354 +	}
   5.355 +
   5.356 +	/* Setup a pci_window for each device resource. */
   5.357 +	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
   5.358 +		if (pci_addrs[i] == -1)
   5.359 +			continue;
   5.360 +
   5.361 +		new_window[idx].offset = dev->resource[i].start - pci_addrs[i];
   5.362 +		new_window[idx].resource = dev->resource[i];
   5.363 +		idx++;
   5.364 +	}
   5.365 +
   5.366 +	controller->windows = new_count;
   5.367 +	controller->window = new_window;
   5.368 +}
   5.369 +
   5.370 +void sn_pci_unfixup_slot(struct pci_dev *dev)
   5.371 +{
   5.372 +	struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
   5.373 +
   5.374 +	sn_irq_unfixup(dev);
   5.375 +	pci_dev_put(host_pci_dev);
   5.376 +	pci_dev_put(dev);
   5.377 +}
   5.378 +
   5.379 +/*
   5.380 + * sn_pci_fixup_slot() - This routine sets up a slot's resources
   5.381 + * consistent with the Linux PCI abstraction layer.  Resources acquired
   5.382 + * from our PCI provider include PIO maps to BAR space and interrupt
   5.383 + * objects.
   5.384 + */
   5.385 +void sn_pci_fixup_slot(struct pci_dev *dev)
   5.386 +{
   5.387 +	unsigned int count = 0;
   5.388 +	int idx;
   5.389 +	int segment = pci_domain_nr(dev->bus);
   5.390 +	int status = 0;
   5.391 +	struct pcibus_bussoft *bs;
   5.392 + 	struct pci_bus *host_pci_bus;
   5.393 + 	struct pci_dev *host_pci_dev;
   5.394 +	struct pcidev_info *pcidev_info;
   5.395 +	s64 pci_addrs[PCI_ROM_RESOURCE + 1];
   5.396 + 	struct sn_irq_info *sn_irq_info;
   5.397 + 	unsigned long size;
   5.398 + 	unsigned int bus_no, devfn;
   5.399 +
   5.400 +	pci_dev_get(dev); /* for the sysdata pointer */
   5.401 +	pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
   5.402 +	if (!pcidev_info)
   5.403 +		BUG();		/* Cannot afford to run out of memory */
   5.404 +
   5.405 +	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
   5.406 +	if (!sn_irq_info)
   5.407 +		BUG();		/* Cannot afford to run out of memory */
   5.408 +
   5.409 +	/* Call to retrieve pci device information needed by kernel. */
   5.410 +	status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number, 
   5.411 +				     dev->devfn,
   5.412 +				     (u64) __pa(pcidev_info),
   5.413 +				     (u64) __pa(sn_irq_info));
   5.414 +	if (status)
   5.415 +		BUG(); /* Cannot get platform pci device information */
   5.416 +
   5.417 +	/* Add pcidev_info to list in sn_pci_controller struct */
   5.418 +	list_add_tail(&pcidev_info->pdi_list,
   5.419 +		      &(SN_PCI_CONTROLLER(dev->bus)->pcidev_info));
   5.420 +
   5.421 +	/* Copy over PIO Mapped Addresses */
   5.422 +	for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
   5.423 +		unsigned long start, end, addr;
   5.424 +
   5.425 +		if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
   5.426 +			pci_addrs[idx] = -1;
   5.427 +			continue;
   5.428 +		}
   5.429 +
   5.430 +		start = dev->resource[idx].start;
   5.431 +		end = dev->resource[idx].end;
   5.432 +		size = end - start;
   5.433 +		if (size == 0) {
   5.434 +			pci_addrs[idx] = -1;
   5.435 +			continue;
   5.436 +		}
   5.437 +		pci_addrs[idx] = start;
   5.438 +		count++;
   5.439 +		addr = pcidev_info->pdi_pio_mapped_addr[idx];
   5.440 +		addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
   5.441 +		dev->resource[idx].start = addr;
   5.442 +		dev->resource[idx].end = addr + size;
   5.443 +		if (dev->resource[idx].flags & IORESOURCE_IO)
   5.444 +			dev->resource[idx].parent = &ioport_resource;
   5.445 +		else
   5.446 +			dev->resource[idx].parent = &iomem_resource;
   5.447 +	}
   5.448 +	/* Create a pci_window in the pci_controller struct for
   5.449 +	 * each device resource.
   5.450 +	 */
   5.451 +	if (count > 0)
   5.452 +		sn_pci_window_fixup(dev, count, pci_addrs);
   5.453 +
   5.454 +	/*
   5.455 +	 * Using the PROMs values for the PCI host bus, get the Linux
   5.456 + 	 * PCI host_pci_dev struct and set up host bus linkages
   5.457 + 	 */
   5.458 +
   5.459 +	bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
   5.460 +	devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
   5.461 + 	host_pci_bus = pci_find_bus(segment, bus_no);
   5.462 + 	host_pci_dev = pci_get_slot(host_pci_bus, devfn);
   5.463 +
   5.464 +	pcidev_info->host_pci_dev = host_pci_dev;
   5.465 +	pcidev_info->pdi_linux_pcidev = dev;
   5.466 +	pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
   5.467 +	bs = SN_PCIBUS_BUSSOFT(dev->bus);
   5.468 +	pcidev_info->pdi_pcibus_info = bs;
   5.469 +
   5.470 +	if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
   5.471 +		SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
   5.472 +	} else {
   5.473 +		SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
   5.474 +	}
   5.475 +
   5.476 +	/* Only set up IRQ stuff if this device has a host bus context */
   5.477 +	if (bs && sn_irq_info->irq_irq) {
   5.478 +		pcidev_info->pdi_sn_irq_info = sn_irq_info;
   5.479 +		dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
   5.480 +		sn_irq_fixup(dev, sn_irq_info);
   5.481 +	} else {
   5.482 +		pcidev_info->pdi_sn_irq_info = NULL;
   5.483 +		kfree(sn_irq_info);
   5.484 +	}
   5.485 +}
   5.486 +
   5.487 +/*
   5.488 + * sn_pci_controller_fixup() - This routine sets up a bus's resources
   5.489 + * consistent with the Linux PCI abstraction layer.
   5.490 + */
   5.491 +void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
   5.492 +{
   5.493 +	int status;
   5.494 +	int nasid, cnode;
   5.495 +	struct pci_controller *controller;
   5.496 +	struct sn_pci_controller *sn_controller;
   5.497 +	struct pcibus_bussoft *prom_bussoft_ptr;
   5.498 +	struct hubdev_info *hubdev_info;
   5.499 +	void *provider_soft;
   5.500 +	struct sn_pcibus_provider *provider;
   5.501 +
   5.502 + 	status = sal_get_pcibus_info((u64) segment, (u64) busnum,
   5.503 + 				     (u64) ia64_tpa(&prom_bussoft_ptr));
   5.504 + 	if (status > 0)
   5.505 +		return;		/*bus # does not exist */
   5.506 +	prom_bussoft_ptr = __va(prom_bussoft_ptr);
   5.507 +
   5.508 +	/* Allocate a sn_pci_controller, which has a pci_controller struct
   5.509 +	 * as the first member.
   5.510 +	 */
   5.511 +	sn_controller = kzalloc(sizeof(struct sn_pci_controller), GFP_KERNEL);
   5.512 +	if (!sn_controller)
   5.513 +		BUG();
   5.514 +	INIT_LIST_HEAD(&sn_controller->pcidev_info);
   5.515 +	controller = &sn_controller->pci_controller;
   5.516 +	controller->segment = segment;
   5.517 +
   5.518 +	if (bus == NULL) {
   5.519 + 		bus = pci_scan_bus(busnum, &pci_root_ops, controller);
   5.520 + 		if (bus == NULL)
   5.521 + 			goto error_return; /* error, or bus already scanned */
   5.522 + 		bus->sysdata = NULL;
   5.523 +	}
   5.524 +
   5.525 +	if (bus->sysdata)
   5.526 +		goto error_return; /* sysdata already alloc'd */
   5.527 +
   5.528 +	/*
   5.529 +	 * Per-provider fixup.  Copies the contents from prom to local
   5.530 +	 * area and links SN_PCIBUS_BUSSOFT().
   5.531 +	 */
   5.532 +
   5.533 +	if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES)
   5.534 +		goto error_return; /* unsupported asic type */
   5.535 +
   5.536 +	if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
   5.537 +		goto error_return; /* no further fixup necessary */
   5.538 +
   5.539 +	provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
   5.540 +	if (provider == NULL)
   5.541 +		goto error_return; /* no provider registered for this asic */
   5.542 +
   5.543 +	bus->sysdata = controller;
   5.544 +	if (provider->bus_fixup)
   5.545 +		provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller);
   5.546 +	else
   5.547 +		provider_soft = NULL;
   5.548 +
   5.549 +	if (provider_soft == NULL) {
   5.550 +		/* fixup failed or not applicable */
   5.551 +		bus->sysdata = NULL;
   5.552 +		goto error_return;
   5.553 +	}
   5.554 +
   5.555 +	/*
   5.556 +	 * Setup pci_windows for legacy IO and MEM space.
   5.557 +	 * (Temporary until ACPI support is in place.)
   5.558 +	 */
   5.559 +	controller->window = kcalloc(2, sizeof(struct pci_window), GFP_KERNEL);
   5.560 +	if (controller->window == NULL)
   5.561 +		BUG();
   5.562 +	controller->window[0].offset = prom_bussoft_ptr->bs_legacy_io;
   5.563 +	controller->window[0].resource.name = "legacy_io";
   5.564 +	controller->window[0].resource.flags = IORESOURCE_IO;
   5.565 +	controller->window[0].resource.start = prom_bussoft_ptr->bs_legacy_io;
   5.566 +	controller->window[0].resource.end =
   5.567 +	    controller->window[0].resource.start + 0xffff;
   5.568 +	controller->window[0].resource.parent = &ioport_resource;
   5.569 +	controller->window[1].offset = prom_bussoft_ptr->bs_legacy_mem;
   5.570 +	controller->window[1].resource.name = "legacy_mem";
   5.571 +	controller->window[1].resource.flags = IORESOURCE_MEM;
   5.572 +	controller->window[1].resource.start = prom_bussoft_ptr->bs_legacy_mem;
   5.573 +	controller->window[1].resource.end =
   5.574 +	    controller->window[1].resource.start + (1024 * 1024) - 1;
   5.575 +	controller->window[1].resource.parent = &iomem_resource;
   5.576 +	controller->windows = 2;
   5.577 +
   5.578 +	/*
   5.579 +	 * Generic bus fixup goes here.  Don't reference prom_bussoft_ptr
   5.580 +	 * after this point.
   5.581 +	 */
   5.582 +
   5.583 +	PCI_CONTROLLER(bus)->platform_data = provider_soft;
   5.584 +	nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
   5.585 +	cnode = nasid_to_cnodeid(nasid);
   5.586 +	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
   5.587 +	SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
   5.588 +	    &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
   5.589 +
   5.590 +	/*
   5.591 +	 * If the node information we obtained during the fixup phase is invalid
   5.592 +	 * then set controller->node to -1 (undetermined)
   5.593 +	 */
   5.594 +	if (controller->node >= num_online_nodes()) {
   5.595 +		struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
   5.596 +
   5.597 +		printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u"
   5.598 +				    "L_IO=%lx L_MEM=%lx BASE=%lx\n",
   5.599 +			b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
   5.600 +			b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
   5.601 +		printk(KERN_WARNING "on node %d but only %d nodes online."
   5.602 +			"Association set to undetermined.\n",
   5.603 +			controller->node, num_online_nodes());
   5.604 +		controller->node = -1;
   5.605 +	}
   5.606 +	return;
   5.607 +
   5.608 +error_return:
   5.609 +
   5.610 +	kfree(sn_controller);
   5.611 +	return;
   5.612 +}
   5.613 +
   5.614 +void sn_bus_store_sysdata(struct pci_dev *dev)
   5.615 +{
   5.616 +	struct sysdata_el *element;
   5.617 +
   5.618 +	element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
   5.619 +	if (!element) {
   5.620 +		dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
   5.621 +		return;
   5.622 +	}
   5.623 +	element->sysdata = SN_PCIDEV_INFO(dev);
   5.624 +	list_add(&element->entry, &sn_sysdata_list);
   5.625 +}
   5.626 +
   5.627 +void sn_bus_free_sysdata(void)
   5.628 +{
   5.629 +	struct sysdata_el *element;
   5.630 +	struct list_head *list, *safe;
   5.631 +
   5.632 +	list_for_each_safe(list, safe, &sn_sysdata_list) {
   5.633 +		element = list_entry(list, struct sysdata_el, entry);
   5.634 +		list_del(&element->entry);
   5.635 +		list_del(&(((struct pcidev_info *)
   5.636 +			     (element->sysdata))->pdi_list));
   5.637 +		kfree(element->sysdata);
   5.638 +		kfree(element);
   5.639 +	}
   5.640 +	return;
   5.641 +}
   5.642 +#endif
   5.643 +
   5.644 +/*
   5.645 + * Ugly hack to get PCI setup until we have a proper ACPI namespace.
   5.646 + */
   5.647 +
   5.648 +#define PCI_BUSES_TO_SCAN 256
   5.649 +
   5.650 +static int __init sn_pci_init(void)
   5.651 +{
   5.652 +#ifndef XEN
   5.653 +	int i, j;
   5.654 +	struct pci_dev *pci_dev = NULL;
   5.655 +#endif
   5.656 +
   5.657 +	if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
   5.658 +		return 0;
   5.659 +
   5.660 +#ifndef XEN
   5.661 +	/*
   5.662 +	 * prime sn_pci_provider[].  Individual provider init routines will
   5.663 +	 * override their respective default entries.
   5.664 +	 */
   5.665 +
   5.666 +	for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
   5.667 +		sn_pci_provider[i] = &sn_pci_default_provider;
   5.668 +
   5.669 +	pcibr_init_provider();
   5.670 +	tioca_init_provider();
   5.671 +	tioce_init_provider();
   5.672 +#endif
   5.673 +
   5.674 +	/*
   5.675 +	 * This is needed to avoid bounce limit checks in the blk layer
   5.676 +	 */
   5.677 +	ia64_max_iommu_merge_mask = ~PAGE_MASK;
   5.678 +#ifndef XEN
   5.679 +	sn_fixup_ionodes();
   5.680 +#endif
   5.681 +	sn_irq_lh_init();
   5.682 +	INIT_LIST_HEAD(&sn_sysdata_list);
   5.683 +#ifndef XEN
   5.684 +	sn_init_cpei_timer();
   5.685 +
   5.686 +#ifdef CONFIG_PROC_FS
   5.687 +	register_sn_procfs();
   5.688 +#endif
   5.689 +
   5.690 +	/* busses are not known yet ... */
   5.691 +	for (i = 0; i <= max_segment_number; i++)
   5.692 +		for (j = 0; j <= max_pcibus_number; j++)
   5.693 +			sn_pci_controller_fixup(i, j, NULL);
   5.694 +
   5.695 +	/*
   5.696 +	 * Generic Linux PCI Layer has created the pci_bus and pci_dev 
   5.697 +	 * structures - time for us to add our SN Platform specific 
   5.698 +	 * information.
   5.699 +	 */
   5.700 +
   5.701 +	while ((pci_dev =
   5.702 +		pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL)
   5.703 +		sn_pci_fixup_slot(pci_dev);
   5.704 +#endif
   5.705 +
   5.706 +	sn_ioif_inited = 1;	/* sn I/O infrastructure now initialized */
   5.707 +
   5.708 +	return 0;
   5.709 +}
   5.710 +
   5.711 +/*
   5.712 + * hubdev_init_node() - Creates the HUB data structure and links them to its 
   5.713 + *	own NODE specific data area.
   5.714 + */
   5.715 +void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
   5.716 +{
   5.717 +	struct hubdev_info *hubdev_info;
   5.718 +	int size;
   5.719 +#ifndef XEN
   5.720 +	pg_data_t *pg;
   5.721 +#else
   5.722 +	struct pglist_data *pg;
   5.723 +#endif
   5.724 +
   5.725 +	size = sizeof(struct hubdev_info);
   5.726 +
   5.727 +	if (node >= num_online_nodes())	/* Headless/memless IO nodes */
   5.728 +		pg = NODE_DATA(0);
   5.729 +	else
   5.730 +		pg = NODE_DATA(node);
   5.731 +
   5.732 +	hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
   5.733 +
   5.734 +	npda->pdinfo = (void *)hubdev_info;
   5.735 +}
   5.736 +
   5.737 +geoid_t
   5.738 +cnodeid_get_geoid(cnodeid_t cnode)
   5.739 +{
   5.740 +	struct hubdev_info *hubdev;
   5.741 +
   5.742 +	hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
   5.743 +	return hubdev->hdi_geoid;
   5.744 +}
   5.745 +
   5.746 +#ifndef XEN
   5.747 +void sn_generate_path(struct pci_bus *pci_bus, char *address)
   5.748 +{
   5.749 +	nasid_t nasid;
   5.750 +	cnodeid_t cnode;
   5.751 +	geoid_t geoid;
   5.752 +	moduleid_t moduleid;
   5.753 +	u16 bricktype;
   5.754 +
   5.755 +	nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
   5.756 +	cnode = nasid_to_cnodeid(nasid);
   5.757 +	geoid = cnodeid_get_geoid(cnode);
   5.758 +	moduleid = geo_module(geoid);
   5.759 +
   5.760 +	sprintf(address, "module_%c%c%c%c%.2d",
   5.761 +		'0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
   5.762 +		'0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
   5.763 +		'0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
   5.764 +		MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
   5.765 +
   5.766 +	/* Tollhouse requires slot id to be displayed */
   5.767 +	bricktype = MODULE_GET_BTYPE(moduleid);
   5.768 +	if ((bricktype == L1_BRICKTYPE_191010) ||
   5.769 +	    (bricktype == L1_BRICKTYPE_1932))
   5.770 +			sprintf(address, "%s^%d", address, geo_slot(geoid));
   5.771 +}
   5.772 +#endif
   5.773 +
   5.774 +#ifdef XEN
   5.775 +__initcall(sn_pci_init);
   5.776 +#else
   5.777 +subsys_initcall(sn_pci_init);
   5.778 +#endif
   5.779 +#ifndef XEN
   5.780 +EXPORT_SYMBOL(sn_pci_fixup_slot);
   5.781 +EXPORT_SYMBOL(sn_pci_unfixup_slot);
   5.782 +EXPORT_SYMBOL(sn_pci_controller_fixup);
   5.783 +EXPORT_SYMBOL(sn_bus_store_sysdata);
   5.784 +EXPORT_SYMBOL(sn_bus_free_sysdata);
   5.785 +EXPORT_SYMBOL(sn_generate_path);
   5.786 +#endif
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/ia64/linux-xen/sn/kernel/iomv.c	Wed Dec 20 14:55:02 2006 -0700
     6.3 @@ -0,0 +1,82 @@
     6.4 +/* 
     6.5 + * This file is subject to the terms and conditions of the GNU General Public
     6.6 + * License.  See the file "COPYING" in the main directory of this archive
     6.7 + * for more details.
     6.8 + *
     6.9 + * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
    6.10 + */
    6.11 +
    6.12 +#include <linux/module.h>
    6.13 +#include <asm/io.h>
    6.14 +#include <asm/delay.h>
    6.15 +#ifndef XEN
    6.16 +#include <asm/vga.h>
    6.17 +#endif
    6.18 +#include <asm/sn/nodepda.h>
    6.19 +#include <asm/sn/simulator.h>
    6.20 +#include <asm/sn/pda.h>
    6.21 +#include <asm/sn/sn_cpuid.h>
    6.22 +#include <asm/sn/shub_mmr.h>
    6.23 +
    6.24 +#define IS_LEGACY_VGA_IOPORT(p) \
    6.25 +	(((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))
    6.26 +
    6.27 +#ifdef XEN
    6.28 +#define vga_console_iobase	0
    6.29 +#endif
    6.30 +
    6.31 +/**
    6.32 + * sn_io_addr - convert an in/out port to an i/o address
    6.33 + * @port: port to convert
    6.34 + *
    6.35 + * Legacy in/out instructions are converted to ld/st instructions
    6.36 + * on IA64.  This routine will convert a port number into a valid 
    6.37 + * SN i/o address.  Used by sn_in*() and sn_out*().
    6.38 + */
    6.39 +void *sn_io_addr(unsigned long port)
    6.40 +{
    6.41 +	if (!IS_RUNNING_ON_SIMULATOR()) {
    6.42 +		if (IS_LEGACY_VGA_IOPORT(port))
    6.43 +			port += vga_console_iobase;
    6.44 +		/* On sn2, legacy I/O ports don't point at anything */
    6.45 +		if (port < (64 * 1024))
    6.46 +			return NULL;
    6.47 +		return ((void *)(port | __IA64_UNCACHED_OFFSET));
    6.48 +	} else {
    6.49 +		/* but the simulator uses them... */
    6.50 +		unsigned long addr;
    6.51 +
    6.52 +		/*
    6.53 +		 * word align port, but need more than 10 bits
    6.54 +		 * for accessing registers in bedrock local block
    6.55 +		 * (so we don't do port&0xfff)
    6.56 +		 */
    6.57 +		addr = (is_shub2() ? 0xc00000028c000000UL : 0xc0000087cc000000UL) | ((port >> 2) << 12);
    6.58 +		if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6 || port == 0x3f7)
    6.59 +			addr |= port;
    6.60 +		return (void *)addr;
    6.61 +	}
    6.62 +}
    6.63 +
    6.64 +EXPORT_SYMBOL(sn_io_addr);
    6.65 +
    6.66 +/**
    6.67 + * __sn_mmiowb - I/O space memory barrier
    6.68 + *
    6.69 + * See include/asm-ia64/io.h and Documentation/DocBook/deviceiobook.tmpl
    6.70 + * for details.
    6.71 + *
    6.72 + * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
    6.73 + * See PV 871084 for details about the WAR about zero value.
    6.74 + *
    6.75 + */
    6.76 +void __sn_mmiowb(void)
    6.77 +{
    6.78 +	volatile unsigned long *adr = pda->pio_write_status_addr;
    6.79 +	unsigned long val = pda->pio_write_status_val;
    6.80 +
    6.81 +	while ((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val)
    6.82 +		cpu_relax();
    6.83 +}
    6.84 +
    6.85 +EXPORT_SYMBOL(__sn_mmiowb);
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen/arch/ia64/linux-xen/sn/kernel/irq.c	Wed Dec 20 14:55:02 2006 -0700
     7.3 @@ -0,0 +1,542 @@
     7.4 +/*
     7.5 + * Platform dependent support for SGI SN
     7.6 + *
     7.7 + * This file is subject to the terms and conditions of the GNU General Public
     7.8 + * License.  See the file "COPYING" in the main directory of this archive
     7.9 + * for more details.
    7.10 + *
    7.11 + * Copyright (c) 2000-2006 Silicon Graphics, Inc.  All Rights Reserved.
    7.12 + */
    7.13 +
    7.14 +#include <linux/irq.h>
    7.15 +#include <linux/spinlock.h>
    7.16 +#include <linux/init.h>
    7.17 +#ifdef XEN
    7.18 +#include <linux/pci.h>
    7.19 +#include <asm/hw_irq.h>
    7.20 +#endif
    7.21 +#include <asm/sn/addrs.h>
    7.22 +#include <asm/sn/arch.h>
    7.23 +#include <asm/sn/intr.h>
    7.24 +#include <asm/sn/pcibr_provider.h>
    7.25 +#include <asm/sn/pcibus_provider_defs.h>
    7.26 +#ifndef XEN
    7.27 +#include <asm/sn/pcidev.h>
    7.28 +#endif
    7.29 +#include <asm/sn/shub_mmr.h>
    7.30 +#include <asm/sn/sn_sal.h>
    7.31 +
    7.32 +#ifdef XEN
    7.33 +#define move_native_irq(foo)	do {} while(0)
    7.34 +#endif
    7.35 +
    7.36 +static void force_interrupt(int irq);
    7.37 +#ifndef XEN
    7.38 +static void register_intr_pda(struct sn_irq_info *sn_irq_info);
    7.39 +static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
    7.40 +#endif
    7.41 +
    7.42 +int sn_force_interrupt_flag = 1;
    7.43 +extern int sn_ioif_inited;
    7.44 +struct list_head **sn_irq_lh;
    7.45 +static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
    7.46 +
    7.47 +u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
    7.48 +				     struct sn_irq_info *sn_irq_info,
    7.49 +				     int req_irq, nasid_t req_nasid,
    7.50 +				     int req_slice)
    7.51 +{
    7.52 +	struct ia64_sal_retval ret_stuff;
    7.53 +	ret_stuff.status = 0;
    7.54 +	ret_stuff.v0 = 0;
    7.55 +
    7.56 +	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
    7.57 +			(u64) SAL_INTR_ALLOC, (u64) local_nasid,
    7.58 +			(u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
    7.59 +			(u64) req_nasid, (u64) req_slice);
    7.60 +
    7.61 +	return ret_stuff.status;
    7.62 +}
    7.63 +
    7.64 +void sn_intr_free(nasid_t local_nasid, int local_widget,
    7.65 +				struct sn_irq_info *sn_irq_info)
    7.66 +{
    7.67 +	struct ia64_sal_retval ret_stuff;
    7.68 +	ret_stuff.status = 0;
    7.69 +	ret_stuff.v0 = 0;
    7.70 +
    7.71 +	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
    7.72 +			(u64) SAL_INTR_FREE, (u64) local_nasid,
    7.73 +			(u64) local_widget, (u64) sn_irq_info->irq_irq,
    7.74 +			(u64) sn_irq_info->irq_cookie, 0, 0);
    7.75 +}
    7.76 +
    7.77 +static unsigned int sn_startup_irq(unsigned int irq)
    7.78 +{
    7.79 +	return 0;
    7.80 +}
    7.81 +
    7.82 +static void sn_shutdown_irq(unsigned int irq)
    7.83 +{
    7.84 +}
    7.85 +
    7.86 +static void sn_disable_irq(unsigned int irq)
    7.87 +{
    7.88 +}
    7.89 +
    7.90 +static void sn_enable_irq(unsigned int irq)
    7.91 +{
    7.92 +}
    7.93 +
    7.94 +static void sn_ack_irq(unsigned int irq)
    7.95 +{
    7.96 +	u64 event_occurred, mask;
    7.97 +
    7.98 +	irq = irq & 0xff;
    7.99 +	event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
   7.100 +	mask = event_occurred & SH_ALL_INT_MASK;
   7.101 +	HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
   7.102 +	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
   7.103 +
   7.104 +	move_native_irq(irq);
   7.105 +}
   7.106 +
   7.107 +static void sn_end_irq(unsigned int irq)
   7.108 +{
   7.109 +	int ivec;
   7.110 +	u64 event_occurred;
   7.111 +
   7.112 +	ivec = irq & 0xff;
   7.113 +	if (ivec == SGI_UART_VECTOR) {
   7.114 +		event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED));
   7.115 +		/* If the UART bit is set here, we may have received an
   7.116 +		 * interrupt from the UART that the driver missed.  To
   7.117 +		 * make sure, we IPI ourselves to force us to look again.
   7.118 +		 */
   7.119 +		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
   7.120 +			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
   7.121 +					  IA64_IPI_DM_INT, 0);
   7.122 +		}
   7.123 +	}
   7.124 +	__clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
   7.125 +	if (sn_force_interrupt_flag)
   7.126 +		force_interrupt(irq);
   7.127 +}
   7.128 +
   7.129 +#ifndef XEN
   7.130 +static void sn_irq_info_free(struct rcu_head *head);
   7.131 +
   7.132 +struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
   7.133 +				       nasid_t nasid, int slice)
   7.134 +{
   7.135 +	int vector;
   7.136 +	int cpuphys;
   7.137 +	int64_t bridge;
   7.138 +	int local_widget, status;
   7.139 +	nasid_t local_nasid;
   7.140 +	struct sn_irq_info *new_irq_info;
   7.141 +	struct sn_pcibus_provider *pci_provider;
   7.142 +
   7.143 +	new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
   7.144 +	if (new_irq_info == NULL)
   7.145 +		return NULL;
   7.146 +
   7.147 +	memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
   7.148 +
   7.149 +	bridge = (u64) new_irq_info->irq_bridge;
   7.150 +	if (!bridge) {
   7.151 +		kfree(new_irq_info);
   7.152 +		return NULL; /* irq is not a device interrupt */
   7.153 +	}
   7.154 +
   7.155 +	local_nasid = NASID_GET(bridge);
   7.156 +
   7.157 +	if (local_nasid & 1)
   7.158 +		local_widget = TIO_SWIN_WIDGETNUM(bridge);
   7.159 +	else
   7.160 +		local_widget = SWIN_WIDGETNUM(bridge);
   7.161 +
   7.162 +	vector = sn_irq_info->irq_irq;
   7.163 +	/* Free the old PROM new_irq_info structure */
   7.164 +	sn_intr_free(local_nasid, local_widget, new_irq_info);
   7.165 +	/* Update kernels new_irq_info with new target info */
   7.166 +	unregister_intr_pda(new_irq_info);
   7.167 +
   7.168 +	/* allocate a new PROM new_irq_info struct */
   7.169 +	status = sn_intr_alloc(local_nasid, local_widget,
   7.170 +			       new_irq_info, vector,
   7.171 +			       nasid, slice);
   7.172 +
   7.173 +	/* SAL call failed */
   7.174 +	if (status) {
   7.175 +		kfree(new_irq_info);
   7.176 +		return NULL;
   7.177 +	}
   7.178 +
   7.179 +	cpuphys = nasid_slice_to_cpuid(nasid, slice);
   7.180 +	new_irq_info->irq_cpuid = cpuphys;
   7.181 +	register_intr_pda(new_irq_info);
   7.182 +
   7.183 +	pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
   7.184 +
   7.185 +	/*
   7.186 +	 * If this represents a line interrupt, target it.  If it's
   7.187 +	 * an msi (irq_int_bit < 0), it's already targeted.
   7.188 +	 */
   7.189 +	if (new_irq_info->irq_int_bit >= 0 &&
   7.190 +	    pci_provider && pci_provider->target_interrupt)
   7.191 +		(pci_provider->target_interrupt)(new_irq_info);
   7.192 +
   7.193 +	spin_lock(&sn_irq_info_lock);
   7.194 +#ifdef XEN
   7.195 +	list_replace(&sn_irq_info->list, &new_irq_info->list);
   7.196 +#else
   7.197 +	list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
   7.198 +#endif
   7.199 +	spin_unlock(&sn_irq_info_lock);
   7.200 +#ifndef XEN
   7.201 +	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
   7.202 +#endif
   7.203 +
   7.204 +#ifdef CONFIG_SMP
   7.205 +	set_irq_affinity_info((vector & 0xff), cpuphys, 0);
   7.206 +#endif
   7.207 +
   7.208 +	return new_irq_info;
   7.209 +}
   7.210 +
   7.211 +static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
   7.212 +{
   7.213 +	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
   7.214 +	nasid_t nasid;
   7.215 +	int slice;
   7.216 +
   7.217 +	nasid = cpuid_to_nasid(first_cpu(mask));
   7.218 +	slice = cpuid_to_slice(first_cpu(mask));
   7.219 +
   7.220 +	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
   7.221 +				 sn_irq_lh[irq], list)
   7.222 +		(void)sn_retarget_vector(sn_irq_info, nasid, slice);
   7.223 +}
   7.224 +#endif
   7.225 +
   7.226 +struct hw_interrupt_type irq_type_sn = {
   7.227 +#ifndef XEN
   7.228 +	.name		= "SN hub",
   7.229 +#endif
   7.230 +	.startup	= sn_startup_irq,
   7.231 +	.shutdown	= sn_shutdown_irq,
   7.232 +	.enable		= sn_enable_irq,
   7.233 +	.disable	= sn_disable_irq,
   7.234 +	.ack		= sn_ack_irq,
   7.235 +	.end		= sn_end_irq,
   7.236 +#ifndef XEN
   7.237 +	.set_affinity	= sn_set_affinity_irq
   7.238 +#endif
   7.239 +};
   7.240 +
   7.241 +unsigned int sn_local_vector_to_irq(u8 vector)
   7.242 +{
   7.243 +	return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
   7.244 +}
   7.245 +
   7.246 +void sn_irq_init(void)
   7.247 +{
   7.248 +#ifndef XEN
   7.249 +	int i;
   7.250 +	irq_desc_t *base_desc = irq_desc;
   7.251 +
   7.252 +	ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
   7.253 +	ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
   7.254 +
   7.255 +	for (i = 0; i < NR_IRQS; i++) {
   7.256 +		if (base_desc[i].chip == &no_irq_type) {
   7.257 +			base_desc[i].chip = &irq_type_sn;
   7.258 +		}
   7.259 +	}
   7.260 +#endif
   7.261 +}
   7.262 +
   7.263 +#ifndef XEN
   7.264 +static void register_intr_pda(struct sn_irq_info *sn_irq_info)
   7.265 +{
   7.266 +	int irq = sn_irq_info->irq_irq;
   7.267 +	int cpu = sn_irq_info->irq_cpuid;
   7.268 +
   7.269 +	if (pdacpu(cpu)->sn_last_irq < irq) {
   7.270 +		pdacpu(cpu)->sn_last_irq = irq;
   7.271 +	}
   7.272 +
   7.273 +	if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
   7.274 +		pdacpu(cpu)->sn_first_irq = irq;
   7.275 +}
   7.276 +
   7.277 +static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
   7.278 +{
   7.279 +	int irq = sn_irq_info->irq_irq;
   7.280 +	int cpu = sn_irq_info->irq_cpuid;
   7.281 +	struct sn_irq_info *tmp_irq_info;
   7.282 +	int i, foundmatch;
   7.283 +
   7.284 +#ifndef XEN
   7.285 +	rcu_read_lock();
   7.286 +#else
   7.287 +	spin_lock(&sn_irq_info_lock);
   7.288 +#endif
   7.289 +	if (pdacpu(cpu)->sn_last_irq == irq) {
   7.290 +		foundmatch = 0;
   7.291 +		for (i = pdacpu(cpu)->sn_last_irq - 1;
   7.292 +		     i && !foundmatch; i--) {
   7.293 +#ifdef XEN
   7.294 +			list_for_each_entry(tmp_irq_info,
   7.295 +						sn_irq_lh[i],
   7.296 +						list) {
   7.297 +#else
   7.298 +			list_for_each_entry_rcu(tmp_irq_info,
   7.299 +						sn_irq_lh[i],
   7.300 +						list) {
   7.301 +#endif
   7.302 +				if (tmp_irq_info->irq_cpuid == cpu) {
   7.303 +					foundmatch = 1;
   7.304 +					break;
   7.305 +				}
   7.306 +			}
   7.307 +		}
   7.308 +		pdacpu(cpu)->sn_last_irq = i;
   7.309 +	}
   7.310 +
   7.311 +	if (pdacpu(cpu)->sn_first_irq == irq) {
   7.312 +		foundmatch = 0;
   7.313 +		for (i = pdacpu(cpu)->sn_first_irq + 1;
   7.314 +		     i < NR_IRQS && !foundmatch; i++) {
   7.315 +#ifdef XEN
   7.316 +			list_for_each_entry(tmp_irq_info,
   7.317 +						sn_irq_lh[i],
   7.318 +						list) {
   7.319 +#else
   7.320 +			list_for_each_entry_rcu(tmp_irq_info,
   7.321 +						sn_irq_lh[i],
   7.322 +						list) {
   7.323 +#endif
   7.324 +				if (tmp_irq_info->irq_cpuid == cpu) {
   7.325 +					foundmatch = 1;
   7.326 +					break;
   7.327 +				}
   7.328 +			}
   7.329 +		}
   7.330 +		pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
   7.331 +	}
   7.332 +#ifndef XEN
   7.333 +	rcu_read_unlock();
   7.334 +#else
   7.335 +	spin_unlock(&sn_irq_info_lock);
   7.336 +#endif
   7.337 +}
   7.338 +#endif /* XEN */
   7.339 +
   7.340 +#ifndef XEN
   7.341 +static void sn_irq_info_free(struct rcu_head *head)
   7.342 +{
   7.343 +	struct sn_irq_info *sn_irq_info;
   7.344 +
   7.345 +	sn_irq_info = container_of(head, struct sn_irq_info, rcu);
   7.346 +	kfree(sn_irq_info);
   7.347 +}
   7.348 +#endif
   7.349 +
   7.350 +#ifndef XEN
   7.351 +void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
   7.352 +{
   7.353 +	nasid_t nasid = sn_irq_info->irq_nasid;
   7.354 +	int slice = sn_irq_info->irq_slice;
   7.355 +	int cpu = nasid_slice_to_cpuid(nasid, slice);
   7.356 +
   7.357 +	pci_dev_get(pci_dev);
   7.358 +	sn_irq_info->irq_cpuid = cpu;
   7.359 +	sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
   7.360 +
   7.361 +	/* link it into the sn_irq[irq] list */
   7.362 +	spin_lock(&sn_irq_info_lock);
   7.363 +#ifdef XEN
   7.364 +	list_add(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
   7.365 +#else
   7.366 +	list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
   7.367 +#endif
   7.368 +#ifndef XEN
   7.369 +	reserve_irq_vector(sn_irq_info->irq_irq);
   7.370 +#endif
   7.371 +	spin_unlock(&sn_irq_info_lock);
   7.372 +
   7.373 +	register_intr_pda(sn_irq_info);
   7.374 +}
   7.375 +
   7.376 +void sn_irq_unfixup(struct pci_dev *pci_dev)
   7.377 +{
   7.378 +	struct sn_irq_info *sn_irq_info;
   7.379 +
   7.380 +	/* Only cleanup IRQ stuff if this device has a host bus context */
   7.381 +	if (!SN_PCIDEV_BUSSOFT(pci_dev))
   7.382 +		return;
   7.383 +
   7.384 +	sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
   7.385 +	if (!sn_irq_info)
   7.386 +		return;
   7.387 +	if (!sn_irq_info->irq_irq) {
   7.388 +		kfree(sn_irq_info);
   7.389 +		return;
   7.390 +	}
   7.391 +
   7.392 +	unregister_intr_pda(sn_irq_info);
   7.393 +	spin_lock(&sn_irq_info_lock);
   7.394 +#ifdef XEN
   7.395 +	list_del(&sn_irq_info->list);
   7.396 +#else
   7.397 +	list_del_rcu(&sn_irq_info->list);
   7.398 +#endif
   7.399 +	spin_unlock(&sn_irq_info_lock);
   7.400 +	if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
   7.401 +		free_irq_vector(sn_irq_info->irq_irq);
   7.402 +#ifndef XEN
   7.403 +	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
   7.404 +#endif
   7.405 +	pci_dev_put(pci_dev);
   7.406 +
   7.407 +}
   7.408 +#endif
   7.409 +
   7.410 +static inline void
   7.411 +sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
   7.412 +{
   7.413 +	struct sn_pcibus_provider *pci_provider;
   7.414 +
   7.415 +	pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
   7.416 +	if (pci_provider && pci_provider->force_interrupt)
   7.417 +		(*pci_provider->force_interrupt)(sn_irq_info);
   7.418 +}
   7.419 +
   7.420 +static void force_interrupt(int irq)
   7.421 +{
   7.422 +	struct sn_irq_info *sn_irq_info;
   7.423 +
   7.424 +#ifndef XEN
   7.425 +	if (!sn_ioif_inited)
   7.426 +		return;
   7.427 +#endif
   7.428 +
   7.429 +#ifdef XEN
   7.430 +	spin_lock(&sn_irq_info_lock);
   7.431 +#else
   7.432 +	rcu_read_lock();
   7.433 +#endif
   7.434 +#ifdef XEN
   7.435 +	list_for_each_entry(sn_irq_info, sn_irq_lh[irq], list)
   7.436 +#else
   7.437 +	list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
   7.438 +#endif
   7.439 +		sn_call_force_intr_provider(sn_irq_info);
   7.440 +
   7.441 +#ifdef XEN
   7.442 +	spin_unlock(&sn_irq_info_lock);
   7.443 +#else
   7.444 +	rcu_read_unlock();
   7.445 +#endif
   7.446 +}
   7.447 +
   7.448 +#ifndef XEN
   7.449 +/*
   7.450 + * Check for lost interrupts.  If the PIC int_status reg. says that
   7.451 + * an interrupt has been sent, but not handled, and the interrupt
   7.452 + * is not pending in either the cpu irr regs or in the soft irr regs,
   7.453 + * and the interrupt is not in service, then the interrupt may have
   7.454 + * been lost.  Force an interrupt on that pin.  It is possible that
   7.455 + * the interrupt is in flight, so we may generate a spurious interrupt,
   7.456 + * but we should never miss a real lost interrupt.
   7.457 + */
   7.458 +static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
   7.459 +{
   7.460 +	u64 regval;
   7.461 +	struct pcidev_info *pcidev_info;
   7.462 +	struct pcibus_info *pcibus_info;
   7.463 +
   7.464 +	/*
   7.465 +	 * Bridge types attached to TIO (anything but PIC) do not need this WAR
   7.466 +	 * since they do not target Shub II interrupt registers.  If that
    7.467 +	 * ever changes, this check needs to accommodate.
   7.468 +	 */
   7.469 +	if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
   7.470 +		return;
   7.471 +
   7.472 +	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
   7.473 +	if (!pcidev_info)
   7.474 +		return;
   7.475 +
   7.476 +	pcibus_info =
   7.477 +	    (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
   7.478 +	    pdi_pcibus_info;
   7.479 +	regval = pcireg_intr_status_get(pcibus_info);
   7.480 +
   7.481 +	if (!ia64_get_irr(irq_to_vector(irq))) {
   7.482 +		if (!test_bit(irq, pda->sn_in_service_ivecs)) {
   7.483 +			regval &= 0xff;
   7.484 +			if (sn_irq_info->irq_int_bit & regval &
   7.485 +			    sn_irq_info->irq_last_intr) {
   7.486 +				regval &= ~(sn_irq_info->irq_int_bit & regval);
   7.487 +				sn_call_force_intr_provider(sn_irq_info);
   7.488 +			}
   7.489 +		}
   7.490 +	}
   7.491 +	sn_irq_info->irq_last_intr = regval;
   7.492 +}
   7.493 +#endif
   7.494 +
   7.495 +void sn_lb_int_war_check(void)
   7.496 +{
   7.497 +#ifndef XEN
   7.498 +	struct sn_irq_info *sn_irq_info;
   7.499 +	int i;
   7.500 +
   7.501 +#ifdef XEN
   7.502 +	if (pda->sn_first_irq == 0)
   7.503 +#else
   7.504 +	if (!sn_ioif_inited || pda->sn_first_irq == 0)
   7.505 +#endif
   7.506 +		return;
   7.507 +
   7.508 +#ifdef XEN
   7.509 +	spin_lock(&sn_irq_info_lock);
   7.510 +#else
   7.511 +	rcu_read_lock();
   7.512 +#endif
   7.513 +	for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
   7.514 +#ifdef XEN
   7.515 +		list_for_each_entry(sn_irq_info, sn_irq_lh[i], list) {
   7.516 +#else
   7.517 +		list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
   7.518 +#endif
   7.519 +			sn_check_intr(i, sn_irq_info);
   7.520 +		}
   7.521 +	}
   7.522 +#ifdef XEN
   7.523 +	spin_unlock(&sn_irq_info_lock);
   7.524 +#else
   7.525 +	rcu_read_unlock();
   7.526 +#endif
   7.527 +#endif
   7.528 +}
   7.529 +
   7.530 +void __init sn_irq_lh_init(void)
   7.531 +{
   7.532 +	int i;
   7.533 +
   7.534 +	sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
   7.535 +	if (!sn_irq_lh)
   7.536 +		panic("SN PCI INIT: Failed to allocate memory for PCI init\n");
   7.537 +
   7.538 +	for (i = 0; i < NR_IRQS; i++) {
   7.539 +		sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
   7.540 +		if (!sn_irq_lh[i])
   7.541 +			panic("SN PCI INIT: Failed IRQ memory allocation\n");
   7.542 +
   7.543 +		INIT_LIST_HEAD(sn_irq_lh[i]);
   7.544 +	}
   7.545 +}
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/xen/arch/ia64/linux-xen/sn/kernel/setup.c	Wed Dec 20 14:55:02 2006 -0700
     8.3 @@ -0,0 +1,808 @@
     8.4 +/*
     8.5 + * This file is subject to the terms and conditions of the GNU General Public
     8.6 + * License.  See the file "COPYING" in the main directory of this archive
     8.7 + * for more details.
     8.8 + *
     8.9 + * Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All rights reserved.
    8.10 + */
    8.11 +
    8.12 +#include <linux/module.h>
    8.13 +#include <linux/init.h>
    8.14 +#include <linux/delay.h>
    8.15 +#include <linux/kernel.h>
    8.16 +#ifndef XEN
    8.17 +#include <linux/kdev_t.h>
    8.18 +#endif
    8.19 +#include <linux/string.h>
    8.20 +#ifndef XEN
    8.21 +#include <linux/screen_info.h>
    8.22 +#endif
    8.23 +#include <linux/console.h>
    8.24 +#include <linux/timex.h>
    8.25 +#include <linux/sched.h>
    8.26 +#include <linux/ioport.h>
    8.27 +#include <linux/mm.h>
    8.28 +#include <linux/serial.h>
    8.29 +#include <linux/irq.h>
    8.30 +#include <linux/bootmem.h>
    8.31 +#include <linux/mmzone.h>
    8.32 +#include <linux/interrupt.h>
    8.33 +#include <linux/acpi.h>
    8.34 +#include <linux/compiler.h>
    8.35 +#include <linux/sched.h>
    8.36 +#ifndef XEN
    8.37 +#include <linux/root_dev.h>
    8.38 +#endif
    8.39 +#include <linux/nodemask.h>
    8.40 +#include <linux/pm.h>
    8.41 +#include <linux/efi.h>
    8.42 +
    8.43 +#include <asm/io.h>
    8.44 +#include <asm/sal.h>
    8.45 +#include <asm/machvec.h>
    8.46 +#include <asm/system.h>
    8.47 +#include <asm/processor.h>
    8.48 +#ifndef XEN
    8.49 +#include <asm/vga.h>
    8.50 +#endif
    8.51 +#include <asm/sn/arch.h>
    8.52 +#include <asm/sn/addrs.h>
    8.53 +#include <asm/sn/pda.h>
    8.54 +#include <asm/sn/nodepda.h>
    8.55 +#include <asm/sn/sn_cpuid.h>
    8.56 +#include <asm/sn/simulator.h>
    8.57 +#include <asm/sn/leds.h>
    8.58 +#ifndef XEN
    8.59 +#include <asm/sn/bte.h>
    8.60 +#endif
    8.61 +#include <asm/sn/shub_mmr.h>
    8.62 +#ifndef XEN
    8.63 +#include <asm/sn/clksupport.h>
    8.64 +#endif
    8.65 +#include <asm/sn/sn_sal.h>
    8.66 +#include <asm/sn/geo.h>
    8.67 +#include <asm/sn/sn_feature_sets.h>
    8.68 +#ifndef XEN
    8.69 +#include "xtalk/xwidgetdev.h"
    8.70 +#include "xtalk/hubdev.h"
    8.71 +#else
    8.72 +#include "asm/sn/xwidgetdev.h"
    8.73 +#include "asm/sn/hubdev.h"
    8.74 +#endif
    8.75 +#include <asm/sn/klconfig.h>
    8.76 +#ifdef XEN
    8.77 +#include <asm/sn/shubio.h>
    8.78 +
    8.79 +/* Xen has no clue about NUMA ....  grrrr */
    8.80 +#define pxm_to_node(foo)		0
    8.81 +#define node_to_pxm(foo)		0
    8.82 +#define numa_node_id()			0
    8.83 +#endif
    8.84 +
    8.85 +
    8.86 +DEFINE_PER_CPU(struct pda_s, pda_percpu);
    8.87 +
    8.88 +#define MAX_PHYS_MEMORY		(1UL << IA64_MAX_PHYS_BITS)	/* Max physical address supported */
    8.89 +
    8.90 +extern void bte_init_node(nodepda_t *, cnodeid_t);
    8.91 +
    8.92 +extern void sn_timer_init(void);
    8.93 +extern unsigned long last_time_offset;
    8.94 +extern void (*ia64_mark_idle) (int);
    8.95 +extern void snidle(int);
    8.96 +extern unsigned long long (*ia64_printk_clock)(void);
    8.97 +
    8.98 +unsigned long sn_rtc_cycles_per_second;
    8.99 +EXPORT_SYMBOL(sn_rtc_cycles_per_second);
   8.100 +
   8.101 +DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
   8.102 +EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
   8.103 +
   8.104 +DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
   8.105 +EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
   8.106 +
   8.107 +DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
   8.108 +EXPORT_PER_CPU_SYMBOL(__sn_nodepda);
   8.109 +
   8.110 +char sn_system_serial_number_string[128];
   8.111 +EXPORT_SYMBOL(sn_system_serial_number_string);
   8.112 +u64 sn_partition_serial_number;
   8.113 +EXPORT_SYMBOL(sn_partition_serial_number);
   8.114 +u8 sn_partition_id;
   8.115 +EXPORT_SYMBOL(sn_partition_id);
   8.116 +u8 sn_system_size;
   8.117 +EXPORT_SYMBOL(sn_system_size);
   8.118 +u8 sn_sharing_domain_size;
   8.119 +EXPORT_SYMBOL(sn_sharing_domain_size);
   8.120 +u8 sn_coherency_id;
   8.121 +EXPORT_SYMBOL(sn_coherency_id);
   8.122 +u8 sn_region_size;
   8.123 +EXPORT_SYMBOL(sn_region_size);
   8.124 +int sn_prom_type;	/* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
   8.125 +
   8.126 +short physical_node_map[MAX_NUMALINK_NODES];
   8.127 +static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS];
   8.128 +
   8.129 +EXPORT_SYMBOL(physical_node_map);
   8.130 +
   8.131 +int num_cnodes;
   8.132 +
   8.133 +static void sn_init_pdas(char **);
   8.134 +static void build_cnode_tables(void);
   8.135 +
   8.136 +static nodepda_t *nodepdaindr[MAX_COMPACT_NODES];
   8.137 +
   8.138 +#ifndef XEN
   8.139 +/*
   8.140 + * The format of "screen_info" is strange, and due to early i386-setup
   8.141 + * code. This is just enough to make the console code think we're on a
   8.142 + * VGA color display.
   8.143 + */
   8.144 +struct screen_info sn_screen_info = {
   8.145 +	.orig_x = 0,
   8.146 +	.orig_y = 0,
   8.147 +	.orig_video_mode = 3,
   8.148 +	.orig_video_cols = 80,
   8.149 +	.orig_video_ega_bx = 3,
   8.150 +	.orig_video_lines = 25,
   8.151 +	.orig_video_isVGA = 1,
   8.152 +	.orig_video_points = 16
   8.153 +};
   8.154 +#endif
   8.155 +
   8.156 +/*
   8.157 + * This routine can only be used during init, since
   8.158 + * smp_boot_data is an init data structure.
   8.159 + * We have to use smp_boot_data.cpu_phys_id to find
   8.160 + * the physical id of the processor because the normal
   8.161 + * cpu_physical_id() relies on data structures that
   8.162 + * may not be initialized yet.
   8.163 + */
   8.164 +
   8.165 +static int __init pxm_to_nasid(int pxm)
   8.166 +{
   8.167 +	int i;
   8.168 +	int nid;
   8.169 +
   8.170 +	nid = pxm_to_node(pxm);
   8.171 +	for (i = 0; i < num_node_memblks; i++) {
   8.172 +		if (node_memblk[i].nid == nid) {
   8.173 +			return NASID_GET(node_memblk[i].start_paddr);
   8.174 +		}
   8.175 +	}
   8.176 +	return -1;
   8.177 +}
   8.178 +
   8.179 +/**
   8.180 + * early_sn_setup - early setup routine for SN platforms
   8.181 + *
   8.182 + * Sets up an initial console to aid debugging.  Intended primarily
   8.183 + * for bringup.  See start_kernel() in init/main.c.
   8.184 + */
   8.185 +
   8.186 +void __init early_sn_setup(void)
   8.187 +{
   8.188 +	efi_system_table_t *efi_systab;
   8.189 +	efi_config_table_t *config_tables;
   8.190 +	struct ia64_sal_systab *sal_systab;
   8.191 +	struct ia64_sal_desc_entry_point *ep;
   8.192 +	char *p;
   8.193 +	int i, j;
   8.194 +
   8.195 +	/*
    8.196 +	 * Parse enough of the SAL tables to locate the SAL entry point. Since console
   8.197 +	 * IO on SN2 is done via SAL calls, early_printk won't work without this.
   8.198 +	 *
   8.199 +	 * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
    8.200 +	 * Any changes to those files may have to be made here as well.
   8.201 +	 */
   8.202 +	efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
   8.203 +	config_tables = __va(efi_systab->tables);
   8.204 +	for (i = 0; i < efi_systab->nr_tables; i++) {
   8.205 +		if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) ==
   8.206 +		    0) {
   8.207 +			sal_systab = __va(config_tables[i].table);
   8.208 +			p = (char *)(sal_systab + 1);
   8.209 +			for (j = 0; j < sal_systab->entry_count; j++) {
   8.210 +				if (*p == SAL_DESC_ENTRY_POINT) {
   8.211 +					ep = (struct ia64_sal_desc_entry_point
   8.212 +					      *)p;
   8.213 +					ia64_sal_handler_init(__va
   8.214 +							      (ep->sal_proc),
   8.215 +							      __va(ep->gp));
   8.216 +					return;
   8.217 +				}
   8.218 +				p += SAL_DESC_SIZE(*p);
   8.219 +			}
   8.220 +		}
   8.221 +	}
   8.222 +	/* Uh-oh, SAL not available?? */
   8.223 +	printk(KERN_ERR "failed to find SAL entry point\n");
   8.224 +}
   8.225 +
   8.226 +extern int platform_intr_list[];
   8.227 +static int __initdata shub_1_1_found;
   8.228 +
   8.229 +/*
   8.230 + * sn_check_for_wars
   8.231 + *
   8.232 + * Set flag for enabling shub specific wars
   8.233 + */
   8.234 +
   8.235 +static inline int __init is_shub_1_1(int nasid)
   8.236 +{
   8.237 +	unsigned long id;
   8.238 +	int rev;
   8.239 +
   8.240 +	if (is_shub2())
   8.241 +		return 0;
   8.242 +	id = REMOTE_HUB_L(nasid, SH1_SHUB_ID);
   8.243 +	rev = (id & SH1_SHUB_ID_REVISION_MASK) >> SH1_SHUB_ID_REVISION_SHFT;
   8.244 +	return rev <= 2;
   8.245 +}
   8.246 +
   8.247 +static void __init sn_check_for_wars(void)
   8.248 +{
   8.249 +	int cnode;
   8.250 +
   8.251 +	if (is_shub2()) {
   8.252 +		/* none yet */
   8.253 +	} else {
   8.254 +		for_each_online_node(cnode) {
   8.255 +			if (is_shub_1_1(cnodeid_to_nasid(cnode)))
   8.256 +				shub_1_1_found = 1;
   8.257 +		}
   8.258 +	}
   8.259 +}
   8.260 +
   8.261 +#ifndef XEN
   8.262 +/*
   8.263 + * Scan the EFI PCDP table (if it exists) for an acceptable VGA console
   8.264 + * output device.  If one exists, pick it and set sn_legacy_{io,mem} to
   8.265 + * reflect the bus offsets needed to address it.
   8.266 + *
   8.267 + * Since pcdp support in SN is not supported in the 2.4 kernel (or at least
   8.268 + * the one lbs is based on) just declare the needed structs here.
   8.269 + *
   8.270 + * Reference spec http://www.dig64.org/specifications/DIG64_PCDPv20.pdf
   8.271 + *
   8.272 + * Returns 0 if no acceptable vga is found, !0 otherwise.
   8.273 + *
   8.274 + * Note:  This stuff is duped here because Altix requires the PCDP to
   8.275 + * locate a usable VGA device due to lack of proper ACPI support.  Structures
   8.276 + * could be used from drivers/firmware/pcdp.h, but it was decided that moving
    8.277 +	 * this file to a more public location just for Altix use was undesirable.
   8.278 + */
   8.279 +
   8.280 +struct hcdp_uart_desc {
   8.281 +	u8	pad[45];
   8.282 +};
   8.283 +
   8.284 +struct pcdp {
   8.285 +	u8	signature[4];	/* should be 'HCDP' */
   8.286 +	u32	length;
   8.287 +	u8	rev;		/* should be >=3 for pcdp, <3 for hcdp */
   8.288 +	u8	sum;
   8.289 +	u8	oem_id[6];
   8.290 +	u64	oem_tableid;
   8.291 +	u32	oem_rev;
   8.292 +	u32	creator_id;
   8.293 +	u32	creator_rev;
   8.294 +	u32	num_type0;
   8.295 +	struct hcdp_uart_desc uart[0];	/* num_type0 of these */
   8.296 +	/* pcdp descriptors follow */
   8.297 +}  __attribute__((packed));
   8.298 +
   8.299 +struct pcdp_device_desc {
   8.300 +	u8	type;
   8.301 +	u8	primary;
   8.302 +	u16	length;
   8.303 +	u16	index;
   8.304 +	/* interconnect specific structure follows */
   8.305 +	/* device specific structure follows that */
   8.306 +}  __attribute__((packed));
   8.307 +
   8.308 +struct pcdp_interface_pci {
   8.309 +	u8	type;		/* 1 == pci */
   8.310 +	u8	reserved;
   8.311 +	u16	length;
   8.312 +	u8	segment;
   8.313 +	u8	bus;
   8.314 +	u8 	dev;
   8.315 +	u8	fun;
   8.316 +	u16	devid;
   8.317 +	u16	vendid;
   8.318 +	u32	acpi_interrupt;
   8.319 +	u64	mmio_tra;
   8.320 +	u64	ioport_tra;
   8.321 +	u8	flags;
   8.322 +	u8	translation;
   8.323 +}  __attribute__((packed));
   8.324 +
   8.325 +struct pcdp_vga_device {
   8.326 +	u8	num_eas_desc;
   8.327 +	/* ACPI Extended Address Space Desc follows */
   8.328 +}  __attribute__((packed));
   8.329 +
   8.330 +/* from pcdp_device_desc.primary */
   8.331 +#define PCDP_PRIMARY_CONSOLE	0x01
   8.332 +
   8.333 +/* from pcdp_device_desc.type */
   8.334 +#define PCDP_CONSOLE_INOUT	0x0
   8.335 +#define PCDP_CONSOLE_DEBUG	0x1
   8.336 +#define PCDP_CONSOLE_OUT	0x2
   8.337 +#define PCDP_CONSOLE_IN		0x3
   8.338 +#define PCDP_CONSOLE_TYPE_VGA	0x8
   8.339 +
   8.340 +#define PCDP_CONSOLE_VGA	(PCDP_CONSOLE_TYPE_VGA | PCDP_CONSOLE_OUT)
   8.341 +
   8.342 +/* from pcdp_interface_pci.type */
   8.343 +#define PCDP_IF_PCI		1
   8.344 +
   8.345 +/* from pcdp_interface_pci.translation */
   8.346 +#define PCDP_PCI_TRANS_IOPORT	0x02
   8.347 +#define PCDP_PCI_TRANS_MMIO	0x01
   8.348 +
   8.349 +#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
   8.350 +static void
   8.351 +sn_scan_pcdp(void)
   8.352 +{
   8.353 +	u8 *bp;
   8.354 +	struct pcdp *pcdp;
   8.355 +	struct pcdp_device_desc device;
   8.356 +	struct pcdp_interface_pci if_pci;
   8.357 +	extern struct efi efi;
   8.358 +
   8.359 +	if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
   8.360 +		return;		/* no hcdp/pcdp table */
   8.361 +
   8.362 +	pcdp = __va(efi.hcdp);
   8.363 +
   8.364 +	if (pcdp->rev < 3)
   8.365 +		return;		/* only support PCDP (rev >= 3) */
   8.366 +
   8.367 +	for (bp = (u8 *)&pcdp->uart[pcdp->num_type0];
   8.368 +	     bp < (u8 *)pcdp + pcdp->length;
   8.369 +	     bp += device.length) {
   8.370 +		memcpy(&device, bp, sizeof(device));
   8.371 +		if (! (device.primary & PCDP_PRIMARY_CONSOLE))
   8.372 +			continue;	/* not primary console */
   8.373 +
   8.374 +		if (device.type != PCDP_CONSOLE_VGA)
   8.375 +			continue;	/* not VGA descriptor */
   8.376 +
   8.377 +		memcpy(&if_pci, bp+sizeof(device), sizeof(if_pci));
   8.378 +		if (if_pci.type != PCDP_IF_PCI)
   8.379 +			continue;	/* not PCI interconnect */
   8.380 +
   8.381 +		if (if_pci.translation & PCDP_PCI_TRANS_IOPORT)
   8.382 +			vga_console_iobase =
   8.383 +				if_pci.ioport_tra | __IA64_UNCACHED_OFFSET;
   8.384 +
   8.385 +		if (if_pci.translation & PCDP_PCI_TRANS_MMIO)
   8.386 +			vga_console_membase =
   8.387 +				if_pci.mmio_tra | __IA64_UNCACHED_OFFSET;
   8.388 +
   8.389 +		break; /* once we find the primary, we're done */
   8.390 +	}
   8.391 +}
   8.392 +#endif
   8.393 +
   8.394 +static unsigned long sn2_rtc_initial;
   8.395 +
   8.396 +static unsigned long long ia64_sn2_printk_clock(void)
   8.397 +{
   8.398 +	unsigned long rtc_now = rtc_time();
   8.399 +
   8.400 +	return (rtc_now - sn2_rtc_initial) *
   8.401 +		(1000000000 / sn_rtc_cycles_per_second);
   8.402 +}
   8.403 +#endif
   8.404 +
   8.405 +/**
   8.406 + * sn_setup - SN platform setup routine
   8.407 + * @cmdline_p: kernel command line
   8.408 + *
   8.409 + * Handles platform setup for SN machines.  This includes determining
   8.410 + * the RTC frequency (via a SAL call), initializing secondary CPUs, and
   8.411 + * setting up per-node data areas.  The console is also initialized here.
   8.412 + */
   8.413 +#ifdef XEN
   8.414 +void __cpuinit sn_cpu_init(void);
   8.415 +#endif
   8.416 +
   8.417 +void __init sn_setup(char **cmdline_p)
   8.418 +{
   8.419 +#ifndef XEN
   8.420 +	long status, ticks_per_sec, drift;
   8.421 +#else
   8.422 +	unsigned long status, ticks_per_sec, drift;
   8.423 +#endif
   8.424 +	u32 version = sn_sal_rev();
   8.425 +#ifndef XEN
   8.426 +	extern void sn_cpu_init(void);
   8.427 +
   8.428 +	sn2_rtc_initial = rtc_time();
   8.429 +	ia64_sn_plat_set_error_handling_features();	// obsolete
   8.430 +	ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
   8.431 +	ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);
   8.432 +
   8.433 +
   8.434 +#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
   8.435 +	/*
   8.436 +	 * Handle SN vga console.
   8.437 +	 *
   8.438 +	 * SN systems do not have enough ACPI table information
   8.439 +	 * being passed from prom to identify VGA adapters and the legacy
   8.440 +	 * addresses to access them.  Until that is done, SN systems rely
   8.441 +	 * on the PCDP table to identify the primary VGA console if one
   8.442 +	 * exists.
   8.443 +	 *
   8.444 +	 * However, kernel PCDP support is optional, and even if it is built
   8.445 +	 * into the kernel, it will not be used if the boot cmdline contains
   8.446 +	 * console= directives.
   8.447 +	 *
   8.448 +	 * So, to work around this mess, we duplicate some of the PCDP code
   8.449 +	 * here so that the primary VGA console (as defined by PCDP) will
   8.450 +	 * work on SN systems even if a different console (e.g. serial) is
   8.451 +	 * selected on the boot line (or CONFIG_EFI_PCDP is off).
   8.452 +	 */
   8.453 +
   8.454 +	if (! vga_console_membase)
   8.455 +		sn_scan_pcdp();
   8.456 +
   8.457 +	if (vga_console_membase) {
   8.458 +		/* usable vga ... make tty0 the preferred default console */
   8.459 +		if (!strstr(*cmdline_p, "console="))
   8.460 +			add_preferred_console("tty", 0, NULL);
   8.461 +	} else {
   8.462 +		printk(KERN_DEBUG "SGI: Disabling VGA console\n");
   8.463 +		if (!strstr(*cmdline_p, "console="))
   8.464 +			add_preferred_console("ttySG", 0, NULL);
   8.465 +#ifdef CONFIG_DUMMY_CONSOLE
   8.466 +		conswitchp = &dummy_con;
   8.467 +#else
   8.468 +		conswitchp = NULL;
   8.469 +#endif				/* CONFIG_DUMMY_CONSOLE */
   8.470 +	}
   8.471 +#endif				/* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */
   8.472 +
   8.473 +	MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
   8.474 +#endif
   8.475 +
   8.476 +	/*
   8.477 +	 * Build the tables for managing cnodes.
   8.478 +	 */
   8.479 +	build_cnode_tables();
   8.480 +
   8.481 +	status =
   8.482 +	    ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
   8.483 +			       &drift);
   8.484 +	if (status != 0 || ticks_per_sec < 100000) {
   8.485 +		printk(KERN_WARNING
   8.486 +		       "unable to determine platform RTC clock frequency, guessing.\n");
   8.487 +		/* PROM gives wrong value for clock freq. so guess */
   8.488 +		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
   8.489 +	} else
   8.490 +		sn_rtc_cycles_per_second = ticks_per_sec;
   8.491 +#ifndef XEN
   8.492 +
   8.493 +	platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
   8.494 +
   8.495 +	ia64_printk_clock = ia64_sn2_printk_clock;
   8.496 +#endif
   8.497 +
   8.498 +	printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF);
   8.499 +
   8.500 +	/*
   8.501 +	 * we set the default root device to /dev/hda
   8.502 +	 * to make simulation easy
   8.503 +	 */
   8.504 +#ifndef XEN
   8.505 +	ROOT_DEV = Root_HDA1;
   8.506 +#endif
   8.507 +
   8.508 +	/*
   8.509 +	 * Create the PDAs and NODEPDAs for all the cpus.
   8.510 +	 */
   8.511 +	sn_init_pdas(cmdline_p);
   8.512 +
   8.513 +#ifndef XEN
   8.514 +	ia64_mark_idle = &snidle;
   8.515 +#endif
   8.516 +
   8.517 +	/*
   8.518 +	 * For the bootcpu, we do this here. All other cpus will make the
   8.519 +	 * call as part of cpu_init in slave cpu initialization.
   8.520 +	 */
   8.521 +	sn_cpu_init();
   8.522 +
   8.523 +#ifndef XEN
   8.524 +#ifdef CONFIG_SMP
   8.525 +	init_smp_config();
   8.526 +#endif
   8.527 +	screen_info = sn_screen_info;
   8.528 +
   8.529 +	sn_timer_init();
   8.530 +
   8.531 +	/*
   8.532 +	 * set pm_power_off to a SAL call to allow
   8.533 +	 * sn machines to power off. The SAL call can be replaced
   8.534 +	 * by an ACPI interface call when ACPI is fully implemented
   8.535 +	 * for sn.
   8.536 +	 */
   8.537 +	pm_power_off = ia64_sn_power_down;
   8.538 +	current->thread.flags |= IA64_THREAD_MIGRATION;
   8.539 +#endif
   8.540 +}
   8.541 +
   8.542 +/**
   8.543 + * sn_init_pdas - setup node data areas
   8.544 + *
   8.545 + * One time setup for Node Data Area.  Called by sn_setup().
   8.546 + */
   8.547 +static void __init sn_init_pdas(char **cmdline_p)
   8.548 +{
   8.549 +	cnodeid_t cnode;
   8.550 +
   8.551 +	/*
   8.552 +	 * Allocate & initialize the nodepda for each node.
   8.553 +	 */
   8.554 +	for_each_online_node(cnode) {
   8.555 +		nodepdaindr[cnode] =
   8.556 +		    alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
   8.557 +		memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
   8.558 +		memset(nodepdaindr[cnode]->phys_cpuid, -1,
   8.559 +		    sizeof(nodepdaindr[cnode]->phys_cpuid));
   8.560 +		spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
   8.561 +	}
   8.562 +
   8.563 +	/*
   8.564 +	 * Allocate & initialize nodepda for TIOs.  For now, put them on node 0.
   8.565 +	 */
   8.566 +	for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) {
   8.567 +		nodepdaindr[cnode] =
   8.568 +		    alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
   8.569 +		memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
   8.570 +	}
   8.571 +
   8.572 +	/*
   8.573 +	 * Now copy the array of nodepda pointers to each nodepda.
   8.574 +	 */
   8.575 +	for (cnode = 0; cnode < num_cnodes; cnode++)
   8.576 +		memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
   8.577 +		       sizeof(nodepdaindr));
   8.578 +
   8.579 +#ifndef XEN
   8.580 +	/*
   8.581 +	 * Set up IO related platform-dependent nodepda fields.
   8.582 +	 * The following routine actually sets up the hubinfo struct
   8.583 +	 * in nodepda.
   8.584 +	 */
   8.585 +	for_each_online_node(cnode) {
   8.586 +		bte_init_node(nodepdaindr[cnode], cnode);
   8.587 +	}
   8.588 +
   8.589 +	/*
   8.590 +	 * Initialize the per node hubdev.  This includes IO Nodes and
   8.591 +	 * headless/memless nodes.
   8.592 +	 */
   8.593 +	for (cnode = 0; cnode < num_cnodes; cnode++) {
   8.594 +		hubdev_init_node(nodepdaindr[cnode], cnode);
   8.595 +	}
   8.596 +#endif
   8.597 +}
   8.598 +
   8.599 +/**
   8.600 + * sn_cpu_init - initialize per-cpu data areas
   8.601 + * @cpuid: cpuid of the caller
   8.602 + *
   8.603 + * Called during cpu initialization on each cpu as it starts.
   8.604 + * Currently, initializes the per-cpu data area for SNIA.
   8.605 + * Also sets up a few fields in the nodepda.  Also known as
   8.606 + * platform_cpu_init() by the ia64 machvec code.
   8.607 + */
   8.608 +void __cpuinit sn_cpu_init(void)
   8.609 +{
   8.610 +	int cpuid;
   8.611 +	int cpuphyid;
   8.612 +	int nasid;
   8.613 +	int subnode;
   8.614 +	int slice;
   8.615 +	int cnode;
   8.616 +	int i;
   8.617 +	static int wars_have_been_checked;
   8.618 +
   8.619 +	cpuid = smp_processor_id();
   8.620 +#ifndef XEN
   8.621 +	if (cpuid == 0 && IS_MEDUSA()) {
   8.622 +		if (ia64_sn_is_fake_prom())
   8.623 +			sn_prom_type = 2;
   8.624 +		else
   8.625 +			sn_prom_type = 1;
   8.626 +		printk(KERN_INFO "Running on medusa with %s PROM\n",
   8.627 +		       (sn_prom_type == 1) ? "real" : "fake");
   8.628 +	}
   8.629 +#endif
   8.630 +
   8.631 +	memset(pda, 0, sizeof(pda));
   8.632 +	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
   8.633 +				&sn_hub_info->nasid_bitmask,
   8.634 +				&sn_hub_info->nasid_shift,
   8.635 +				&sn_system_size, &sn_sharing_domain_size,
   8.636 +				&sn_partition_id, &sn_coherency_id,
   8.637 +				&sn_region_size))
   8.638 +		BUG();
   8.639 +	sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
   8.640 +
   8.641 +	/*
   8.642 +	 * Don't check status. The SAL call is not supported on all PROMs
   8.643 +	 * but a failure is harmless.
   8.644 +	 */
   8.645 +	(void) ia64_sn_set_cpu_number(cpuid);
   8.646 +
   8.647 +	/*
   8.648 +	 * The boot cpu makes this call again after platform initialization is
   8.649 +	 * complete.
   8.650 +	 */
   8.651 +	if (nodepdaindr[0] == NULL)
   8.652 +		return;
   8.653 +
   8.654 +	for (i = 0; i < MAX_PROM_FEATURE_SETS; i++)
   8.655 +		if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
   8.656 +			break;
   8.657 +
   8.658 +	cpuphyid = get_sapicid();
   8.659 +
   8.660 +	if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
   8.661 +		BUG();
   8.662 +
   8.663 +	for (i=0; i < MAX_NUMNODES; i++) {
   8.664 +		if (nodepdaindr[i]) {
   8.665 +			nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
   8.666 +			nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
   8.667 +			nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
   8.668 +		}
   8.669 +	}
   8.670 +
   8.671 +	cnode = nasid_to_cnodeid(nasid);
   8.672 +
   8.673 +	sn_nodepda = nodepdaindr[cnode];
   8.674 +
   8.675 +	pda->led_address =
   8.676 +	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
   8.677 +	pda->led_state = LED_ALWAYS_SET;
   8.678 +	pda->hb_count = HZ / 2;
   8.679 +	pda->hb_state = 0;
   8.680 +	pda->idle_flag = 0;
   8.681 +
   8.682 +	if (cpuid != 0) {
   8.683 +		/* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
   8.684 +		memcpy(sn_cnodeid_to_nasid,
   8.685 +		       (&per_cpu(__sn_cnodeid_to_nasid, 0)),
   8.686 +		       sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
   8.687 +	}
   8.688 +
   8.689 +	/*
   8.690 +	 * Check for WARs.
   8.691 +	 * Only needs to be done once, on BSP.
   8.692 +	 * Has to be done after loop above, because it uses this cpu's
   8.693 +	 * sn_cnodeid_to_nasid table which was just initialized if this
   8.694 +	 * isn't cpu 0.
   8.695 +	 * Has to be done before assignment below.
   8.696 +	 */
   8.697 +	if (!wars_have_been_checked) {
   8.698 +		sn_check_for_wars();
   8.699 +		wars_have_been_checked = 1;
   8.700 +	}
   8.701 +	sn_hub_info->shub_1_1_found = shub_1_1_found;
   8.702 +
   8.703 +	/*
   8.704 +	 * Set up addresses of PIO/MEM write status registers.
   8.705 +	 */
   8.706 +	{
   8.707 +		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
   8.708 +		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2,
   8.709 +			SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
   8.710 +		u64 *pio;
   8.711 +		pio = is_shub1() ? pio1 : pio2;
   8.712 +		pda->pio_write_status_addr =
   8.713 +		   (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]);
   8.714 +		pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
   8.715 +	}
   8.716 +
   8.717 +#ifndef XEN  /* local_node_data is not allocated .... yet */
   8.718 +	/*
   8.719 +	 * WAR addresses for SHUB 1.x.
   8.720 +	 */
   8.721 +	if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
   8.722 +		int buddy_nasid;
   8.723 +		buddy_nasid =
   8.724 +		    cnodeid_to_nasid(numa_node_id() ==
   8.725 +				     num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
   8.726 +		pda->pio_shub_war_cam_addr =
   8.727 +		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
   8.728 +							      SH1_PI_CAM_CONTROL);
   8.729 +	}
   8.730 +#endif
   8.731 +}
   8.732 +
   8.733 +/*
   8.734 + * Build tables for converting between NASIDs and cnodes.
   8.735 + */
   8.736 +static inline int __init board_needs_cnode(int type)
   8.737 +{
   8.738 +	return (type == KLTYPE_SNIA || type == KLTYPE_TIO);
   8.739 +}
   8.740 +
   8.741 +void __init build_cnode_tables(void)
   8.742 +{
   8.743 +	int nasid;
   8.744 +	int node;
   8.745 +	lboard_t *brd;
   8.746 +
   8.747 +	memset(physical_node_map, -1, sizeof(physical_node_map));
   8.748 +	memset(sn_cnodeid_to_nasid, -1,
   8.749 +			sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
   8.750 +
   8.751 +	/*
   8.752 +	 * First populate the tables with C/M bricks. This ensures that
   8.753 +	 * cnode == node for all C & M bricks.
   8.754 +	 */
   8.755 +	for_each_online_node(node) {
   8.756 +		nasid = pxm_to_nasid(node_to_pxm(node));
   8.757 +		sn_cnodeid_to_nasid[node] = nasid;
   8.758 +		physical_node_map[nasid] = node;
   8.759 +	}
   8.760 +
   8.761 +	/*
   8.762 +	 * num_cnodes is total number of C/M/TIO bricks. Because of the 256 node
   8.763 +	 * limit on the number of nodes, we can't use the generic node numbers 
   8.764 +	 * for this. Note that num_cnodes is incremented below as TIOs or
   8.765 +	 * headless/memoryless nodes are discovered.
   8.766 +	 */
   8.767 +	num_cnodes = num_online_nodes();
   8.768 +
   8.769 +	/* fakeprom does not support klgraph */
   8.770 +	if (IS_RUNNING_ON_FAKE_PROM())
   8.771 +		return;
   8.772 +
   8.773 +	/* Find TIOs & headless/memoryless nodes and add them to the tables */
   8.774 +	for_each_online_node(node) {
   8.775 +		kl_config_hdr_t *klgraph_header;
   8.776 +		nasid = cnodeid_to_nasid(node);
   8.777 +		klgraph_header = ia64_sn_get_klconfig_addr(nasid);
   8.778 +		if (klgraph_header == NULL)
   8.779 +			BUG();
   8.780 +		brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info);
   8.781 +		while (brd) {
   8.782 +			if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) {
   8.783 +				sn_cnodeid_to_nasid[num_cnodes] = brd->brd_nasid;
   8.784 +				physical_node_map[brd->brd_nasid] = num_cnodes++;
   8.785 +			}
   8.786 +			brd = find_lboard_next(brd);
   8.787 +		}
   8.788 +	}
   8.789 +}
   8.790 +
   8.791 +int
   8.792 +nasid_slice_to_cpuid(int nasid, int slice)
   8.793 +{
   8.794 +	long cpu;
   8.795 +
   8.796 +	for (cpu = 0; cpu < NR_CPUS; cpu++)
   8.797 +		if (cpuid_to_nasid(cpu) == nasid &&
   8.798 +					cpuid_to_slice(cpu) == slice)
   8.799 +			return cpu;
   8.800 +
   8.801 +	return -1;
   8.802 +}
   8.803 +
   8.804 +int sn_prom_feature_available(int id)
   8.805 +{
   8.806 +	if (id >= BITS_PER_LONG * MAX_PROM_FEATURE_SETS)
   8.807 +		return 0;
   8.808 +	return test_bit(id, sn_prom_features);
   8.809 +}
   8.810 +EXPORT_SYMBOL(sn_prom_feature_available);
   8.811 +
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c	Wed Dec 20 14:55:02 2006 -0700
     9.3 @@ -0,0 +1,548 @@
     9.4 +/*
     9.5 + * SN2 Platform specific SMP Support
     9.6 + *
     9.7 + * This file is subject to the terms and conditions of the GNU General Public
     9.8 + * License.  See the file "COPYING" in the main directory of this archive
     9.9 + * for more details.
    9.10 + *
    9.11 + * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved.
    9.12 + */
    9.13 +
    9.14 +#include <linux/init.h>
    9.15 +#include <linux/kernel.h>
    9.16 +#include <linux/spinlock.h>
    9.17 +#include <linux/threads.h>
    9.18 +#include <linux/sched.h>
    9.19 +#include <linux/smp.h>
    9.20 +#include <linux/interrupt.h>
    9.21 +#include <linux/irq.h>
    9.22 +#include <linux/mmzone.h>
    9.23 +#include <linux/module.h>
    9.24 +#include <linux/bitops.h>
    9.25 +#include <linux/nodemask.h>
    9.26 +#include <linux/proc_fs.h>
    9.27 +#include <linux/seq_file.h>
    9.28 +
    9.29 +#include <asm/processor.h>
    9.30 +#include <asm/irq.h>
    9.31 +#include <asm/sal.h>
    9.32 +#include <asm/system.h>
    9.33 +#include <asm/delay.h>
    9.34 +#include <asm/io.h>
    9.35 +#include <asm/smp.h>
    9.36 +#include <asm/tlb.h>
    9.37 +#include <asm/numa.h>
    9.38 +#include <asm/hw_irq.h>
    9.39 +#include <asm/current.h>
    9.40 +#ifdef XEN
    9.41 +#include <asm/sn/arch.h>
    9.42 +#endif
    9.43 +#include <asm/sn/sn_cpuid.h>
    9.44 +#include <asm/sn/sn_sal.h>
    9.45 +#include <asm/sn/addrs.h>
    9.46 +#include <asm/sn/shub_mmr.h>
    9.47 +#include <asm/sn/nodepda.h>
    9.48 +#include <asm/sn/rw_mmr.h>
    9.49 +
    9.50 +DEFINE_PER_CPU(struct ptc_stats, ptcstats);
    9.51 +DECLARE_PER_CPU(struct ptc_stats, ptcstats);
    9.52 +
    9.53 +static  __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
    9.54 +
    9.55 +extern unsigned long
    9.56 +sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
    9.57 +			       volatile unsigned long *, unsigned long,
    9.58 +			       volatile unsigned long *, unsigned long);
    9.59 +void
    9.60 +sn2_ptc_deadlock_recovery(short *, short, short, int,
    9.61 +			  volatile unsigned long *, unsigned long,
    9.62 +			  volatile unsigned long *, unsigned long);
    9.63 +
    9.64 +/*
    9.65 + * Note: some of the following is captured here to make debugging easier
    9.66 + * (the macros make more sense if you see the debug patch - not posted)
    9.67 + */
    9.68 +#define sn2_ptctest	0
    9.69 +#define local_node_uses_ptc_ga(sh1)	((sh1) ? 1 : 0)
    9.70 +#define max_active_pio(sh1)		((sh1) ? 32 : 7)
    9.71 +#define reset_max_active_on_deadlock()	1
    9.72 +#ifndef XEN
    9.73 +#define PTC_LOCK(sh1)			((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
    9.74 +#else
    9.75 +#define PTC_LOCK(sh1)			&sn2_global_ptc_lock
    9.76 +#endif
    9.77 +
    9.78 +struct ptc_stats {
    9.79 +	unsigned long ptc_l;
    9.80 +	unsigned long change_rid;
    9.81 +	unsigned long shub_ptc_flushes;
    9.82 +	unsigned long nodes_flushed;
    9.83 +	unsigned long deadlocks;
    9.84 +	unsigned long deadlocks2;
    9.85 +	unsigned long lock_itc_clocks;
    9.86 +	unsigned long shub_itc_clocks;
    9.87 +	unsigned long shub_itc_clocks_max;
    9.88 +	unsigned long shub_ptc_flushes_not_my_mm;
    9.89 +};
    9.90 +
    9.91 +#define sn2_ptctest	0
    9.92 +
    9.93 +static inline unsigned long wait_piowc(void)
    9.94 +{
    9.95 +	volatile unsigned long *piows;
    9.96 +	unsigned long zeroval, ws;
    9.97 +
    9.98 +	piows = pda->pio_write_status_addr;
    9.99 +	zeroval = pda->pio_write_status_val;
   9.100 +	do {
   9.101 +		cpu_relax();
   9.102 +	} while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
   9.103 +	return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
   9.104 +}
   9.105 +
   9.106 +#ifndef XEN  /* No idea if Xen will ever support this */
   9.107 +/**
   9.108 + * sn_migrate - SN-specific task migration actions
   9.109 + * @task: Task being migrated to new CPU
   9.110 + *
   9.111 + * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
   9.112 + * Context switching user threads which have memory-mapped MMIO may cause
   9.113 + * PIOs to issue from separate CPUs, thus the PIO writes must be drained
   9.114 + * from the previous CPU's Shub before execution resumes on the new CPU.
   9.115 + */
   9.116 +void sn_migrate(struct task_struct *task)
   9.117 +{
   9.118 +	pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu);
   9.119 +	volatile unsigned long *adr = last_pda->pio_write_status_addr;
   9.120 +	unsigned long val = last_pda->pio_write_status_val;
   9.121 +
   9.122 +	/* Drain PIO writes from old CPU's Shub */
   9.123 +	while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK)
   9.124 +			!= val))
   9.125 +		cpu_relax();
   9.126 +}
   9.127 +
   9.128 +void sn_tlb_migrate_finish(struct mm_struct *mm)
   9.129 +{
   9.130 +	/* flush_tlb_mm is inefficient if more than 1 users of mm */
   9.131 +#ifndef XEN
   9.132 +	if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
   9.133 +#else
   9.134 +	if (mm == &current->arch.mm && mm && atomic_read(&mm->mm_users) == 1)
   9.135 +#endif
   9.136 +		flush_tlb_mm(mm);
   9.137 +}
   9.138 +#endif
   9.139 +
   9.140 +/**
   9.141 + * sn2_global_tlb_purge - globally purge translation cache of virtual address range
   9.142 + * @mm: mm_struct containing virtual address range
   9.143 + * @start: start of virtual address range
   9.144 + * @end: end of virtual address range
   9.145 + * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
   9.146 + *
   9.147 + * Purges the translation caches of all processors of the given virtual address
   9.148 + * range.
   9.149 + *
   9.150 + * Note:
   9.151 + * 	- cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
   9.152 + * 	- cpu_vm_mask is converted into a nodemask of the nodes containing the
   9.153 + * 	  cpus in cpu_vm_mask.
   9.154 + *	- if only one bit is set in cpu_vm_mask & it is the current cpu & the
   9.155 + *	  process is purging its own virtual address range, then only the
   9.156 + *	  local TLB needs to be flushed. This flushing can be done using
   9.157 + *	  ptc.l. This is the common case & avoids the global spinlock.
   9.158 + *	- if multiple cpus have loaded the context, then flushing has to be
   9.159 + *	  done with ptc.g/MMRs under protection of the global ptc_lock.
   9.160 + */
   9.161 +
   9.162 +#ifdef XEN  /* Xen is soooooooo stupid! */
   9.163 +// static cpumask_t mask_all = CPU_MASK_ALL;
   9.164 +#endif
   9.165 +
   9.166 +void
   9.167 +#ifndef XEN
   9.168 +sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
   9.169 +#else
   9.170 +sn2_global_tlb_purge(unsigned long start,
   9.171 +#endif
   9.172 +		     unsigned long end, unsigned long nbits)
   9.173 +{
   9.174 +	int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
   9.175 +#ifndef XEN
   9.176 +	int mymm = (mm == current->active_mm && mm == current->mm);
   9.177 +#else
   9.178 +	// struct mm_struct *mm;
   9.179 +	int mymm = 0;
   9.180 +#endif
   9.181 +	int use_cpu_ptcga;
   9.182 +	volatile unsigned long *ptc0, *ptc1;
   9.183 +	unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
   9.184 +	short nasids[MAX_NUMNODES], nix;
   9.185 +	nodemask_t nodes_flushed;
   9.186 +	int active, max_active, deadlock;
   9.187 +
   9.188 +	nodes_clear(nodes_flushed);
   9.189 +	i = 0;
   9.190 +
   9.191 +#ifndef XEN  /* One day Xen will grow up! */
   9.192 +	for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
   9.193 +		cnode = cpu_to_node(cpu);
   9.194 +		node_set(cnode, nodes_flushed);
   9.195 +		lcpu = cpu;
   9.196 +		i++;
   9.197 +	}
   9.198 +#else
   9.199 +	for_each_cpu(cpu) {
   9.200 +		cnode = cpu_to_node(cpu);
   9.201 +		node_set(cnode, nodes_flushed);
   9.202 +		lcpu = cpu;
   9.203 +		i++;
   9.204 +	}
   9.205 +#endif
   9.206 +
   9.207 +	if (i == 0)
   9.208 +		return;
   9.209 +
   9.210 +	preempt_disable();
   9.211 +
   9.212 +	if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) {
   9.213 +		do {
   9.214 +			ia64_ptcl(start, nbits << 2);
   9.215 +			start += (1UL << nbits);
   9.216 +		} while (start < end);
   9.217 +		ia64_srlz_i();
   9.218 +		__get_cpu_var(ptcstats).ptc_l++;
   9.219 +		preempt_enable();
   9.220 +		return;
   9.221 +	}
   9.222 +
   9.223 +#ifndef XEN
   9.224 +	if (atomic_read(&mm->mm_users) == 1 && mymm) {
   9.225 +#ifndef XEN  /* I hate Xen! */
   9.226 +		flush_tlb_mm(mm);
   9.227 +#else
   9.228 +		flush_tlb_mask(mask_all);
   9.229 +#endif
   9.230 +		__get_cpu_var(ptcstats).change_rid++;
   9.231 +		preempt_enable();
   9.232 +		return;
   9.233 +	}
   9.234 +#endif
   9.235 +
   9.236 +	itc = ia64_get_itc();
   9.237 +	nix = 0;
   9.238 +	for_each_node_mask(cnode, nodes_flushed)
   9.239 +		nasids[nix++] = cnodeid_to_nasid(cnode);
   9.240 +
   9.241 +#ifndef XEN
   9.242 +	rr_value = (mm->context << 3) | REGION_NUMBER(start);
   9.243 +#else
   9.244 +	rr_value = REGION_NUMBER(start);
   9.245 +#endif
   9.246 +
   9.247 +	shub1 = is_shub1();
   9.248 +	if (shub1) {
   9.249 +		data0 = (1UL << SH1_PTC_0_A_SHFT) |
   9.250 +		    	(nbits << SH1_PTC_0_PS_SHFT) |
   9.251 +			(rr_value << SH1_PTC_0_RID_SHFT) |
   9.252 +		    	(1UL << SH1_PTC_0_START_SHFT);
   9.253 +#ifndef XEN
   9.254 +		ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
   9.255 +		ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
   9.256 +#else
   9.257 +		ptc0 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
   9.258 +		ptc1 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
   9.259 +#endif
   9.260 +	} else {
   9.261 +		data0 = (1UL << SH2_PTC_A_SHFT) |
   9.262 +			(nbits << SH2_PTC_PS_SHFT) |
   9.263 +		    	(1UL << SH2_PTC_START_SHFT);
   9.264 +#ifndef XEN
   9.265 +		ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC + 
   9.266 +#else
   9.267 +		ptc0 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC + 
   9.268 +#endif
   9.269 +			(rr_value << SH2_PTC_RID_SHFT));
   9.270 +		ptc1 = NULL;
   9.271 +	}
   9.272 +	
   9.273 +
   9.274 +	mynasid = get_nasid();
   9.275 +	use_cpu_ptcga = local_node_uses_ptc_ga(shub1);
   9.276 +	max_active = max_active_pio(shub1);
   9.277 +
   9.278 +	itc = ia64_get_itc();
   9.279 +	spin_lock_irqsave(PTC_LOCK(shub1), flags);
   9.280 +	itc2 = ia64_get_itc();
   9.281 +
   9.282 +	__get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
   9.283 +	__get_cpu_var(ptcstats).shub_ptc_flushes++;
   9.284 +	__get_cpu_var(ptcstats).nodes_flushed += nix;
   9.285 +	if (!mymm)
   9.286 +		 __get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++;
   9.287 +
   9.288 +	if (use_cpu_ptcga && !mymm) {
   9.289 +		old_rr = ia64_get_rr(start);
   9.290 +		ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8));
   9.291 +		ia64_srlz_d();
   9.292 +	}
   9.293 +
   9.294 +	wait_piowc();
   9.295 +	do {
   9.296 +		if (shub1)
   9.297 +			data1 = start | (1UL << SH1_PTC_1_START_SHFT);
   9.298 +		else
   9.299 +			data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
   9.300 +		deadlock = 0;
   9.301 +		active = 0;
   9.302 +		for (ibegin = 0, i = 0; i < nix; i++) {
   9.303 +			nasid = nasids[i];
   9.304 +			if (use_cpu_ptcga && unlikely(nasid == mynasid)) {
   9.305 +				ia64_ptcga(start, nbits << 2);
   9.306 +				ia64_srlz_i();
   9.307 +			} else {
   9.308 +				ptc0 = CHANGE_NASID(nasid, ptc0);
   9.309 +				if (ptc1)
   9.310 +					ptc1 = CHANGE_NASID(nasid, ptc1);
   9.311 +				pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
   9.312 +				active++;
   9.313 +			}
   9.314 +			if (active >= max_active || i == (nix - 1)) {
   9.315 +				if ((deadlock = wait_piowc())) {
   9.316 +					sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
   9.317 +					if (reset_max_active_on_deadlock())
   9.318 +						max_active = 1;
   9.319 +				}
   9.320 +				active = 0;
   9.321 +				ibegin = i + 1;
   9.322 +			}
   9.323 +		}
   9.324 +		start += (1UL << nbits);
   9.325 +	} while (start < end);
   9.326 +
   9.327 +	itc2 = ia64_get_itc() - itc2;
   9.328 +	__get_cpu_var(ptcstats).shub_itc_clocks += itc2;
   9.329 +	if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
   9.330 +		__get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;
   9.331 +
   9.332 +	if (old_rr) {
   9.333 +		ia64_set_rr(start, old_rr);
   9.334 +		ia64_srlz_d();
   9.335 +	}
   9.336 +
   9.337 +	spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
   9.338 +
   9.339 +	preempt_enable();
   9.340 +}
   9.341 +
   9.342 +/*
   9.343 + * sn2_ptc_deadlock_recovery
   9.344 + *
   9.345 + * Recover from PTC deadlocks conditions. Recovery requires stepping thru each 
   9.346 + * TLB flush transaction.  The recovery sequence is somewhat tricky & is
   9.347 + * coded in assembly language.
   9.348 + */
   9.349 +
   9.350 +void
   9.351 +sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
   9.352 +			  volatile unsigned long *ptc0, unsigned long data0,
   9.353 +			  volatile unsigned long *ptc1, unsigned long data1)
   9.354 +{
   9.355 +	short nasid, i;
   9.356 +	unsigned long *piows, zeroval, n;
   9.357 +
   9.358 +	__get_cpu_var(ptcstats).deadlocks++;
   9.359 +
   9.360 +	piows = (unsigned long *) pda->pio_write_status_addr;
   9.361 +	zeroval = pda->pio_write_status_val;
   9.362 +
   9.363 +
   9.364 +	for (i=ib; i <= ie; i++) {
   9.365 +		nasid = nasids[i];
   9.366 +		if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid)
   9.367 +			continue;
   9.368 +		ptc0 = CHANGE_NASID(nasid, ptc0);
   9.369 +		if (ptc1)
   9.370 +			ptc1 = CHANGE_NASID(nasid, ptc1);
   9.371 +
   9.372 +		n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
   9.373 +		__get_cpu_var(ptcstats).deadlocks2 += n;
   9.374 +	}
   9.375 +
   9.376 +}
   9.377 +
   9.378 +/**
   9.379 + * sn_send_IPI_phys - send an IPI to a Nasid and slice
   9.380 + * @nasid: nasid to receive the interrupt (may be outside partition)
   9.381 + * @physid: physical cpuid to receive the interrupt.
   9.382 + * @vector: command to send
   9.383 + * @delivery_mode: delivery mechanism
   9.384 + *
   9.385 + * Sends an IPI (interprocessor interrupt) to the processor specified by
   9.386 + * @physid
   9.387 + *
   9.388 + * @delivery_mode can be one of the following
   9.389 + *
   9.390 + * %IA64_IPI_DM_INT - pend an interrupt
   9.391 + * %IA64_IPI_DM_PMI - pend a PMI
   9.392 + * %IA64_IPI_DM_NMI - pend an NMI
   9.393 + * %IA64_IPI_DM_INIT - pend an INIT interrupt
   9.394 + */
   9.395 +void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode)
   9.396 +{
   9.397 +	long val;
   9.398 +	unsigned long flags = 0;
   9.399 +	volatile long *p;
   9.400 +
   9.401 +	p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
   9.402 +	val = (1UL << SH_IPI_INT_SEND_SHFT) |
   9.403 +	    (physid << SH_IPI_INT_PID_SHFT) |
   9.404 +	    ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
   9.405 +	    ((long)vector << SH_IPI_INT_IDX_SHFT) |
   9.406 +	    (0x000feeUL << SH_IPI_INT_BASE_SHFT);
   9.407 +
   9.408 +	mb();
   9.409 +	if (enable_shub_wars_1_1()) {
   9.410 +		spin_lock_irqsave(&sn2_global_ptc_lock, flags);
   9.411 +	}
   9.412 +	pio_phys_write_mmr(p, val);
   9.413 +	if (enable_shub_wars_1_1()) {
   9.414 +		wait_piowc();
   9.415 +		spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
   9.416 +	}
   9.417 +
   9.418 +}
   9.419 +
   9.420 +EXPORT_SYMBOL(sn_send_IPI_phys);
   9.421 +
   9.422 +/**
   9.423 + * sn2_send_IPI - send an IPI to a processor
   9.424 + * @cpuid: target of the IPI
   9.425 + * @vector: command to send
   9.426 + * @delivery_mode: delivery mechanism
   9.427 + * @redirect: redirect the IPI?
   9.428 + *
   9.429 + * Sends an IPI (InterProcessor Interrupt) to the processor specified by
   9.430 + * @cpuid.  @vector specifies the command to send, while @delivery_mode can 
   9.431 + * be one of the following
   9.432 + *
   9.433 + * %IA64_IPI_DM_INT - pend an interrupt
   9.434 + * %IA64_IPI_DM_PMI - pend a PMI
   9.435 + * %IA64_IPI_DM_NMI - pend an NMI
   9.436 + * %IA64_IPI_DM_INIT - pend an INIT interrupt
   9.437 + */
   9.438 +void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
   9.439 +{
   9.440 +	long physid;
   9.441 +	int nasid;
   9.442 +
   9.443 +	physid = cpu_physical_id(cpuid);
   9.444 +#ifdef XEN
   9.445 +	if (!sn_nodepda) {
   9.446 +		ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
   9.447 +	} else
   9.448 +#endif
   9.449 +	nasid = cpuid_to_nasid(cpuid);
   9.450 +
   9.451 +	/* the following is used only when starting cpus at boot time */
   9.452 +	if (unlikely(nasid == -1))
   9.453 +		ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
   9.454 +
   9.455 + 	sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
   9.456 +}
   9.457 +
   9.458 +#ifdef CONFIG_PROC_FS
   9.459 +
   9.460 +#define PTC_BASENAME	"sgi_sn/ptc_statistics"
   9.461 +
   9.462 +static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
   9.463 +{
   9.464 +	if (*offset < NR_CPUS)
   9.465 +		return offset;
   9.466 +	return NULL;
   9.467 +}
   9.468 +
   9.469 +static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
   9.470 +{
   9.471 +	(*offset)++;
   9.472 +	if (*offset < NR_CPUS)
   9.473 +		return offset;
   9.474 +	return NULL;
   9.475 +}
   9.476 +
   9.477 +static void sn2_ptc_seq_stop(struct seq_file *file, void *data)
   9.478 +{
   9.479 +}
   9.480 +
   9.481 +static int sn2_ptc_seq_show(struct seq_file *file, void *data)
   9.482 +{
   9.483 +	struct ptc_stats *stat;
   9.484 +	int cpu;
   9.485 +
   9.486 +	cpu = *(loff_t *) data;
   9.487 +
   9.488 +	if (!cpu) {
   9.489 +		seq_printf(file,
   9.490 +			   "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2\n");
   9.491 +		seq_printf(file, "# ptctest %d\n", sn2_ptctest);
   9.492 +	}
   9.493 +
   9.494 +	if (cpu < NR_CPUS && cpu_online(cpu)) {
   9.495 +		stat = &per_cpu(ptcstats, cpu);
   9.496 +		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
   9.497 +				stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
   9.498 +				stat->deadlocks,
   9.499 +				1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
   9.500 +				1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
   9.501 +				1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
   9.502 +				stat->shub_ptc_flushes_not_my_mm,
   9.503 +				stat->deadlocks2);
   9.504 +	}
   9.505 +	return 0;
   9.506 +}
   9.507 +
   9.508 +static struct seq_operations sn2_ptc_seq_ops = {
   9.509 +	.start = sn2_ptc_seq_start,
   9.510 +	.next = sn2_ptc_seq_next,
   9.511 +	.stop = sn2_ptc_seq_stop,
   9.512 +	.show = sn2_ptc_seq_show
   9.513 +};
   9.514 +
   9.515 +static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
   9.516 +{
   9.517 +	return seq_open(file, &sn2_ptc_seq_ops);
   9.518 +}
   9.519 +
   9.520 +static struct file_operations proc_sn2_ptc_operations = {
   9.521 +	.open = sn2_ptc_proc_open,
   9.522 +	.read = seq_read,
   9.523 +	.llseek = seq_lseek,
   9.524 +	.release = seq_release,
   9.525 +};
   9.526 +
   9.527 +static struct proc_dir_entry *proc_sn2_ptc;
   9.528 +
   9.529 +static int __init sn2_ptc_init(void)
   9.530 +{
   9.531 +	if (!ia64_platform_is("sn2"))
   9.532 +		return 0;
   9.533 +
   9.534 +	if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) {
   9.535 +		printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
   9.536 +		return -EINVAL;
   9.537 +	}
   9.538 +	proc_sn2_ptc->proc_fops = &proc_sn2_ptc_operations;
   9.539 +	spin_lock_init(&sn2_global_ptc_lock);
   9.540 +	return 0;
   9.541 +}
   9.542 +
   9.543 +static void __exit sn2_ptc_exit(void)
   9.544 +{
   9.545 +	remove_proc_entry(PTC_BASENAME, NULL);
   9.546 +}
   9.547 +
   9.548 +module_init(sn2_ptc_init);
   9.549 +module_exit(sn2_ptc_exit);
   9.550 +#endif /* CONFIG_PROC_FS */
   9.551 +
    10.1 --- a/xen/arch/ia64/linux/Makefile	Wed Dec 20 08:53:42 2006 -0700
    10.2 +++ b/xen/arch/ia64/linux/Makefile	Wed Dec 20 14:55:02 2006 -0700
    10.3 @@ -1,3 +1,7 @@
    10.4 +subdir-y += dig
    10.5 +subdir-y += hp
    10.6 +subdir-y += sn
    10.7 +
    10.8  obj-y += bitop.o
    10.9  obj-y += clear_page.o
   10.10  obj-y += cmdline.o
   10.11 @@ -23,6 +27,7 @@ obj-y += __udivdi3.o
   10.12  obj-y += __moddi3.o
   10.13  obj-y += __umoddi3.o
   10.14  obj-y += carta_random.o
   10.15 +obj-y += io.o
   10.16  
   10.17  ## variants of divide/modulo
   10.18  ## see files in xen/arch/ia64/linux/lib (linux/arch/ia64/lib)
    11.1 --- a/xen/arch/ia64/linux/README.origin	Wed Dec 20 08:53:42 2006 -0700
    11.2 +++ b/xen/arch/ia64/linux/README.origin	Wed Dec 20 14:55:02 2006 -0700
    11.3 @@ -27,3 +27,6 @@ strlen.S		-> linux/arch/ia64/lib/strlen.
    11.4  
    11.5  # The files below are from Linux-2.6.16.33
    11.6  carta_random.S		-> linux/arch/ia64/lib/carta_random.S
    11.7 +
    11.8 +# The files below are from Linux-2.6.19
    11.9 +io.c			-> linux/arch/ia64/lib/io.c
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/xen/arch/ia64/linux/dig/Makefile	Wed Dec 20 14:55:02 2006 -0700
    12.3 @@ -0,0 +1,1 @@
    12.4 +obj-y += machvec.o
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/xen/arch/ia64/linux/dig/README.origin	Wed Dec 20 14:55:02 2006 -0700
    13.3 @@ -0,0 +1,7 @@
    13.4 +Source files in this directory are identical copies of linux-2.6.19 files:
    13.5 +
    13.6 +NOTE: DO NOT commit changes to these files!   If a file
    13.7 +needs to be changed, move it to ../linux-xen and follow
    13.8 +the instructions in the README there.
    13.9 +
   13.10 +machvec.c		-> linux/arch/ia64/dig/machvec.c
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/xen/arch/ia64/linux/dig/machvec.c	Wed Dec 20 14:55:02 2006 -0700
    14.3 @@ -0,0 +1,3 @@
    14.4 +#define MACHVEC_PLATFORM_NAME		dig
    14.5 +#define MACHVEC_PLATFORM_HEADER		<asm/machvec_dig.h>
    14.6 +#include <asm/machvec_init.h>
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/xen/arch/ia64/linux/hp/Makefile	Wed Dec 20 14:55:02 2006 -0700
    15.3 @@ -0,0 +1,1 @@
    15.4 +subdir-y += zx1
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/xen/arch/ia64/linux/hp/zx1/Makefile	Wed Dec 20 14:55:02 2006 -0700
    16.3 @@ -0,0 +1,1 @@
    16.4 +obj-y += hpzx1_machvec.o
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/xen/arch/ia64/linux/hp/zx1/README.origin	Wed Dec 20 14:55:02 2006 -0700
    17.3 @@ -0,0 +1,7 @@
    17.4 +Source files in this directory are identical copies of linux-2.6.19 files:
    17.5 +
    17.6 +NOTE: DO NOT commit changes to these files!   If a file
    17.7 +needs to be changed, move it to ../linux-xen and follow
    17.8 +the instructions in the README there.
    17.9 +
   17.10 +hpzx1_machvec.c		-> linux/arch/ia64/hp/zx1/hpzx1_machvec.c
    18.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.2 +++ b/xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c	Wed Dec 20 14:55:02 2006 -0700
    18.3 @@ -0,0 +1,3 @@
    18.4 +#define MACHVEC_PLATFORM_NAME		hpzx1
    18.5 +#define MACHVEC_PLATFORM_HEADER		<asm/machvec_hpzx1.h>
    18.6 +#include <asm/machvec_init.h>
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/xen/arch/ia64/linux/io.c	Wed Dec 20 14:55:02 2006 -0700
    19.3 @@ -0,0 +1,164 @@
    19.4 +#include <linux/module.h>
    19.5 +#include <linux/types.h>
    19.6 +
    19.7 +#include <asm/io.h>
    19.8 +
    19.9 +/*
   19.10 + * Copy data from IO memory space to "real" memory space.
   19.11 + * This needs to be optimized.
   19.12 + */
   19.13 +void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
   19.14 +{
   19.15 +	char *dst = to;
   19.16 +
   19.17 +	while (count) {
   19.18 +		count--;
   19.19 +		*dst++ = readb(from++);
   19.20 +	}
   19.21 +}
   19.22 +EXPORT_SYMBOL(memcpy_fromio);
   19.23 +
   19.24 +/*
   19.25 + * Copy data from "real" memory space to IO memory space.
   19.26 + * This needs to be optimized.
   19.27 + */
   19.28 +void memcpy_toio(volatile void __iomem *to, const void *from, long count)
   19.29 +{
   19.30 +	const char *src = from;
   19.31 +
   19.32 +	while (count) {
   19.33 +		count--;
   19.34 +		writeb(*src++, to++);
   19.35 +	}
   19.36 +}
   19.37 +EXPORT_SYMBOL(memcpy_toio);
   19.38 +
   19.39 +/*
   19.40 + * "memset" on IO memory space.
   19.41 + * This needs to be optimized.
   19.42 + */
   19.43 +void memset_io(volatile void __iomem *dst, int c, long count)
   19.44 +{
   19.45 +	unsigned char ch = (char)(c & 0xff);
   19.46 +
   19.47 +	while (count) {
   19.48 +		count--;
   19.49 +		writeb(ch, dst);
   19.50 +		dst++;
   19.51 +	}
   19.52 +}
   19.53 +EXPORT_SYMBOL(memset_io);
   19.54 +
   19.55 +#ifdef CONFIG_IA64_GENERIC
   19.56 +
   19.57 +#undef __ia64_inb
   19.58 +#undef __ia64_inw
   19.59 +#undef __ia64_inl
   19.60 +#undef __ia64_outb
   19.61 +#undef __ia64_outw
   19.62 +#undef __ia64_outl
   19.63 +#undef __ia64_readb
   19.64 +#undef __ia64_readw
   19.65 +#undef __ia64_readl
   19.66 +#undef __ia64_readq
   19.67 +#undef __ia64_readb_relaxed
   19.68 +#undef __ia64_readw_relaxed
   19.69 +#undef __ia64_readl_relaxed
   19.70 +#undef __ia64_readq_relaxed
   19.71 +#undef __ia64_writeb
   19.72 +#undef __ia64_writew
   19.73 +#undef __ia64_writel
   19.74 +#undef __ia64_writeq
   19.75 +#undef __ia64_mmiowb
   19.76 +
   19.77 +unsigned int
   19.78 +__ia64_inb (unsigned long port)
   19.79 +{
   19.80 +	return ___ia64_inb(port);
   19.81 +}
   19.82 +
   19.83 +unsigned int
   19.84 +__ia64_inw (unsigned long port)
   19.85 +{
   19.86 +	return ___ia64_inw(port);
   19.87 +}
   19.88 +
   19.89 +unsigned int
   19.90 +__ia64_inl (unsigned long port)
   19.91 +{
   19.92 +	return ___ia64_inl(port);
   19.93 +}
   19.94 +
   19.95 +void
   19.96 +__ia64_outb (unsigned char val, unsigned long port)
   19.97 +{
   19.98 +	___ia64_outb(val, port);
   19.99 +}
  19.100 +
  19.101 +void
  19.102 +__ia64_outw (unsigned short val, unsigned long port)
  19.103 +{
  19.104 +	___ia64_outw(val, port);
  19.105 +}
  19.106 +
  19.107 +void
  19.108 +__ia64_outl (unsigned int val, unsigned long port)
  19.109 +{
  19.110 +	___ia64_outl(val, port);
  19.111 +}
  19.112 +
  19.113 +unsigned char
  19.114 +__ia64_readb (void __iomem *addr)
  19.115 +{
  19.116 +	return ___ia64_readb (addr);
  19.117 +}
  19.118 +
  19.119 +unsigned short
  19.120 +__ia64_readw (void __iomem *addr)
  19.121 +{
  19.122 +	return ___ia64_readw (addr);
  19.123 +}
  19.124 +
  19.125 +unsigned int
  19.126 +__ia64_readl (void __iomem *addr)
  19.127 +{
  19.128 +	return ___ia64_readl (addr);
  19.129 +}
  19.130 +
  19.131 +unsigned long
  19.132 +__ia64_readq (void __iomem *addr)
  19.133 +{
  19.134 +	return ___ia64_readq (addr);
  19.135 +}
  19.136 +
  19.137 +unsigned char
  19.138 +__ia64_readb_relaxed (void __iomem *addr)
  19.139 +{
  19.140 +	return ___ia64_readb (addr);
  19.141 +}
  19.142 +
  19.143 +unsigned short
  19.144 +__ia64_readw_relaxed (void __iomem *addr)
  19.145 +{
  19.146 +	return ___ia64_readw (addr);
  19.147 +}
  19.148 +
  19.149 +unsigned int
  19.150 +__ia64_readl_relaxed (void __iomem *addr)
  19.151 +{
  19.152 +	return ___ia64_readl (addr);
  19.153 +}
  19.154 +
  19.155 +unsigned long
  19.156 +__ia64_readq_relaxed (void __iomem *addr)
  19.157 +{
  19.158 +	return ___ia64_readq (addr);
  19.159 +}
  19.160 +
  19.161 +void
  19.162 +__ia64_mmiowb(void)
  19.163 +{
  19.164 +	___ia64_mmiowb();
  19.165 +}
  19.166 +
  19.167 +#endif /* CONFIG_IA64_GENERIC */
    20.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.2 +++ b/xen/arch/ia64/linux/sn/Makefile	Wed Dec 20 14:55:02 2006 -0700
    20.3 @@ -0,0 +1,2 @@
    20.4 +subdir-y += kernel
    20.5 +subdir-y += pci
    21.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.2 +++ b/xen/arch/ia64/linux/sn/kernel/Makefile	Wed Dec 20 14:55:02 2006 -0700
    21.3 @@ -0,0 +1,3 @@
    21.4 +obj-y += machvec.o
    21.5 +obj-y += pio_phys.o
    21.6 +obj-y += ptc_deadlock.o
    22.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.2 +++ b/xen/arch/ia64/linux/sn/kernel/README.origin	Wed Dec 20 14:55:02 2006 -0700
    22.3 @@ -0,0 +1,9 @@
    22.4 +Source files in this directory are identical copies of linux-2.6.19 files:
    22.5 +
    22.6 +NOTE: DO NOT commit changes to these files!   If a file
    22.7 +needs to be changed, move it to ../linux-xen and follow
    22.8 +the instructions in the README there.
    22.9 +
   22.10 +machvec.c		-> linux/arch/ia64/sn/kernel/machvec.c
   22.11 +pio_phys.S		-> linux/arch/ia64/sn/kernel/pio_phys.S
   22.12 +ptc_deadlock.S		-> linux/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
    23.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.2 +++ b/xen/arch/ia64/linux/sn/kernel/machvec.c	Wed Dec 20 14:55:02 2006 -0700
    23.3 @@ -0,0 +1,11 @@
    23.4 +/*
    23.5 + * This file is subject to the terms and conditions of the GNU General Public
    23.6 + * License.  See the file "COPYING" in the main directory of this archive
    23.7 + * for more details.
    23.8 + *
    23.9 + * Copyright (c) 2002-2003 Silicon Graphics, Inc.  All Rights Reserved.
   23.10 + */
   23.11 +
   23.12 +#define MACHVEC_PLATFORM_NAME	sn2
   23.13 +#define MACHVEC_PLATFORM_HEADER	<asm/machvec_sn2.h>
   23.14 +#include <asm/machvec_init.h>
    24.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.2 +++ b/xen/arch/ia64/linux/sn/kernel/pio_phys.S	Wed Dec 20 14:55:02 2006 -0700
    24.3 @@ -0,0 +1,71 @@
    24.4 +/*
    24.5 + * This file is subject to the terms and conditions of the GNU General Public
    24.6 + * License.  See the file "COPYING" in the main directory of this archive
    24.7 + * for more details.
    24.8 + *
    24.9 + * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
   24.10 + *
   24.11 + * This file contains macros used to access MMR registers via
   24.12 + * uncached physical addresses.
   24.13 + *      pio_phys_read_mmr  - read an MMR
   24.14 + *      pio_phys_write_mmr - write an MMR
   24.15 + *      pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
   24.16 + *              Second MMR will be skipped if address is NULL
   24.17 + *
   24.18 + * Addresses passed to these routines should be uncached physical addresses
   24.19 + * 	ie., 0x80000....
   24.20 + */
   24.21 +
   24.22 +
   24.23 +
   24.24 +#include <asm/asmmacro.h>
   24.25 +#include <asm/page.h>
   24.26 +
   24.27 +GLOBAL_ENTRY(pio_phys_read_mmr)
   24.28 +	.prologue
   24.29 +	.regstk 1,0,0,0
   24.30 +	.body
   24.31 +	mov r2=psr
   24.32 +	rsm psr.i | psr.dt
   24.33 +	;;
   24.34 +	srlz.d
   24.35 +	ld8.acq r8=[r32]
   24.36 +	;;
   24.37 +	mov psr.l=r2;;
   24.38 +	srlz.d
   24.39 +	br.ret.sptk.many rp
   24.40 +END(pio_phys_read_mmr)
   24.41 +
   24.42 +GLOBAL_ENTRY(pio_phys_write_mmr)
   24.43 +	.prologue
   24.44 +	.regstk 2,0,0,0
   24.45 +	.body
   24.46 +	mov r2=psr
   24.47 +	rsm psr.i | psr.dt
   24.48 +	;;
   24.49 +	srlz.d
   24.50 +	st8.rel [r32]=r33
   24.51 +	;;
   24.52 +	mov psr.l=r2;;
   24.53 +	srlz.d
   24.54 +	br.ret.sptk.many rp
   24.55 +END(pio_phys_write_mmr)
   24.56 +
   24.57 +GLOBAL_ENTRY(pio_atomic_phys_write_mmrs)
   24.58 +	.prologue
   24.59 +	.regstk 4,0,0,0
   24.60 +	.body
   24.61 +	mov r2=psr
   24.62 +	cmp.ne p9,p0=r34,r0;
   24.63 +	rsm psr.i | psr.dt | psr.ic
   24.64 +	;;
   24.65 +	srlz.d
   24.66 +	st8.rel [r32]=r33
   24.67 +(p9)	st8.rel [r34]=r35
   24.68 +	;;
   24.69 +	mov psr.l=r2;;
   24.70 +	srlz.d
   24.71 +	br.ret.sptk.many rp
   24.72 +END(pio_atomic_phys_write_mmrs)
   24.73 +
   24.74 +
    25.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.2 +++ b/xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S	Wed Dec 20 14:55:02 2006 -0700
    25.3 @@ -0,0 +1,92 @@
    25.4 +/* 
    25.5 + * This file is subject to the terms and conditions of the GNU General Public
    25.6 + * License.  See the file "COPYING" in the main directory of this archive
    25.7 + * for more details.
    25.8 + *
    25.9 + * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
   25.10 + */
   25.11 +
   25.12 +#include <asm/types.h>
   25.13 +#include <asm/sn/shub_mmr.h>
   25.14 +
   25.15 +#define DEADLOCKBIT	SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
   25.16 +#define WRITECOUNTMASK	SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK
   25.17 +#define ALIAS_OFFSET	8
   25.18 +
   25.19 +
   25.20 +	.global	sn2_ptc_deadlock_recovery_core
   25.21 +	.proc  	sn2_ptc_deadlock_recovery_core
   25.22 +
   25.23 +sn2_ptc_deadlock_recovery_core:
   25.24 +	.regstk 6,0,0,0
   25.25 +
   25.26 +	ptc0  	 = in0
   25.27 +	data0 	 = in1
   25.28 +	ptc1  	 = in2
   25.29 +	data1 	 = in3
   25.30 +	piowc 	 = in4
   25.31 +	zeroval  = in5
   25.32 +	piowcphy = r30
   25.33 +	psrsave  = r2
   25.34 +	scr1	 = r16
   25.35 +	scr2	 = r17
   25.36 +	mask	 = r18
   25.37 +
   25.38 +
   25.39 +	extr.u	piowcphy=piowc,0,61;;	// Convert piowc to uncached physical address
   25.40 +	dep	piowcphy=-1,piowcphy,63,1
   25.41 +	movl	mask=WRITECOUNTMASK
   25.42 +	mov	r8=r0
   25.43 +
   25.44 +1:
   25.45 +	cmp.ne  p8,p9=r0,ptc1		// Test for shub type (ptc1 non-null on shub1)
   25.46 +					// p8 = 1 if shub1, p9 = 1 if shub2
   25.47 +
   25.48 +	add	scr2=ALIAS_OFFSET,piowc	// Address of WRITE_STATUS alias register 
   25.49 +	mov	scr1=7;;		// Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR
   25.50 +(p8)	st8.rel	[scr2]=scr1;;
   25.51 +(p9)	ld8.acq	scr1=[scr2];;
   25.52 +
   25.53 +5:	ld8.acq	scr1=[piowc];;		// Wait for PIOs to complete.
   25.54 +	hint	@pause
   25.55 +	and	scr2=scr1,mask;;	// mask of writecount bits
   25.56 +	cmp.ne	p6,p0=zeroval,scr2
   25.57 +(p6)	br.cond.sptk 5b
   25.58 +	
   25.59 +
   25.60 +
   25.61 +	////////////// BEGIN PHYSICAL MODE ////////////////////
   25.62 +	mov psrsave=psr			// Disable IC (no PMIs)
   25.63 +	rsm psr.i | psr.dt | psr.ic;;
   25.64 +	srlz.i;;
   25.65 +
   25.66 +	st8.rel [ptc0]=data0		// Write PTC0 & wait for completion.
   25.67 +
   25.68 +5:	ld8.acq	scr1=[piowcphy];;	// Wait for PIOs to complete.
   25.69 +	hint	@pause
   25.70 +	and	scr2=scr1,mask;;	// mask of writecount bits
   25.71 +	cmp.ne	p6,p0=zeroval,scr2
   25.72 +(p6)	br.cond.sptk 5b;;
   25.73 +
   25.74 +	tbit.nz	p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK
   25.75 +(p7)	cmp.ne p7,p0=r0,ptc1;;		// Test for non-null ptc1
   25.76 +	
   25.77 +(p7)	st8.rel [ptc1]=data1;;		// Now write PTC1.
   25.78 +
   25.79 +5:	ld8.acq	scr1=[piowcphy];;	// Wait for PIOs to complete.
   25.80 +	hint	@pause
   25.81 +	and	scr2=scr1,mask;;	// mask of writecount bits
   25.82 +	cmp.ne	p6,p0=zeroval,scr2
   25.83 +(p6)	br.cond.sptk 5b
   25.84 +	
   25.85 +	tbit.nz	p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK
   25.86 +
   25.87 +	mov psr.l=psrsave;;		// Reenable IC
   25.88 +	srlz.i;;
   25.89 +	////////////// END   PHYSICAL MODE ////////////////////
   25.90 +
   25.91 +(p8)	add	r8=1,r8
   25.92 +(p8)	br.cond.spnt 1b;;		// Repeat if DEADLOCK occurred.
   25.93 +
   25.94 +	br.ret.sptk	rp
   25.95 +	.endp sn2_ptc_deadlock_recovery_core
    26.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.2 +++ b/xen/arch/ia64/linux/sn/pci/Makefile	Wed Dec 20 14:55:02 2006 -0700
    26.3 @@ -0,0 +1,1 @@
    26.4 +subdir-y += pcibr
    27.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.2 +++ b/xen/arch/ia64/linux/sn/pci/pcibr/Makefile	Wed Dec 20 14:55:02 2006 -0700
    27.3 @@ -0,0 +1,1 @@
    27.4 +obj-y += pcibr_reg.o
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/xen/arch/ia64/linux/sn/pci/pcibr/README.origin	Wed Dec 20 14:55:02 2006 -0700
    28.3 @@ -0,0 +1,7 @@
    28.4 +Source files in this directory are identical copies of linux-2.6.19 files:
    28.5 +
    28.6 +NOTE: DO NOT commit changes to these files!   If a file
    28.7 +needs to be changed, move it to ../linux-xen and follow
    28.8 +the instructions in the README there.
    28.9 +
   28.10 +pcibr_reg.c		-> linux/arch/ia64/sn/pci/pcibr/pcibr_reg.c
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c	Wed Dec 20 14:55:02 2006 -0700
    29.3 @@ -0,0 +1,285 @@
    29.4 +/*
    29.5 + * This file is subject to the terms and conditions of the GNU General Public
    29.6 + * License.  See the file "COPYING" in the main directory of this archive
    29.7 + * for more details.
    29.8 + *
    29.9 + * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
   29.10 + */
   29.11 +
   29.12 +#include <linux/interrupt.h>
   29.13 +#include <linux/types.h>
   29.14 +#include <asm/sn/io.h>
   29.15 +#include <asm/sn/pcibr_provider.h>
   29.16 +#include <asm/sn/pcibus_provider_defs.h>
   29.17 +#include <asm/sn/pcidev.h>
   29.18 +#include <asm/sn/pic.h>
   29.19 +#include <asm/sn/tiocp.h>
   29.20 +
   29.21 +union br_ptr {
   29.22 +	struct tiocp tio;
   29.23 +	struct pic pic;
   29.24 +};
   29.25 +
   29.26 +/*
   29.27 + * Control Register Access -- Read/Write                            0000_0020
   29.28 + */
   29.29 +void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
   29.30 +{
   29.31 +	union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
   29.32 +
   29.33 +	if (pcibus_info) {
   29.34 +		switch (pcibus_info->pbi_bridge_type) {
   29.35 +		case PCIBR_BRIDGETYPE_TIOCP:
   29.36 +			__sn_clrq_relaxed(&ptr->tio.cp_control, bits);
   29.37 +			break;
   29.38 +		case PCIBR_BRIDGETYPE_PIC:
   29.39 +			__sn_clrq_relaxed(&ptr->pic.p_wid_control, bits);
   29.40 +			break;
   29.41 +		default:
   29.42 +			panic
   29.43 +			    ("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
   29.44 +			     ptr);
   29.45 +		}
   29.46 +	}
   29.47 +}
   29.48 +
   29.49 +void pcireg_control_bit_set(struct pcibus_info *pcibus_info, u64 bits)
   29.50 +{
   29.51 +	union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
   29.52 +
   29.53 +	if (pcibus_info) {
   29.54 +		switch (pcibus_info->pbi_bridge_type) {
   29.55 +		case PCIBR_BRIDGETYPE_TIOCP:
   29.56 +			__sn_setq_relaxed(&ptr->tio.cp_control, bits);
   29.57 +			break;
   29.58 +		case PCIBR_BRIDGETYPE_PIC:
   29.59 +			__sn_setq_relaxed(&ptr->pic.p_wid_control, bits);
   29.60 +			break;
   29.61 +		default:
   29.62 +			panic
   29.63 +			    ("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
   29.64 +			     ptr);
   29.65 +		}
   29.66 +	}
   29.67 +}
   29.68 +
   29.69 +/*
   29.70 + * PCI/PCIX Target Flush Register Access -- Read Only		    0000_0050
   29.71 + */
   29.72 +u64 pcireg_tflush_get(struct pcibus_info *pcibus_info)
   29.73 +{
   29.74 +	union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
   29.75 +	u64 ret = 0;
   29.76 +
   29.77 +	if (pcibus_info) {
   29.78 +		switch (pcibus_info->pbi_bridge_type) {
   29.79 +		case PCIBR_BRIDGETYPE_TIOCP:
   29.80 +			ret = __sn_readq_relaxed(&ptr->tio.cp_tflush);
   29.81 +			break;
   29.82 +		case PCIBR_BRIDGETYPE_PIC:
   29.83 +			ret = __sn_readq_relaxed(&ptr->pic.p_wid_tflush);
   29.84 +			break;
   29.85 +		default:
   29.86 +			panic
   29.87 +			    ("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
   29.88 +			     ptr);
   29.89 +		}
   29.90 +	}
   29.91 +
   29.92 +	/* Read of the Target Flush should always return zero */
   29.93 +	if (ret != 0)
   29.94 +		panic("pcireg_tflush_get:Target Flush failed\n");
   29.95 +
   29.96 +	return ret;
   29.97 +}
   29.98 +
   29.99 +/*
  29.100 + * Interrupt Status Register Access -- Read Only		    0000_0100
  29.101 + */
  29.102 +u64 pcireg_intr_status_get(struct pcibus_info * pcibus_info)
  29.103 +{
  29.104 +	union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
  29.105 +	u64 ret = 0;
  29.106 +
  29.107 +	if (pcibus_info) {
  29.108 +		switch (pcibus_info->pbi_bridge_type) {
  29.109 +		case PCIBR_BRIDGETYPE_TIOCP:
  29.110 +			ret = __sn_readq_relaxed(&ptr->tio.cp_int_status);
  29.111 +			break;
  29.112 +		case PCIBR_BRIDGETYPE_PIC:
  29.113 +			ret = __sn_readq_relaxed(&ptr->pic.p_int_status);
  29.114 +			break;
  29.115 +		default:
  29.116 +			panic
  29.117 +			    ("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
  29.118 +			     ptr);
  29.119 +		}
  29.120 +	}
  29.121 +	return ret;
  29.122 +}
  29.123 +
  29.124 +/*
  29.125 + * Interrupt Enable Register Access -- Read/Write                   0000_0108
  29.126 + */
  29.127 +void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
  29.128 +{
  29.129 +	union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
  29.130 +
  29.131 +	if (pcibus_info) {
  29.132 +		switch (pcibus_info->pbi_bridge_type) {
  29.133 +		case PCIBR_BRIDGETYPE_TIOCP:
  29.134 +			__sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits);
  29.135 +			break;
  29.136 +		case PCIBR_BRIDGETYPE_PIC:
  29.137 +			__sn_clrq_relaxed(&ptr->pic.p_int_enable, bits);
  29.138 +			break;
  29.139 +		default:
  29.140 +			panic
  29.141 +			    ("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
  29.142 +			     ptr);
  29.143 +		}
  29.144 +	}
  29.145 +}
  29.146 +
  29.147 +void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, u64 bits)
  29.148 +{
  29.149 +	union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
  29.150 +
  29.151 +	if (pcibus_info) {
  29.152 +		switch (pcibus_info->pbi_bridge_type) {
  29.153 +		case PCIBR_BRIDGETYPE_TIOCP:
  29.154 +			__sn_setq_relaxed(&ptr->tio.cp_int_enable, bits);
  29.155 +			break;
  29.156 +		case PCIBR_BRIDGETYPE_PIC:
  29.157 +			__sn_setq_relaxed(&ptr->pic.p_int_enable, bits);
  29.158 +			break;
  29.159 +		default:
  29.160 +			panic
  29.161 +			    ("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
  29.162 +			     ptr);
  29.163 +		}
  29.164 +	}
  29.165 +}
  29.166 +
  29.167 +/*
  29.168 + * Intr Host Address Register (int_addr) -- Read/Write  0000_0130 - 0000_0168
  29.169 + */
  29.170 +void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
  29.171 +			       u64 addr)
  29.172 +{
  29.173 +	union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
  29.174 +
  29.175 +	if (pcibus_info) {
  29.176 +		switch (pcibus_info->pbi_bridge_type) {
  29.177 +		case PCIBR_BRIDGETYPE_TIOCP:
  29.178 +			__sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n],
  29.179 +			    TIOCP_HOST_INTR_ADDR);
  29.180 +			__sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n],
  29.181 +			    (addr & TIOCP_HOST_INTR_ADDR));
  29.182 +			break;
  29.183 +		case PCIBR_BRIDGETYPE_PIC:
  29.184 +			__sn_clrq_relaxed(&ptr->pic.p_int_addr[int_n],
  29.185 +			    PIC_HOST_INTR_ADDR);
  29.186 +			__sn_setq_relaxed(&ptr->pic.p_int_addr[int_n],
  29.187 +			    (addr & PIC_HOST_INTR_ADDR));
  29.188 +			break;
  29.189 +		default:
  29.190 +			panic
  29.191 +			    ("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p",
  29.192 +			     ptr);
  29.193 +		}
  29.194 +	}
  29.195 +}
  29.196 +
  29.197 +/*
  29.198 + * Force Interrupt Register Access -- Write Only	0000_01C0 - 0000_01F8
  29.199 + */
  29.200 +void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
  29.201 +{
  29.202 +	union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
  29.203 +
  29.204 +	if (pcibus_info) {
  29.205 +		switch (pcibus_info->pbi_bridge_type) {
  29.206 +		case PCIBR_BRIDGETYPE_TIOCP:
  29.207 +			writeq(1, &ptr->tio.cp_force_pin[int_n]);
  29.208 +			break;
  29.209 +		case PCIBR_BRIDGETYPE_PIC:
  29.210 +			writeq(1, &ptr->pic.p_force_pin[int_n]);
  29.211 +			break;
  29.212 +		default:
  29.213 +			panic
  29.214 +			    ("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
  29.215 +			     ptr);
  29.216 +		}
  29.217 +	}
  29.218 +}
  29.219 +
  29.220 +/*
  29.221 + * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
  29.222 + */
  29.223 +u64 pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
  29.224 +{
  29.225 +	union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
  29.226 +	u64 ret = 0;
  29.227 +
  29.228 +	if (pcibus_info) {
  29.229 +		switch (pcibus_info->pbi_bridge_type) {
  29.230 +		case PCIBR_BRIDGETYPE_TIOCP:
  29.231 +			ret =
  29.232 +			    __sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]);
  29.233 +			break;
  29.234 +		case PCIBR_BRIDGETYPE_PIC:
  29.235 +			ret =
  29.236 +			    __sn_readq_relaxed(&ptr->pic.p_wr_req_buf[device]);
  29.237 +			break;
  29.238 +		default:
  29.239 +		      panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", ptr);
  29.240 +		}
  29.241 +
  29.242 +	}
  29.243 +	/* Read of the Write Buffer Flush should always return zero */
  29.244 +	return ret;
  29.245 +}
  29.246 +
  29.247 +void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
  29.248 +			u64 val)
  29.249 +{
  29.250 +	union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
  29.251 +
  29.252 +	if (pcibus_info) {
  29.253 +		switch (pcibus_info->pbi_bridge_type) {
  29.254 +		case PCIBR_BRIDGETYPE_TIOCP:
  29.255 +			writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]);
  29.256 +			break;
  29.257 +		case PCIBR_BRIDGETYPE_PIC:
  29.258 +			writeq(val, &ptr->pic.p_int_ate_ram[ate_index]);
  29.259 +			break;
  29.260 +		default:
  29.261 +			panic
  29.262 +			    ("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
  29.263 +			     ptr);
  29.264 +		}
  29.265 +	}
  29.266 +}
  29.267 +
  29.268 +u64 __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
  29.269 +{
  29.270 +	union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
  29.271 +	u64 __iomem *ret = NULL;
  29.272 +
  29.273 +	if (pcibus_info) {
  29.274 +		switch (pcibus_info->pbi_bridge_type) {
  29.275 +		case PCIBR_BRIDGETYPE_TIOCP:
  29.276 +			ret = &ptr->tio.cp_int_ate_ram[ate_index];
  29.277 +			break;
  29.278 +		case PCIBR_BRIDGETYPE_PIC:
  29.279 +			ret = &ptr->pic.p_int_ate_ram[ate_index];
  29.280 +			break;
  29.281 +		default:
  29.282 +			panic
  29.283 +			    ("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
  29.284 +			     ptr);
  29.285 +		}
  29.286 +	}
  29.287 +	return ret;
  29.288 +}
    30.1 --- a/xen/arch/ia64/xen/irq.c	Wed Dec 20 08:53:42 2006 -0700
    30.2 +++ b/xen/arch/ia64/xen/irq.c	Wed Dec 20 14:55:02 2006 -0700
    30.3 @@ -48,6 +48,13 @@
    30.4  #include <xen/event.h>
    30.5  #define apicid_to_phys_cpu_present(x)	1
    30.6  
    30.7 +#ifdef CONFIG_IA64_GENERIC
    30.8 +unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
    30.9 +{
   30.10 +	return (unsigned int) vec;
   30.11 +}
   30.12 +#endif
   30.13 +
   30.14  /*
   30.15   * Linux has a controller-independent x86 interrupt architecture.
   30.16   * every controller has a 'controller-template', that is used
    31.1 --- a/xen/arch/ia64/xen/vhpt.c	Wed Dec 20 08:53:42 2006 -0700
    31.2 +++ b/xen/arch/ia64/xen/vhpt.c	Wed Dec 20 14:55:02 2006 -0700
    31.3 @@ -21,9 +21,6 @@
    31.4  #include <asm/vcpumask.h>
    31.5  #include <asm/vmmu.h>
    31.6  
    31.7 -/* Defined in tlb.c  */
    31.8 -extern void ia64_global_tlb_purge(u64 start, u64 end, u64 nbits);
    31.9 -
   31.10  extern long running_on_sim;
   31.11  
   31.12  DEFINE_PER_CPU (unsigned long, vhpt_paddr);
   31.13 @@ -364,7 +361,7 @@ void domain_flush_vtlb_range (struct dom
   31.14  	// ptc.ga has release semantics.
   31.15  
   31.16  	/* ptc.ga  */
   31.17 -	ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
   31.18 +	platform_global_tlb_purge(vadr, vadr + addr_range, PAGE_SHIFT);
   31.19  	perfc_incrc(domain_flush_vtlb_range);
   31.20  }
   31.21  
   31.22 @@ -442,7 +439,8 @@ void
   31.23  		perfc_incrc(domain_flush_vtlb_local);
   31.24  	} else {
   31.25  		/* ptc.ga has release semantics. */
   31.26 -		ia64_global_tlb_purge(vaddr, vaddr + PAGE_SIZE, PAGE_SHIFT);
   31.27 +		platform_global_tlb_purge(vaddr, vaddr + PAGE_SIZE,
   31.28 +		                          PAGE_SHIFT);
   31.29  		perfc_incrc(domain_flush_vtlb_global);
   31.30  	}
   31.31  
    32.1 --- a/xen/include/asm-ia64/config.h	Wed Dec 20 08:53:42 2006 -0700
    32.2 +++ b/xen/include/asm-ia64/config.h	Wed Dec 20 14:55:02 2006 -0700
    32.3 @@ -8,8 +8,9 @@
    32.4  // manufactured from component pieces
    32.5  
    32.6  // defined in linux/arch/ia64/defconfig
    32.7 -//#define	CONFIG_IA64_GENERIC
    32.8 -#define	CONFIG_IA64_HP_SIM
    32.9 +#define	CONFIG_IA64_GENERIC
   32.10 +#define CONFIG_HZ	32
   32.11 +
   32.12  #define	CONFIG_IA64_L1_CACHE_SHIFT 7
   32.13  // needed by include/asm-ia64/page.h
   32.14  #define	CONFIG_IA64_PAGE_SIZE_16KB	// 4KB doesn't work?!?
   32.15 @@ -145,14 +146,6 @@ extern int smp_num_siblings;
   32.16  // avoid redefining task_struct in asm/current.h
   32.17  #define task_struct vcpu
   32.18  
   32.19 -// linux/include/asm-ia64/machvec.h (linux/arch/ia64/lib/io.c)
   32.20 -#define platform_inb	__ia64_inb
   32.21 -#define platform_inw	__ia64_inw
   32.22 -#define platform_inl	__ia64_inl
   32.23 -#define platform_outb	__ia64_outb
   32.24 -#define platform_outw	__ia64_outw
   32.25 -#define platform_outl	__ia64_outl
   32.26 -
   32.27  #include <xen/cache.h>
   32.28  #ifndef CONFIG_SMP
   32.29  #define __cacheline_aligned_in_smp
   32.30 @@ -206,6 +199,16 @@ void sort_main_extable(void);
   32.31  // Deprivated linux inf and put here for short time compatibility
   32.32  #define kmalloc(s, t) xmalloc_bytes((s))
   32.33  #define kfree(s) xfree((s))
   32.34 +#define kzalloc(size, flags) 				\
   32.35 +({							\
   32.36 +	unsigned char *mem;				\
   32.37 +	mem = (unsigned char *)xmalloc_bytes(size);	\
   32.38 +	if (mem)					\
   32.39 +		memset(mem, 0, size);			\
   32.40 +	(void *)mem;					\
   32.41 +})
   32.42 +#define kcalloc(n, size, flags)		kzalloc(n * size, flags)
   32.43 +#define alloc_bootmem_node(pg, size)	xmalloc_bytes(size)
   32.44  
   32.45  // see common/keyhandler.c
   32.46  #define	nop()	asm volatile ("nop 0")
    33.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    33.2 +++ b/xen/include/asm-ia64/linux-null/linux/dmapool.h	Wed Dec 20 14:55:02 2006 -0700
    33.3 @@ -0,0 +1,1 @@
    33.4 +/* This file is intentionally left empty. */
    34.1 --- a/xen/include/asm-ia64/linux-null/linux/ioport.h	Wed Dec 20 08:53:42 2006 -0700
    34.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    34.3 @@ -1,1 +0,0 @@
    34.4 -/* This file is intentionally left empty. */
    35.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    35.2 +++ b/xen/include/asm-ia64/linux-null/linux/rwsem.h	Wed Dec 20 14:55:02 2006 -0700
    35.3 @@ -0,0 +1,1 @@
    35.4 +/* This file is intentionally left empty. */
    36.1 --- a/xen/include/asm-ia64/linux-xen/asm/README.origin	Wed Dec 20 08:53:42 2006 -0700
    36.2 +++ b/xen/include/asm-ia64/linux-xen/asm/README.origin	Wed Dec 20 14:55:02 2006 -0700
    36.3 @@ -34,3 +34,10 @@ iosapic.h		-> linux/include/asm-ia64/ios
    36.4  # The files below are from Linux-2.6.16.33
    36.5  perfmon.h		-> linux/include/asm-ia64/perfmon.h
    36.6  perfmon_default_smpl.h	-> linux/include/asm-ia64/perfmon_default_smpl.h
    36.7 +
    36.8 +# The files below are from Linux-2.6.19
    36.9 +machvec.h		-> linux/include/asm-ia64/machvec.h
   36.10 +machvec_dig.h		-> linux/include/asm-ia64/machvec_dig.h
   36.11 +machvec_sn2.h		-> linux/include/asm-ia64/machvec_sn2.h
   36.12 +machvec_hpzx1.h		-> linux/include/asm-ia64/machvec_hpzx1.h
   36.13 +machvec_pci.h		-> linux/include/asm-ia64/pci.h
    37.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    37.2 +++ b/xen/include/asm-ia64/linux-xen/asm/machvec.h	Wed Dec 20 14:55:02 2006 -0700
    37.3 @@ -0,0 +1,498 @@
    37.4 +/*
    37.5 + * Machine vector for IA-64.
    37.6 + *
    37.7 + * Copyright (C) 1999 Silicon Graphics, Inc.
    37.8 + * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
    37.9 + * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
   37.10 + * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
   37.11 + *	David Mosberger-Tang <davidm@hpl.hp.com>
   37.12 + */
   37.13 +#ifndef _ASM_IA64_MACHVEC_H
   37.14 +#define _ASM_IA64_MACHVEC_H
   37.15 +
   37.16 +#include <linux/config.h>
   37.17 +#include <linux/types.h>
   37.18 +
   37.19 +/* forward declarations: */
   37.20 +struct device;
   37.21 +struct pt_regs;
   37.22 +struct scatterlist;
   37.23 +struct page;
   37.24 +struct mm_struct;
   37.25 +struct pci_bus;
   37.26 +
   37.27 +typedef void ia64_mv_setup_t (char **);
   37.28 +typedef void ia64_mv_cpu_init_t (void);
   37.29 +typedef void ia64_mv_irq_init_t (void);
   37.30 +typedef void ia64_mv_send_ipi_t (int, int, int, int);
   37.31 +typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
   37.32 +typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
   37.33 +typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
   37.34 +typedef unsigned int ia64_mv_local_vector_to_irq (u8);
   37.35 +typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
   37.36 +typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
   37.37 +				       u8 size);
   37.38 +typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
   37.39 +					u8 size);
   37.40 +
   37.41 +/* DMA-mapping interface: */
   37.42 +typedef void ia64_mv_dma_init (void);
   37.43 +typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
   37.44 +typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
   37.45 +typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
   37.46 +typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
   37.47 +typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
   37.48 +typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
   37.49 +typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
   37.50 +typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
   37.51 +typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
   37.52 +typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
   37.53 +typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
   37.54 +typedef int ia64_mv_dma_supported (struct device *, u64);
   37.55 +
   37.56 +/*
   37.57 + * WARNING: The legacy I/O space is _architected_.  Platforms are
   37.58 + * expected to follow this architected model (see Section 10.7 in the
   37.59 + * IA-64 Architecture Software Developer's Manual).  Unfortunately,
   37.60 + * some broken machines do not follow that model, which is why we have
   37.61 + * to make the inX/outX operations part of the machine vector.
   37.62 + * Platform designers should follow the architected model whenever
   37.63 + * possible.
   37.64 + */
   37.65 +typedef unsigned int ia64_mv_inb_t (unsigned long);
   37.66 +typedef unsigned int ia64_mv_inw_t (unsigned long);
   37.67 +typedef unsigned int ia64_mv_inl_t (unsigned long);
   37.68 +typedef void ia64_mv_outb_t (unsigned char, unsigned long);
   37.69 +typedef void ia64_mv_outw_t (unsigned short, unsigned long);
   37.70 +typedef void ia64_mv_outl_t (unsigned int, unsigned long);
   37.71 +typedef void ia64_mv_mmiowb_t (void);
   37.72 +typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
   37.73 +typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
   37.74 +typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
   37.75 +typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
   37.76 +typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
   37.77 +typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
   37.78 +typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
   37.79 +typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
   37.80 +
   37.81 +static inline void
   37.82 +machvec_noop (void)
   37.83 +{
   37.84 +}
   37.85 +
   37.86 +static inline void
   37.87 +machvec_noop_mm (struct mm_struct *mm)
   37.88 +{
   37.89 +}
   37.90 +
   37.91 +#ifdef XEN
   37.92 +#include <xen/lib.h>
   37.93 +/*
   37.94 + * These should never get called, they just fill out the machine
   37.95 + * vectors and make the compiler happy.
   37.96 + */
   37.97 +static inline void*
   37.98 +machvec_noop_dma_alloc_coherent (struct device *dev, size_t size,
   37.99 +                                 dma_addr_t *addr, int dir)
  37.100 +{
  37.101 +	panic("%s() called", __FUNCTION__);
  37.102 +	return (void *)0;
  37.103 +}
  37.104 +
  37.105 +static inline void
  37.106 +machvec_noop_dma_free_coherent (struct device *dev, size_t size,
  37.107 +                                void *vaddr, dma_addr_t handle)
  37.108 +{
  37.109 +	panic("%s() called", __FUNCTION__);
  37.110 +}
  37.111 +
  37.112 +static inline dma_addr_t
  37.113 +machvec_noop_dma_map_single (struct device *dev, void *addr,
  37.114 +                             size_t size, int dir)
  37.115 +{
  37.116 +	panic("%s() called", __FUNCTION__);
  37.117 +	return (dma_addr_t)0;
  37.118 +}
  37.119 +
  37.120 +static inline void
  37.121 +machvec_noop_dma_unmap_single (struct device *dev, dma_addr_t vaddr,
  37.122 +                               size_t size, int dir)
  37.123 +{
  37.124 +	panic("%s() called", __FUNCTION__);
  37.125 +}
  37.126 +
  37.127 +static inline int
  37.128 +machvec_noop_dma_map_sg (struct device *dev, struct scatterlist *sglist,
  37.129 +                         int nents, int dir)
  37.130 +{
  37.131 +	panic("%s() called", __FUNCTION__);
  37.132 +	return 0;
  37.133 +}
  37.134 +
  37.135 +static inline void
  37.136 +machvec_noop_dma_unmap_sg (struct device *dev, struct scatterlist *sglist,
  37.137 +                           int nents, int dir)
  37.138 +{
  37.139 +	panic("%s() called", __FUNCTION__);
  37.140 +}
  37.141 +
  37.142 +static inline void
  37.143 +machvec_noop_dma_sync_single_for_cpu (struct device *dev, dma_addr_t vaddr,
  37.144 +                                      size_t size, int dir)
  37.145 +{
  37.146 +	panic("%s() called", __FUNCTION__);
  37.147 +}
  37.148 +
  37.149 +#define machvec_noop_dma_sync_single_for_device		\
  37.150 +	machvec_noop_dma_sync_single_for_cpu
  37.151 +
  37.152 +static inline void
  37.153 +machvec_noop_dma_sync_sg_for_cpu (struct device *dev,
  37.154 +                                  struct scatterlist *sglist,
  37.155 +                                  int nents, int dir)
  37.156 +{
  37.157 +	panic("%s() called", __FUNCTION__);
  37.158 +}
  37.159 +
  37.160 +#define machvec_noop_dma_sync_sg_for_device		\
  37.161 +	machvec_noop_dma_sync_sg_for_cpu
  37.162 +
  37.163 +static inline int
  37.164 +machvec_noop_dma_mapping_error (dma_addr_t dma_addr)
  37.165 +{
  37.166 +	panic("%s() called", __FUNCTION__);
  37.167 +	return 1;
  37.168 +}
  37.169 +
  37.170 +static inline int
  37.171 +machvec_noop_dma_supported (struct device *dev, u64 mask)
  37.172 +{
  37.173 +	panic("%s() called", __FUNCTION__);
  37.174 +	return 0;
  37.175 +}
  37.176 +
  37.177 +static inline char*
  37.178 +machvec_noop_pci_get_legacy_mem (struct pci_bus *bus)
  37.179 +{
  37.180 +	panic("%s() called", __FUNCTION__);
  37.181 +	return 0;
  37.182 +}
  37.183 +
  37.184 +static inline int
  37.185 +machvec_noop_pci_legacy_read (struct pci_bus *bus, u16 port, u32 *val, u8 size)
  37.186 +{
  37.187 +	panic("%s() called", __FUNCTION__);
  37.188 +	return 0;
  37.189 +}
  37.190 +
  37.191 +static inline int
  37.192 +machvec_noop_pci_legacy_write (struct pci_bus *bus, u16 port, u32 val, u8 size)
  37.193 +{
  37.194 +	panic("%s() called", __FUNCTION__);
  37.195 +	return 0;
  37.196 +}
  37.197 +#endif
  37.198 +
  37.199 +extern void machvec_setup (char **);
  37.200 +extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
  37.201 +extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
  37.202 +extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
  37.203 +extern void machvec_tlb_migrate_finish (struct mm_struct *);
  37.204 +
  37.205 +# if defined (CONFIG_IA64_HP_SIM)
  37.206 +#  include <asm/machvec_hpsim.h>
  37.207 +# elif defined (CONFIG_IA64_DIG)
  37.208 +#  include <asm/machvec_dig.h>
  37.209 +# elif defined (CONFIG_IA64_HP_ZX1)
  37.210 +#  include <asm/machvec_hpzx1.h>
  37.211 +# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
  37.212 +#  include <asm/machvec_hpzx1_swiotlb.h>
  37.213 +# elif defined (CONFIG_IA64_SGI_SN2)
  37.214 +#  include <asm/machvec_sn2.h>
  37.215 +# elif defined (CONFIG_IA64_GENERIC)
  37.216 +
  37.217 +# ifdef MACHVEC_PLATFORM_HEADER
  37.218 +#  include MACHVEC_PLATFORM_HEADER
  37.219 +# else
  37.220 +#  define platform_name		ia64_mv.name
  37.221 +#  define platform_setup	ia64_mv.setup
  37.222 +#  define platform_cpu_init	ia64_mv.cpu_init
  37.223 +#  define platform_irq_init	ia64_mv.irq_init
  37.224 +#  define platform_send_ipi	ia64_mv.send_ipi
  37.225 +#  define platform_timer_interrupt	ia64_mv.timer_interrupt
  37.226 +#  define platform_global_tlb_purge	ia64_mv.global_tlb_purge
  37.227 +#  define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
  37.228 +#  define platform_dma_init		ia64_mv.dma_init
  37.229 +#  define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
  37.230 +#  define platform_dma_free_coherent	ia64_mv.dma_free_coherent
  37.231 +#  define platform_dma_map_single	ia64_mv.dma_map_single
  37.232 +#  define platform_dma_unmap_single	ia64_mv.dma_unmap_single
  37.233 +#  define platform_dma_map_sg		ia64_mv.dma_map_sg
  37.234 +#  define platform_dma_unmap_sg		ia64_mv.dma_unmap_sg
  37.235 +#  define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
  37.236 +#  define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
  37.237 +#  define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
  37.238 +#  define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
  37.239 +#  define platform_dma_mapping_error		ia64_mv.dma_mapping_error
  37.240 +#  define platform_dma_supported	ia64_mv.dma_supported
  37.241 +#  define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
  37.242 +#  define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
  37.243 +#  define platform_pci_legacy_read	ia64_mv.pci_legacy_read
  37.244 +#  define platform_pci_legacy_write	ia64_mv.pci_legacy_write
  37.245 +#  define platform_inb		ia64_mv.inb
  37.246 +#  define platform_inw		ia64_mv.inw
  37.247 +#  define platform_inl		ia64_mv.inl
  37.248 +#  define platform_outb		ia64_mv.outb
  37.249 +#  define platform_outw		ia64_mv.outw
  37.250 +#  define platform_outl		ia64_mv.outl
  37.251 +#  define platform_mmiowb	ia64_mv.mmiowb
  37.252 +#  define platform_readb        ia64_mv.readb
  37.253 +#  define platform_readw        ia64_mv.readw
  37.254 +#  define platform_readl        ia64_mv.readl
  37.255 +#  define platform_readq        ia64_mv.readq
  37.256 +#  define platform_readb_relaxed        ia64_mv.readb_relaxed
  37.257 +#  define platform_readw_relaxed        ia64_mv.readw_relaxed
  37.258 +#  define platform_readl_relaxed        ia64_mv.readl_relaxed
  37.259 +#  define platform_readq_relaxed        ia64_mv.readq_relaxed
  37.260 +# endif
  37.261 +
  37.262 +/* __attribute__((__aligned__(16))) is required to make size of the
  37.263 + * structure multiple of 16 bytes.
  37.264 + * This will fillup the holes created because of section 3.3.1 in
  37.265 + * Software Conventions guide.
  37.266 + */
  37.267 +struct ia64_machine_vector {
  37.268 +	const char *name;
  37.269 +	ia64_mv_setup_t *setup;
  37.270 +	ia64_mv_cpu_init_t *cpu_init;
  37.271 +	ia64_mv_irq_init_t *irq_init;
  37.272 +	ia64_mv_send_ipi_t *send_ipi;
  37.273 +	ia64_mv_timer_interrupt_t *timer_interrupt;
  37.274 +	ia64_mv_global_tlb_purge_t *global_tlb_purge;
  37.275 +	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
  37.276 +	ia64_mv_dma_init *dma_init;
  37.277 +	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
  37.278 +	ia64_mv_dma_free_coherent *dma_free_coherent;
  37.279 +	ia64_mv_dma_map_single *dma_map_single;
  37.280 +	ia64_mv_dma_unmap_single *dma_unmap_single;
  37.281 +	ia64_mv_dma_map_sg *dma_map_sg;
  37.282 +	ia64_mv_dma_unmap_sg *dma_unmap_sg;
  37.283 +	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
  37.284 +	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
  37.285 +	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
  37.286 +	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
  37.287 +	ia64_mv_dma_mapping_error *dma_mapping_error;
  37.288 +	ia64_mv_dma_supported *dma_supported;
  37.289 +	ia64_mv_local_vector_to_irq *local_vector_to_irq;
  37.290 +	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
  37.291 +	ia64_mv_pci_legacy_read_t *pci_legacy_read;
  37.292 +	ia64_mv_pci_legacy_write_t *pci_legacy_write;
  37.293 +	ia64_mv_inb_t *inb;
  37.294 +	ia64_mv_inw_t *inw;
  37.295 +	ia64_mv_inl_t *inl;
  37.296 +	ia64_mv_outb_t *outb;
  37.297 +	ia64_mv_outw_t *outw;
  37.298 +	ia64_mv_outl_t *outl;
  37.299 +	ia64_mv_mmiowb_t *mmiowb;
  37.300 +	ia64_mv_readb_t *readb;
  37.301 +	ia64_mv_readw_t *readw;
  37.302 +	ia64_mv_readl_t *readl;
  37.303 +	ia64_mv_readq_t *readq;
  37.304 +	ia64_mv_readb_relaxed_t *readb_relaxed;
  37.305 +	ia64_mv_readw_relaxed_t *readw_relaxed;
  37.306 +	ia64_mv_readl_relaxed_t *readl_relaxed;
  37.307 +	ia64_mv_readq_relaxed_t *readq_relaxed;
  37.308 +} __attribute__((__aligned__(16))); /* align attrib? see above comment */
  37.309 +
  37.310 +#define MACHVEC_INIT(name)			\
  37.311 +{						\
  37.312 +	#name,					\
  37.313 +	platform_setup,				\
  37.314 +	platform_cpu_init,			\
  37.315 +	platform_irq_init,			\
  37.316 +	platform_send_ipi,			\
  37.317 +	platform_timer_interrupt,		\
  37.318 +	platform_global_tlb_purge,		\
  37.319 +	platform_tlb_migrate_finish,		\
  37.320 +	platform_dma_init,			\
  37.321 +	platform_dma_alloc_coherent,		\
  37.322 +	platform_dma_free_coherent,		\
  37.323 +	platform_dma_map_single,		\
  37.324 +	platform_dma_unmap_single,		\
  37.325 +	platform_dma_map_sg,			\
  37.326 +	platform_dma_unmap_sg,			\
  37.327 +	platform_dma_sync_single_for_cpu,	\
  37.328 +	platform_dma_sync_sg_for_cpu,		\
  37.329 +	platform_dma_sync_single_for_device,	\
  37.330 +	platform_dma_sync_sg_for_device,	\
  37.331 +	platform_dma_mapping_error,			\
  37.332 +	platform_dma_supported,			\
  37.333 +	platform_local_vector_to_irq,		\
  37.334 +	platform_pci_get_legacy_mem,		\
  37.335 +	platform_pci_legacy_read,		\
  37.336 +	platform_pci_legacy_write,		\
  37.337 +	platform_inb,				\
  37.338 +	platform_inw,				\
  37.339 +	platform_inl,				\
  37.340 +	platform_outb,				\
  37.341 +	platform_outw,				\
  37.342 +	platform_outl,				\
  37.343 +	platform_mmiowb,			\
  37.344 +	platform_readb,				\
  37.345 +	platform_readw,				\
  37.346 +	platform_readl,				\
  37.347 +	platform_readq,				\
  37.348 +	platform_readb_relaxed,			\
  37.349 +	platform_readw_relaxed,			\
  37.350 +	platform_readl_relaxed,			\
  37.351 +	platform_readq_relaxed,			\
  37.352 +}
  37.353 +
  37.354 +extern struct ia64_machine_vector ia64_mv;
  37.355 +extern void machvec_init (const char *name);
  37.356 +
  37.357 +# else
  37.358 +#  error Unknown configuration.  Update asm-ia64/machvec.h.
  37.359 +# endif /* CONFIG_IA64_GENERIC */
  37.360 +
  37.361 +/*
  37.362 + * Declare default routines which aren't declared anywhere else:
  37.363 + */
  37.364 +extern ia64_mv_dma_init			swiotlb_init;
  37.365 +extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
  37.366 +extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
  37.367 +extern ia64_mv_dma_map_single		swiotlb_map_single;
  37.368 +extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
  37.369 +extern ia64_mv_dma_map_sg		swiotlb_map_sg;
  37.370 +extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
  37.371 +extern ia64_mv_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu;
  37.372 +extern ia64_mv_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu;
  37.373 +extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
  37.374 +extern ia64_mv_dma_sync_sg_for_device	swiotlb_sync_sg_for_device;
  37.375 +extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
  37.376 +extern ia64_mv_dma_supported		swiotlb_dma_supported;
  37.377 +
  37.378 +/*
  37.379 + * Define default versions so we can extend machvec for new platforms without having
  37.380 + * to update the machvec files for all existing platforms.
  37.381 + */
  37.382 +#ifndef platform_setup
  37.383 +# define platform_setup			machvec_setup
  37.384 +#endif
  37.385 +#ifndef platform_cpu_init
  37.386 +# define platform_cpu_init		machvec_noop
  37.387 +#endif
  37.388 +#ifndef platform_irq_init
  37.389 +# define platform_irq_init		machvec_noop
  37.390 +#endif
  37.391 +
  37.392 +#ifndef platform_send_ipi
  37.393 +# define platform_send_ipi		ia64_send_ipi	/* default to architected version */
  37.394 +#endif
  37.395 +#ifndef platform_timer_interrupt
  37.396 +# define platform_timer_interrupt 	machvec_timer_interrupt
  37.397 +#endif
  37.398 +#ifndef platform_global_tlb_purge
  37.399 +# define platform_global_tlb_purge	ia64_global_tlb_purge /* default to architected version */
  37.400 +#endif
  37.401 +#ifndef platform_tlb_migrate_finish
  37.402 +# define platform_tlb_migrate_finish	machvec_noop_mm
  37.403 +#endif
  37.404 +#ifndef platform_dma_init
  37.405 +# define platform_dma_init		swiotlb_init
  37.406 +#endif
  37.407 +#ifndef platform_dma_alloc_coherent
  37.408 +# define platform_dma_alloc_coherent	swiotlb_alloc_coherent
  37.409 +#endif
  37.410 +#ifndef platform_dma_free_coherent
  37.411 +# define platform_dma_free_coherent	swiotlb_free_coherent
  37.412 +#endif
  37.413 +#ifndef platform_dma_map_single
  37.414 +# define platform_dma_map_single	swiotlb_map_single
  37.415 +#endif
  37.416 +#ifndef platform_dma_unmap_single
  37.417 +# define platform_dma_unmap_single	swiotlb_unmap_single
  37.418 +#endif
  37.419 +#ifndef platform_dma_map_sg
  37.420 +# define platform_dma_map_sg		swiotlb_map_sg
  37.421 +#endif
  37.422 +#ifndef platform_dma_unmap_sg
  37.423 +# define platform_dma_unmap_sg		swiotlb_unmap_sg
  37.424 +#endif
  37.425 +#ifndef platform_dma_sync_single_for_cpu
  37.426 +# define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
  37.427 +#endif
  37.428 +#ifndef platform_dma_sync_sg_for_cpu
  37.429 +# define platform_dma_sync_sg_for_cpu		swiotlb_sync_sg_for_cpu
  37.430 +#endif
  37.431 +#ifndef platform_dma_sync_single_for_device
  37.432 +# define platform_dma_sync_single_for_device	swiotlb_sync_single_for_device
  37.433 +#endif
  37.434 +#ifndef platform_dma_sync_sg_for_device
  37.435 +# define platform_dma_sync_sg_for_device	swiotlb_sync_sg_for_device
  37.436 +#endif
  37.437 +#ifndef platform_dma_mapping_error
  37.438 +# define platform_dma_mapping_error		swiotlb_dma_mapping_error
  37.439 +#endif
  37.440 +#ifndef platform_dma_supported
  37.441 +# define  platform_dma_supported	swiotlb_dma_supported
  37.442 +#endif
  37.443 +#ifndef platform_local_vector_to_irq
  37.444 +# define platform_local_vector_to_irq	__ia64_local_vector_to_irq
  37.445 +#endif
  37.446 +#ifndef platform_pci_get_legacy_mem
  37.447 +# define platform_pci_get_legacy_mem	ia64_pci_get_legacy_mem
  37.448 +#endif
  37.449 +#ifndef platform_pci_legacy_read
  37.450 +# define platform_pci_legacy_read	ia64_pci_legacy_read
  37.451 +#endif
  37.452 +#ifndef platform_pci_legacy_write
  37.453 +# define platform_pci_legacy_write	ia64_pci_legacy_write
  37.454 +#endif
  37.455 +#ifndef platform_inb
  37.456 +# define platform_inb		__ia64_inb
  37.457 +#endif
  37.458 +#ifndef platform_inw
  37.459 +# define platform_inw		__ia64_inw
  37.460 +#endif
  37.461 +#ifndef platform_inl
  37.462 +# define platform_inl		__ia64_inl
  37.463 +#endif
  37.464 +#ifndef platform_outb
  37.465 +# define platform_outb		__ia64_outb
  37.466 +#endif
  37.467 +#ifndef platform_outw
  37.468 +# define platform_outw		__ia64_outw
  37.469 +#endif
  37.470 +#ifndef platform_outl
  37.471 +# define platform_outl		__ia64_outl
  37.472 +#endif
  37.473 +#ifndef platform_mmiowb
  37.474 +# define platform_mmiowb	__ia64_mmiowb
  37.475 +#endif
  37.476 +#ifndef platform_readb
  37.477 +# define platform_readb		__ia64_readb
  37.478 +#endif
  37.479 +#ifndef platform_readw
  37.480 +# define platform_readw		__ia64_readw
  37.481 +#endif
  37.482 +#ifndef platform_readl
  37.483 +# define platform_readl		__ia64_readl
  37.484 +#endif
  37.485 +#ifndef platform_readq
  37.486 +# define platform_readq		__ia64_readq
  37.487 +#endif
  37.488 +#ifndef platform_readb_relaxed
  37.489 +# define platform_readb_relaxed	__ia64_readb_relaxed
  37.490 +#endif
  37.491 +#ifndef platform_readw_relaxed
  37.492 +# define platform_readw_relaxed	__ia64_readw_relaxed
  37.493 +#endif
  37.494 +#ifndef platform_readl_relaxed
  37.495 +# define platform_readl_relaxed	__ia64_readl_relaxed
  37.496 +#endif
  37.497 +#ifndef platform_readq_relaxed
  37.498 +# define platform_readq_relaxed	__ia64_readq_relaxed
  37.499 +#endif
  37.500 +
  37.501 +#endif /* _ASM_IA64_MACHVEC_H */
    38.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    38.2 +++ b/xen/include/asm-ia64/linux-xen/asm/machvec_dig.h	Wed Dec 20 14:55:02 2006 -0700
    38.3 @@ -0,0 +1,46 @@
    38.4 +#ifndef _ASM_IA64_MACHVEC_DIG_h
    38.5 +#define _ASM_IA64_MACHVEC_DIG_h
    38.6 +
    38.7 +extern ia64_mv_setup_t dig_setup;
    38.8 +
    38.9 +/*
   38.10 + * This stuff has dual use!
   38.11 + *
   38.12 + * For a generic kernel, the macros are used to initialize the
   38.13 + * platform's machvec structure.  When compiling a non-generic kernel,
   38.14 + * the macros are used directly.
   38.15 + */
   38.16 +#define platform_name		"dig"
   38.17 +#ifdef XEN
   38.18 +/*
   38.19 + * All the World is a PC .... yay! yay! yay!
   38.20 + */
   38.21 +extern ia64_mv_setup_t hpsim_setup;
   38.22 +#define platform_setup				hpsim_setup
   38.23 +
   38.24 +#define platform_dma_init			machvec_noop
   38.25 +#define platform_dma_alloc_coherent		machvec_noop_dma_alloc_coherent
   38.26 +#define platform_dma_free_coherent		machvec_noop_dma_free_coherent
   38.27 +#define platform_dma_map_single			machvec_noop_dma_map_single
   38.28 +#define platform_dma_unmap_single		machvec_noop_dma_unmap_single
   38.29 +#define platform_dma_map_sg			machvec_noop_dma_map_sg
   38.30 +#define platform_dma_unmap_sg			machvec_noop_dma_unmap_sg
   38.31 +#define platform_dma_sync_single_for_cpu	\
   38.32 +	machvec_noop_dma_sync_single_for_cpu
   38.33 +#define platform_dma_sync_sg_for_cpu		\
   38.34 +	machvec_noop_dma_sync_sg_for_cpu
   38.35 +#define platform_dma_sync_single_for_device	\
   38.36 +	machvec_noop_dma_sync_single_for_device
   38.37 +#define platform_dma_sync_sg_for_device		\
   38.38 +	machvec_noop_dma_sync_sg_for_device
   38.39 +#define platform_dma_mapping_error		machvec_noop_dma_mapping_error
   38.40 +#define platform_dma_supported			machvec_noop_dma_supported
   38.41 +
   38.42 +#define platform_pci_get_legacy_mem		machvec_noop_pci_get_legacy_mem
   38.43 +#define platform_pci_legacy_read		machvec_noop_pci_legacy_read
   38.44 +#define platform_pci_legacy_write		machvec_noop_pci_legacy_write
   38.45 +#else
   38.46 +#define platform_setup		dig_setup
   38.47 +#endif
   38.48 +
   38.49 +#endif /* _ASM_IA64_MACHVEC_DIG_h */
    39.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    39.2 +++ b/xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h	Wed Dec 20 14:55:02 2006 -0700
    39.3 @@ -0,0 +1,66 @@
    39.4 +#ifndef _ASM_IA64_MACHVEC_HPZX1_h
    39.5 +#define _ASM_IA64_MACHVEC_HPZX1_h
    39.6 +
    39.7 +extern ia64_mv_setup_t			dig_setup;
    39.8 +extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
    39.9 +extern ia64_mv_dma_free_coherent	sba_free_coherent;
   39.10 +extern ia64_mv_dma_map_single		sba_map_single;
   39.11 +extern ia64_mv_dma_unmap_single		sba_unmap_single;
   39.12 +extern ia64_mv_dma_map_sg		sba_map_sg;
   39.13 +extern ia64_mv_dma_unmap_sg		sba_unmap_sg;
   39.14 +extern ia64_mv_dma_supported		sba_dma_supported;
   39.15 +extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
   39.16 +
   39.17 +/*
   39.18 + * This stuff has dual use!
   39.19 + *
   39.20 + * For a generic kernel, the macros are used to initialize the
   39.21 + * platform's machvec structure.  When compiling a non-generic kernel,
   39.22 + * the macros are used directly.
   39.23 + */
   39.24 +#define platform_name				"hpzx1"
   39.25 +#ifdef XEN
   39.26 +extern ia64_mv_setup_t hpsim_setup;
   39.27 +extern ia64_mv_irq_init_t hpsim_irq_init;
   39.28 +#define platform_setup				hpsim_setup
   39.29 +#define platform_irq_init			hpsim_irq_init
   39.30 +
   39.31 +#define platform_dma_init			machvec_noop
   39.32 +#define platform_dma_alloc_coherent		machvec_noop_dma_alloc_coherent
   39.33 +#define platform_dma_free_coherent		machvec_noop_dma_free_coherent
   39.34 +#define platform_dma_map_single			machvec_noop_dma_map_single
   39.35 +#define platform_dma_unmap_single		machvec_noop_dma_unmap_single
   39.36 +#define platform_dma_map_sg			machvec_noop_dma_map_sg
   39.37 +#define platform_dma_unmap_sg			machvec_noop_dma_unmap_sg
   39.38 +#define platform_dma_sync_single_for_cpu	\
   39.39 +	machvec_noop_dma_sync_single_for_cpu
   39.40 +#define platform_dma_sync_sg_for_cpu		\
   39.41 +	machvec_noop_dma_sync_sg_for_cpu
   39.42 +#define platform_dma_sync_single_for_device	\
   39.43 +	machvec_noop_dma_sync_single_for_device
   39.44 +#define platform_dma_sync_sg_for_device		\
   39.45 +	machvec_noop_dma_sync_sg_for_device
   39.46 +#define platform_dma_mapping_error		machvec_noop_dma_mapping_error
   39.47 +#define platform_dma_supported			machvec_noop_dma_supported
   39.48 +
   39.49 +#define platform_pci_get_legacy_mem		machvec_noop_pci_get_legacy_mem
   39.50 +#define platform_pci_legacy_read		machvec_noop_pci_legacy_read
   39.51 +#define platform_pci_legacy_write		machvec_noop_pci_legacy_write
   39.52 +#else
   39.53 +#define platform_setup				dig_setup
   39.54 +#define platform_dma_init			machvec_noop
   39.55 +#define platform_dma_alloc_coherent		sba_alloc_coherent
   39.56 +#define platform_dma_free_coherent		sba_free_coherent
   39.57 +#define platform_dma_map_single			sba_map_single
   39.58 +#define platform_dma_unmap_single		sba_unmap_single
   39.59 +#define platform_dma_map_sg			sba_map_sg
   39.60 +#define platform_dma_unmap_sg			sba_unmap_sg
   39.61 +#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
   39.62 +#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
   39.63 +#define platform_dma_sync_single_for_device	machvec_dma_sync_single
   39.64 +#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
   39.65 +#define platform_dma_supported			sba_dma_supported
   39.66 +#define platform_dma_mapping_error		sba_dma_mapping_error
   39.67 +#endif
   39.68 +
   39.69 +#endif /* _ASM_IA64_MACHVEC_HPZX1_h */
    40.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    40.2 +++ b/xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h	Wed Dec 20 14:55:02 2006 -0700
    40.3 @@ -0,0 +1,166 @@
    40.4 +/*
    40.5 + * Copyright (c) 2002-2003,2006 Silicon Graphics, Inc.  All Rights Reserved.
    40.6 + * 
    40.7 + * This program is free software; you can redistribute it and/or modify it 
    40.8 + * under the terms of version 2 of the GNU General Public License 
    40.9 + * as published by the Free Software Foundation.
   40.10 + * 
   40.11 + * This program is distributed in the hope that it would be useful, but 
   40.12 + * WITHOUT ANY WARRANTY; without even the implied warranty of 
   40.13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
   40.14 + * 
   40.15 + * Further, this software is distributed without any warranty that it is 
   40.16 + * free of the rightful claim of any third person regarding infringement 
   40.17 + * or the like.  Any license provided herein, whether implied or 
   40.18 + * otherwise, applies only to this software file.  Patent licenses, if 
   40.19 + * any, provided herein do not apply to combinations of this program with 
   40.20 + * other software, or any other product whatsoever.
   40.21 + * 
   40.22 + * You should have received a copy of the GNU General Public 
   40.23 + * License along with this program; if not, write the Free Software 
   40.24 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
   40.25 + * 
   40.26 + * For further information regarding this notice, see: 
   40.27 + * 
   40.28 + * http://oss.sgi.com/projects/GenInfo/NoticeExplan
   40.29 + */
   40.30 +
   40.31 +#ifndef _ASM_IA64_MACHVEC_SN2_H
   40.32 +#define _ASM_IA64_MACHVEC_SN2_H
   40.33 +
   40.34 +extern ia64_mv_setup_t sn_setup;
   40.35 +extern ia64_mv_cpu_init_t sn_cpu_init;
   40.36 +extern ia64_mv_irq_init_t sn_irq_init;
   40.37 +extern ia64_mv_send_ipi_t sn2_send_IPI;
   40.38 +extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
   40.39 +extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
   40.40 +extern ia64_mv_tlb_migrate_finish_t	sn_tlb_migrate_finish;
   40.41 +extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
   40.42 +extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
   40.43 +extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read;
   40.44 +extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write;
   40.45 +extern ia64_mv_inb_t __sn_inb;
   40.46 +extern ia64_mv_inw_t __sn_inw;
   40.47 +extern ia64_mv_inl_t __sn_inl;
   40.48 +extern ia64_mv_outb_t __sn_outb;
   40.49 +extern ia64_mv_outw_t __sn_outw;
   40.50 +extern ia64_mv_outl_t __sn_outl;
   40.51 +extern ia64_mv_mmiowb_t __sn_mmiowb;
   40.52 +extern ia64_mv_readb_t __sn_readb;
   40.53 +extern ia64_mv_readw_t __sn_readw;
   40.54 +extern ia64_mv_readl_t __sn_readl;
   40.55 +extern ia64_mv_readq_t __sn_readq;
   40.56 +extern ia64_mv_readb_t __sn_readb_relaxed;
   40.57 +extern ia64_mv_readw_t __sn_readw_relaxed;
   40.58 +extern ia64_mv_readl_t __sn_readl_relaxed;
   40.59 +extern ia64_mv_readq_t __sn_readq_relaxed;
   40.60 +extern ia64_mv_dma_alloc_coherent	sn_dma_alloc_coherent;
   40.61 +extern ia64_mv_dma_free_coherent	sn_dma_free_coherent;
   40.62 +extern ia64_mv_dma_map_single		sn_dma_map_single;
   40.63 +extern ia64_mv_dma_unmap_single		sn_dma_unmap_single;
   40.64 +extern ia64_mv_dma_map_sg		sn_dma_map_sg;
   40.65 +extern ia64_mv_dma_unmap_sg		sn_dma_unmap_sg;
   40.66 +extern ia64_mv_dma_sync_single_for_cpu	sn_dma_sync_single_for_cpu;
   40.67 +extern ia64_mv_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu;
   40.68 +extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
   40.69 +extern ia64_mv_dma_sync_sg_for_device	sn_dma_sync_sg_for_device;
   40.70 +extern ia64_mv_dma_mapping_error	sn_dma_mapping_error;
   40.71 +extern ia64_mv_dma_supported		sn_dma_supported;
   40.72 +#ifndef XEN
   40.73 +extern ia64_mv_migrate_t		sn_migrate;
   40.74 +extern ia64_mv_setup_msi_irq_t		sn_setup_msi_irq;
   40.75 +extern ia64_mv_teardown_msi_irq_t	sn_teardown_msi_irq;
   40.76 +#endif
   40.77 +
   40.78 +
   40.79 +/*
   40.80 + * This stuff has dual use!
   40.81 + *
   40.82 + * For a generic kernel, the macros are used to initialize the
   40.83 + * platform's machvec structure.  When compiling a non-generic kernel,
   40.84 + * the macros are used directly.
   40.85 + */
   40.86 +#define platform_name			"sn2"
   40.87 +#define platform_setup			sn_setup
   40.88 +#define platform_cpu_init		sn_cpu_init
   40.89 +#define platform_irq_init		sn_irq_init
   40.90 +#define platform_send_ipi		sn2_send_IPI
   40.91 +#ifndef XEN
   40.92 +#define platform_timer_interrupt	sn_timer_interrupt
   40.93 +#endif
   40.94 +#define platform_global_tlb_purge       sn2_global_tlb_purge
   40.95 +#ifndef XEN
   40.96 +#define platform_tlb_migrate_finish	sn_tlb_migrate_finish
   40.97 +#endif
   40.98 +#define platform_pci_fixup		sn_pci_fixup
   40.99 +#define platform_inb			__sn_inb
  40.100 +#define platform_inw			__sn_inw
  40.101 +#define platform_inl			__sn_inl
  40.102 +#define platform_outb			__sn_outb
  40.103 +#define platform_outw			__sn_outw
  40.104 +#define platform_outl			__sn_outl
  40.105 +#define platform_mmiowb			__sn_mmiowb
  40.106 +#define platform_readb			__sn_readb
  40.107 +#define platform_readw			__sn_readw
  40.108 +#define platform_readl			__sn_readl
  40.109 +#define platform_readq			__sn_readq
  40.110 +#define platform_readb_relaxed		__sn_readb_relaxed
  40.111 +#define platform_readw_relaxed		__sn_readw_relaxed
  40.112 +#define platform_readl_relaxed		__sn_readl_relaxed
  40.113 +#define platform_readq_relaxed		__sn_readq_relaxed
  40.114 +#define platform_local_vector_to_irq	sn_local_vector_to_irq
  40.115 +#ifdef XEN
  40.116 +#define platform_pci_get_legacy_mem	machvec_noop_pci_get_legacy_mem
  40.117 +#define platform_pci_legacy_read	machvec_noop_pci_legacy_read
  40.118 +#define platform_pci_legacy_write	machvec_noop_pci_legacy_write
  40.119 +#else
  40.120 +#define platform_pci_get_legacy_mem	sn_pci_get_legacy_mem
  40.121 +#define platform_pci_legacy_read	sn_pci_legacy_read
  40.122 +#define platform_pci_legacy_write	sn_pci_legacy_write
  40.123 +#endif
  40.124 +#define platform_dma_init		machvec_noop
  40.125 +#ifdef XEN
  40.126 +#define platform_dma_alloc_coherent	machvec_noop_dma_alloc_coherent
  40.127 +#define platform_dma_free_coherent	machvec_noop_dma_free_coherent
  40.128 +#define platform_dma_map_single		machvec_noop_dma_map_single
  40.129 +#define platform_dma_unmap_single	machvec_noop_dma_unmap_single
  40.130 +#define platform_dma_map_sg		machvec_noop_dma_map_sg
  40.131 +#define platform_dma_unmap_sg		machvec_noop_dma_unmap_sg
  40.132 +#define platform_dma_sync_single_for_cpu	\
  40.133 +	machvec_noop_dma_sync_single_for_cpu
  40.134 +#define platform_dma_sync_sg_for_cpu		\
  40.135 +	machvec_noop_dma_sync_sg_for_cpu
  40.136 +#define platform_dma_sync_single_for_device	\
  40.137 +	machvec_noop_dma_sync_single_for_device
  40.138 +#define platform_dma_sync_sg_for_device	machvec_noop_dma_sync_sg_for_device
  40.139 +#define platform_dma_mapping_error	machvec_noop_dma_mapping_error
  40.140 +#define platform_dma_supported		machvec_noop_dma_supported
  40.141 +#else
  40.142 +#define platform_dma_alloc_coherent	sn_dma_alloc_coherent
  40.143 +#define platform_dma_free_coherent	sn_dma_free_coherent
  40.144 +#define platform_dma_map_single		sn_dma_map_single
  40.145 +#define platform_dma_unmap_single	sn_dma_unmap_single
  40.146 +#define platform_dma_map_sg		sn_dma_map_sg
  40.147 +#define platform_dma_unmap_sg		sn_dma_unmap_sg
  40.148 +#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
  40.149 +#define platform_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu
  40.150 +#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
  40.151 +#define platform_dma_sync_sg_for_device	sn_dma_sync_sg_for_device
  40.152 +#define platform_dma_mapping_error		sn_dma_mapping_error
  40.153 +#define platform_dma_supported		sn_dma_supported
  40.154 +#define platform_migrate		sn_migrate
  40.155 +#endif
  40.156 +
  40.157 +#ifndef XEN
  40.158 +#ifdef CONFIG_PCI_MSI
  40.159 +#define platform_setup_msi_irq		sn_setup_msi_irq
  40.160 +#define platform_teardown_msi_irq	sn_teardown_msi_irq
  40.161 +#else
  40.162 +#define platform_setup_msi_irq		((ia64_mv_setup_msi_irq_t*)NULL)
  40.163 +#define platform_teardown_msi_irq	((ia64_mv_teardown_msi_irq_t*)NULL)
  40.164 +#endif
  40.165 +#endif
  40.166 +
  40.167 +#include <asm/sn/io.h>
  40.168 +
  40.169 +#endif /* _ASM_IA64_MACHVEC_SN2_H */
    41.1 --- a/xen/include/asm-ia64/linux-xen/asm/page.h	Wed Dec 20 08:53:42 2006 -0700
    41.2 +++ b/xen/include/asm-ia64/linux-xen/asm/page.h	Wed Dec 20 14:55:02 2006 -0700
    41.3 @@ -12,6 +12,16 @@
    41.4  #include <asm/intrinsics.h>
    41.5  #include <asm/types.h>
    41.6  
    41.7 +#ifdef XEN  /* This will go away with newer upstream */
    41.8 +#define RGN_SHIFT	61
    41.9 +#define RGN_BASE(r)	(r << RGN_SHIFT)
   41.10 +#define RGN_BITS	RGN_BASE(-1)
   41.11 +#define RGN_HPAGE	REGION_HPAGE
   41.12 +#ifndef CONFIG_HUGETLB_PAGE
   41.13 +# define REGION_HPAGE	(4UL)
   41.14 +#endif
   41.15 +#endif
   41.16 +
   41.17  /*
   41.18   * PAGE_SHIFT determines the actual kernel page size.
   41.19   */
    42.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    42.2 +++ b/xen/include/asm-ia64/linux-xen/asm/pci.h	Wed Dec 20 14:55:02 2006 -0700
    42.3 @@ -0,0 +1,185 @@
    42.4 +#ifndef _ASM_IA64_PCI_H
    42.5 +#define _ASM_IA64_PCI_H
    42.6 +
    42.7 +#include <linux/mm.h>
    42.8 +#include <linux/slab.h>
    42.9 +#include <linux/spinlock.h>
   42.10 +#include <linux/string.h>
   42.11 +#include <linux/types.h>
   42.12 +#ifdef XEN
   42.13 +#include <linux/ioport.h>
   42.14 +#endif
   42.15 +
   42.16 +#include <asm/io.h>
   42.17 +#ifndef XEN
   42.18 +#include <asm/scatterlist.h>
   42.19 +#endif
   42.20 +
   42.21 +/*
   42.22 + * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
   42.23 + * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
   42.24 + * loader.
   42.25 + */
   42.26 +#define pcibios_assign_all_busses()     0
   42.27 +#define pcibios_scan_all_fns(a, b)	0
   42.28 +
   42.29 +#define PCIBIOS_MIN_IO		0x1000
   42.30 +#define PCIBIOS_MIN_MEM		0x10000000
   42.31 +
   42.32 +void pcibios_config_init(void);
   42.33 +
   42.34 +struct pci_dev;
   42.35 +
   42.36 +/*
   42.37 + * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence
   42.38 + * between device bus addresses and CPU physical addresses.  Platforms with a hardware I/O
   42.39 + * MMU _must_ turn this off to suppress the bounce buffer handling code in the block and
   42.40 + * network device layers.  Platforms with separate bus address spaces _must_ turn this off
   42.41 + * and provide a device DMA mapping implementation that takes care of the necessary
   42.42 + * address translation.
   42.43 + *
   42.44 + * For now, the ia64 platforms which may have separate/multiple bus address spaces all
   42.45 + * have I/O MMUs which support the merging of physically discontiguous buffers, so we can
   42.46 + * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
   42.47 + */
   42.48 +extern unsigned long ia64_max_iommu_merge_mask;
   42.49 +#define PCI_DMA_BUS_IS_PHYS	(ia64_max_iommu_merge_mask == ~0UL)
   42.50 +
   42.51 +static inline void
   42.52 +pcibios_set_master (struct pci_dev *dev)
   42.53 +{
   42.54 +	/* No special bus mastering setup handling */
   42.55 +}
   42.56 +
   42.57 +static inline void
   42.58 +pcibios_penalize_isa_irq (int irq, int active)
   42.59 +{
   42.60 +	/* We don't do dynamic PCI IRQ allocation */
   42.61 +}
   42.62 +
   42.63 +#define HAVE_ARCH_PCI_MWI 1
   42.64 +extern int pcibios_prep_mwi (struct pci_dev *);
   42.65 +
   42.66 +#ifndef XEN
   42.67 +#include <asm-generic/pci-dma-compat.h>
   42.68 +#endif
   42.69 +
   42.70 +/* pci_unmap_{single,page} is not a nop, thus... */
   42.71 +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
   42.72 +	dma_addr_t ADDR_NAME;
   42.73 +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
   42.74 +	__u32 LEN_NAME;
   42.75 +#define pci_unmap_addr(PTR, ADDR_NAME)			\
   42.76 +	((PTR)->ADDR_NAME)
   42.77 +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
   42.78 +	(((PTR)->ADDR_NAME) = (VAL))
   42.79 +#define pci_unmap_len(PTR, LEN_NAME)			\
   42.80 +	((PTR)->LEN_NAME)
   42.81 +#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
   42.82 +	(((PTR)->LEN_NAME) = (VAL))
   42.83 +
   42.84 +/* The ia64 platform always supports 64-bit addressing. */
   42.85 +#define pci_dac_dma_supported(pci_dev, mask)		(1)
   42.86 +#define pci_dac_page_to_dma(dev,pg,off,dir)		((dma_addr_t) page_to_bus(pg) + (off))
   42.87 +#define pci_dac_dma_to_page(dev,dma_addr)		(virt_to_page(bus_to_virt(dma_addr)))
   42.88 +#define pci_dac_dma_to_offset(dev,dma_addr)		offset_in_page(dma_addr)
   42.89 +#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir)	do { } while (0)
   42.90 +#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir)	do { mb(); } while (0)
   42.91 +
   42.92 +#define sg_dma_len(sg)		((sg)->dma_length)
   42.93 +#define sg_dma_address(sg)	((sg)->dma_address)
   42.94 +
   42.95 +#ifdef CONFIG_PCI
   42.96 +static inline void pci_dma_burst_advice(struct pci_dev *pdev,
   42.97 +					enum pci_dma_burst_strategy *strat,
   42.98 +					unsigned long *strategy_parameter)
   42.99 +{
  42.100 +	unsigned long cacheline_size;
  42.101 +	u8 byte;
  42.102 +
  42.103 +	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
  42.104 +	if (byte == 0)
  42.105 +		cacheline_size = 1024;
  42.106 +	else
  42.107 +		cacheline_size = (int) byte * 4;
  42.108 +
  42.109 +	*strat = PCI_DMA_BURST_MULTIPLE;
  42.110 +	*strategy_parameter = cacheline_size;
  42.111 +}
  42.112 +#endif
  42.113 +
  42.114 +#define HAVE_PCI_MMAP
  42.115 +extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
  42.116 +				enum pci_mmap_state mmap_state, int write_combine);
  42.117 +#define HAVE_PCI_LEGACY
  42.118 +extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
  42.119 +				      struct vm_area_struct *vma);
  42.120 +#ifndef XEN
  42.121 +extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
  42.122 +				  size_t count);
  42.123 +extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
  42.124 +				   size_t count);
  42.125 +extern int pci_mmap_legacy_mem(struct kobject *kobj,
  42.126 +			       struct bin_attribute *attr,
  42.127 +			       struct vm_area_struct *vma);
  42.128 +#endif
  42.129 +
  42.130 +#define pci_get_legacy_mem platform_pci_get_legacy_mem
  42.131 +#define pci_legacy_read platform_pci_legacy_read
  42.132 +#define pci_legacy_write platform_pci_legacy_write
  42.133 +
  42.134 +struct pci_window {
  42.135 +	struct resource resource;
  42.136 +	u64 offset;
  42.137 +};
  42.138 +
  42.139 +struct pci_controller {
  42.140 +	void *acpi_handle;
  42.141 +	void *iommu;
  42.142 +	int segment;
  42.143 +	int node;		/* nearest node with memory or -1 for global allocation */
  42.144 +
  42.145 +	unsigned int windows;
  42.146 +	struct pci_window *window;
  42.147 +
  42.148 +	void *platform_data;
  42.149 +};
  42.150 +
  42.151 +#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
  42.152 +#define pci_domain_nr(busdev)    (PCI_CONTROLLER(busdev)->segment)
  42.153 +
  42.154 +extern struct pci_ops pci_root_ops;
  42.155 +
  42.156 +static inline int pci_proc_domain(struct pci_bus *bus)
  42.157 +{
  42.158 +	return (pci_domain_nr(bus) != 0);
  42.159 +}
  42.160 +
  42.161 +static inline void pcibios_add_platform_entries(struct pci_dev *dev)
  42.162 +{
  42.163 +}
  42.164 +
  42.165 +extern void pcibios_resource_to_bus(struct pci_dev *dev,
  42.166 +		struct pci_bus_region *region, struct resource *res);
  42.167 +
  42.168 +extern void pcibios_bus_to_resource(struct pci_dev *dev,
  42.169 +		struct resource *res, struct pci_bus_region *region);
  42.170 +
  42.171 +#ifndef XEN
  42.172 +static inline struct resource *
  42.173 +pcibios_select_root(struct pci_dev *pdev, struct resource *res)
  42.174 +{
  42.175 +	struct resource *root = NULL;
  42.176 +
  42.177 +	if (res->flags & IORESOURCE_IO)
  42.178 +		root = &ioport_resource;
  42.179 +	if (res->flags & IORESOURCE_MEM)
  42.180 +		root = &iomem_resource;
  42.181 +
  42.182 +	return root;
  42.183 +}
  42.184 +#endif
  42.185 +
  42.186 +#define pcibios_scan_all_fns(a, b)	0
  42.187 +
  42.188 +#endif /* _ASM_IA64_PCI_H */
    43.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    43.2 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/README.origin	Wed Dec 20 14:55:02 2006 -0700
    43.3 @@ -0,0 +1,16 @@
    43.4 +# Source files in this directory are near-identical copies of linux-2.6.19
    43.5 +# files:
    43.6 +
    43.7 +# NOTE: ALL changes to these files should be clearly marked
    43.8 +# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
    43.9 +# easily updated to future versions of the corresponding Linux files.
   43.10 +
   43.11 +addrs.h			-> linux/include/asm-ia64/sn/addrs.h
   43.12 +arch.h			-> linux/include/asm-ia64/sn/arch.h
   43.13 +hubdev.h		-> linux/arch/ia64/sn/include/xtalk/hubdev.h
   43.14 +intr.h			-> linux/include/asm-ia64/sn/intr.h
   43.15 +io.h			-> linux/include/asm-ia64/sn/io.h
   43.16 +nodepda.h		-> linux/include/asm-ia64/sn/nodepda.h
   43.17 +pcibr_provider.h	-> linux/include/asm-ia64/sn/pcibr_provider.h
   43.18 +rw_mmr.h		-> linux/include/asm-ia64/sn/rw_mmr.h
   43.19 +types.h			-> linux/include/asm-ia64/sn/types.h
    44.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    44.2 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/addrs.h	Wed Dec 20 14:55:02 2006 -0700
    44.3 @@ -0,0 +1,299 @@
    44.4 +/*
    44.5 + * This file is subject to the terms and conditions of the GNU General Public
    44.6 + * License.  See the file "COPYING" in the main directory of this archive
    44.7 + * for more details.
    44.8 + *
    44.9 + * Copyright (c) 1992-1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
   44.10 + */
   44.11 +
   44.12 +#ifndef _ASM_IA64_SN_ADDRS_H
   44.13 +#define _ASM_IA64_SN_ADDRS_H
   44.14 +
   44.15 +#include <asm/percpu.h>
   44.16 +#include <asm/sn/types.h>
   44.17 +#include <asm/sn/arch.h>
   44.18 +#include <asm/sn/pda.h>
   44.19 +
   44.20 +/*
   44.21 + *  Memory/SHUB Address Format:
   44.22 + *  +-+---------+--+--------------+
   44.23 + *  |0|  NASID  |AS| NodeOffset   |
   44.24 + *  +-+---------+--+--------------+
   44.25 + *
   44.26 + *  NASID: (low NASID bit is 0) Memory and SHUB MMRs
   44.27 + *   AS: 2-bit Address Space Identifier. Used only if low NASID bit is 0
   44.28 + *     00: Local Resources and MMR space
   44.29 + *           Top bit of NodeOffset
   44.30 + *               0: Local resources space
   44.31 + *                  node id:
   44.32 + *                        0: IA64/NT compatibility space
   44.33 + *                        2: Local MMR Space
   44.34 + *                        4: Local memory, regardless of local node id
   44.35 + *               1: Global MMR space
   44.36 + *     01: GET space.
   44.37 + *     10: AMO space.
   44.38 + *     11: Cacheable memory space.
   44.39 + *
   44.40 + *   NodeOffset: byte offset
   44.41 + *
   44.42 + *
   44.43 + *  TIO address format:
   44.44 + *  +-+----------+--+--------------+
   44.45 + *  |0|  NASID   |AS| Nodeoffset   |
   44.46 + *  +-+----------+--+--------------+
   44.47 + *
   44.48 + *  NASID: (low NASID bit is 1) TIO
   44.49 + *   AS: 2-bit Chiplet Identifier
   44.50 + *     00: TIO LB (Indicates TIO MMR access.)
   44.51 + *     01: TIO ICE (indicates coretalk space access.)
   44.52 + * 
   44.53 + *   NodeOffset: top bit must be set.
   44.54 + *
   44.55 + *
   44.56 + * Note that in both of the above address formats, the low
   44.57 + * NASID bit indicates if the reference is to the SHUB or TIO MMRs.
   44.58 + */
   44.59 +
   44.60 +
   44.61 +/*
   44.62 + * Define basic shift & mask constants for manipulating NASIDs and AS values.
   44.63 + */
   44.64 +#define NASID_BITMASK		(sn_hub_info->nasid_bitmask)
   44.65 +#define NASID_SHIFT		(sn_hub_info->nasid_shift)
   44.66 +#define AS_SHIFT		(sn_hub_info->as_shift)
   44.67 +#define AS_BITMASK		0x3UL
   44.68 +
   44.69 +#define NASID_MASK              ((u64)NASID_BITMASK << NASID_SHIFT)
   44.70 +#define AS_MASK			((u64)AS_BITMASK << AS_SHIFT)
   44.71 +
   44.72 +
   44.73 +/*
   44.74 + * AS values. These are the same on both SHUB1 & SHUB2.
   44.75 + */
   44.76 +#define AS_GET_VAL		1UL
   44.77 +#define AS_AMO_VAL		2UL
   44.78 +#define AS_CAC_VAL		3UL
   44.79 +#define AS_GET_SPACE		(AS_GET_VAL << AS_SHIFT)
   44.80 +#define AS_AMO_SPACE		(AS_AMO_VAL << AS_SHIFT)
   44.81 +#define AS_CAC_SPACE		(AS_CAC_VAL << AS_SHIFT)
   44.82 +
   44.83 +
   44.84 +/* 
   44.85 + * Virtual Mode Local & Global MMR space.  
   44.86 + */
   44.87 +#define SH1_LOCAL_MMR_OFFSET	0x8000000000UL
   44.88 +#define SH2_LOCAL_MMR_OFFSET	0x0200000000UL
   44.89 +#define LOCAL_MMR_OFFSET	(is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET)
   44.90 +#define LOCAL_MMR_SPACE		(__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET)
   44.91 +#define LOCAL_PHYS_MMR_SPACE	(RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET)
   44.92 +
   44.93 +#define SH1_GLOBAL_MMR_OFFSET	0x0800000000UL
   44.94 +#define SH2_GLOBAL_MMR_OFFSET	0x0300000000UL
   44.95 +#define GLOBAL_MMR_OFFSET	(is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET)
   44.96 +#define GLOBAL_MMR_SPACE	(__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET)
   44.97 +
   44.98 +/*
   44.99 + * Physical mode addresses
  44.100 + */
  44.101 +#define GLOBAL_PHYS_MMR_SPACE	(RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET)
  44.102 +
  44.103 +
  44.104 +/*
  44.105 + * Clear region & AS bits.
  44.106 + */
  44.107 +#define TO_PHYS_MASK		(~(RGN_BITS | AS_MASK))
  44.108 +
  44.109 +
  44.110 +/*
  44.111 + * Misc NASID manipulation.
  44.112 + */
  44.113 +#define NASID_SPACE(n)		((u64)(n) << NASID_SHIFT)
  44.114 +#define REMOTE_ADDR(n,a)	(NASID_SPACE(n) | (a))
  44.115 +#define NODE_OFFSET(x)		((x) & (NODE_ADDRSPACE_SIZE - 1))
  44.116 +#define NODE_ADDRSPACE_SIZE     (1UL << AS_SHIFT)
  44.117 +#define NASID_GET(x)		(int) (((u64) (x) >> NASID_SHIFT) & NASID_BITMASK)
  44.118 +#define LOCAL_MMR_ADDR(a)	(LOCAL_MMR_SPACE | (a))
  44.119 +#define GLOBAL_MMR_ADDR(n,a)	(GLOBAL_MMR_SPACE | REMOTE_ADDR(n,a))
  44.120 +#define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a))
  44.121 +#define GLOBAL_CAC_ADDR(n,a)	(CAC_BASE | REMOTE_ADDR(n,a))
  44.122 +#define CHANGE_NASID(n,x)	((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n)))
  44.123 +#define IS_TIO_NASID(n)		((n) & 1)
  44.124 +
  44.125 +
  44.126 +/* non-II mmr's start at top of big window space (4G) */
  44.127 +#define BWIN_TOP		0x0000000100000000UL
  44.128 +
  44.129 +/*
  44.130 + * general address defines
  44.131 + */
  44.132 +#define CAC_BASE		(PAGE_OFFSET | AS_CAC_SPACE)
  44.133 +#define AMO_BASE		(__IA64_UNCACHED_OFFSET | AS_AMO_SPACE)
  44.134 +#define AMO_PHYS_BASE		(RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE)
  44.135 +#define GET_BASE		(PAGE_OFFSET | AS_GET_SPACE)
  44.136 +
  44.137 +/*
  44.138 + * Convert Memory addresses between various addressing modes.
  44.139 + */
  44.140 +#define TO_PHYS(x)		(TO_PHYS_MASK & (x))
  44.141 +#define TO_CAC(x)		(CAC_BASE     | TO_PHYS(x))
  44.142 +#if defined(CONFIG_SGI_SN) || defined(XEN)
  44.143 +#define TO_AMO(x)		(AMO_BASE     | TO_PHYS(x))
  44.144 +#define TO_GET(x)		(GET_BASE     | TO_PHYS(x))
  44.145 +#else
  44.146 +#define TO_AMO(x)		({ BUG(); x; })
  44.147 +#define TO_GET(x)		({ BUG(); x; })
  44.148 +#endif
  44.149 +
  44.150 +/*
  44.151 + * Covert from processor physical address to II/TIO physical address:
  44.152 + *	II - squeeze out the AS bits
  44.153 + *	TIO- requires a chiplet id in bits 38-39.  For DMA to memory,
  44.154 + *           the chiplet id is zero.  If we implement TIO-TIO dma, we might need
  44.155 + *           to insert a chiplet id into this macro.  However, it is our belief
  44.156 + *           right now that this chiplet id will be ICE, which is also zero.
  44.157 + */
  44.158 +#define SH1_TIO_PHYS_TO_DMA(x) 						\
  44.159 +	((((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x))
  44.160 +
  44.161 +#define SH2_NETWORK_BANK_OFFSET(x) 					\
  44.162 +        ((u64)(x) & ((1UL << (sn_hub_info->nasid_shift - 4)) -1))
  44.163 +
  44.164 +#define SH2_NETWORK_BANK_SELECT(x) 					\
  44.165 +        ((((u64)(x) & (0x3UL << (sn_hub_info->nasid_shift - 4)))	\
  44.166 +        	>> (sn_hub_info->nasid_shift - 4)) << 36)
  44.167 +
  44.168 +#define SH2_NETWORK_ADDRESS(x) 						\
  44.169 +	(SH2_NETWORK_BANK_OFFSET(x) | SH2_NETWORK_BANK_SELECT(x))
  44.170 +
  44.171 +#define SH2_TIO_PHYS_TO_DMA(x) 						\
  44.172 +        (((u64)(NASID_GET(x)) << 40) | 	SH2_NETWORK_ADDRESS(x))
  44.173 +
  44.174 +#define PHYS_TO_TIODMA(x)						\
  44.175 +	(is_shub1() ? SH1_TIO_PHYS_TO_DMA(x) : SH2_TIO_PHYS_TO_DMA(x))
  44.176 +
  44.177 +#define PHYS_TO_DMA(x)							\
  44.178 +	((((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
  44.179 +
  44.180 +
  44.181 +/*
  44.182 + * Macros to test for address type.
  44.183 + */
  44.184 +#define IS_AMO_ADDRESS(x)	(((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE)
  44.185 +#define IS_AMO_PHYS_ADDRESS(x)	(((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE)
  44.186 +
  44.187 +
  44.188 +/*
  44.189 + * The following definitions pertain to the IO special address
  44.190 + * space.  They define the location of the big and little windows
  44.191 + * of any given node.
  44.192 + */
  44.193 +#define BWIN_SIZE_BITS			29	/* big window size: 512M */
  44.194 +#define TIO_BWIN_SIZE_BITS		30	/* big window size: 1G */
  44.195 +#define NODE_SWIN_BASE(n, w)		((w == 0) ? NODE_BWIN_BASE((n), SWIN0_BIGWIN) \
  44.196 +		: RAW_NODE_SWIN_BASE(n, w))
  44.197 +#define TIO_SWIN_BASE(n, w) 		(TIO_IO_BASE(n) + \
  44.198 +					    ((u64) (w) << TIO_SWIN_SIZE_BITS))
  44.199 +#define NODE_IO_BASE(n)			(GLOBAL_MMR_SPACE | NASID_SPACE(n))
  44.200 +#define TIO_IO_BASE(n)                  (__IA64_UNCACHED_OFFSET | NASID_SPACE(n))
  44.201 +#define BWIN_SIZE			(1UL << BWIN_SIZE_BITS)
  44.202 +#define NODE_BWIN_BASE0(n)		(NODE_IO_BASE(n) + BWIN_SIZE)
  44.203 +#define NODE_BWIN_BASE(n, w)		(NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))
  44.204 +#define RAW_NODE_SWIN_BASE(n, w)	(NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS))
  44.205 +#define BWIN_WIDGET_MASK		0x7
  44.206 +#define BWIN_WINDOWNUM(x)		(((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
  44.207 +#define SH1_IS_BIG_WINDOW_ADDR(x)	((x) & BWIN_TOP)
  44.208 +
  44.209 +#define TIO_BWIN_WINDOW_SELECT_MASK	0x7
  44.210 +#define TIO_BWIN_WINDOWNUM(x)		(((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK)
  44.211 +
  44.212 +#define TIO_HWIN_SHIFT_BITS		33
  44.213 +#define TIO_HWIN(x)			(NODE_OFFSET(x) >> TIO_HWIN_SHIFT_BITS)
  44.214 +
  44.215 +/*
  44.216 + * The following definitions pertain to the IO special address
  44.217 + * space.  They define the location of the big and little windows
  44.218 + * of any given node.
  44.219 + */
  44.220 +
  44.221 +#define SWIN_SIZE_BITS			24
  44.222 +#define	SWIN_WIDGET_MASK		0xF
  44.223 +
  44.224 +#define TIO_SWIN_SIZE_BITS		28
  44.225 +#define TIO_SWIN_SIZE			(1UL << TIO_SWIN_SIZE_BITS)
  44.226 +#define TIO_SWIN_WIDGET_MASK		0x3
  44.227 +
  44.228 +/*
  44.229 + * Convert smallwindow address to xtalk address.
  44.230 + *
  44.231 + * 'addr' can be physical or virtual address, but will be converted
  44.232 + * to Xtalk address in the range 0 -> SWINZ_SIZEMASK
  44.233 + */
  44.234 +#define	SWIN_WIDGETNUM(x)		(((x)  >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
  44.235 +#define TIO_SWIN_WIDGETNUM(x)		(((x)  >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
  44.236 +
  44.237 +
  44.238 +/*
  44.239 + * The following macros produce the correct base virtual address for
  44.240 + * the hub registers. The REMOTE_HUB_* macro produce
  44.241 + * the address for the specified hub's registers.  The intent is
  44.242 + * that the appropriate PI, MD, NI, or II register would be substituted
  44.243 + * for x.
  44.244 + *
  44.245 + *   WARNING:
  44.246 + *	When certain Hub chip workaround are defined, it's not sufficient
  44.247 + *	to dereference the *_HUB_ADDR() macros.  You should instead use
  44.248 + *	HUB_L() and HUB_S() if you must deal with pointers to hub registers.
  44.249 + *	Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
  44.250 + *	They're always safe.
  44.251 + */
  44.252 +/* Shub1 TIO & MMR addressing macros */
  44.253 +#define SH1_TIO_IOSPACE_ADDR(n,x)					\
  44.254 +	GLOBAL_MMR_ADDR(n,x)
  44.255 +
  44.256 +#define SH1_REMOTE_BWIN_MMR(n,x)					\
  44.257 +	GLOBAL_MMR_ADDR(n,x)
  44.258 +
  44.259 +#define SH1_REMOTE_SWIN_MMR(n,x)					\
  44.260 +	(NODE_SWIN_BASE(n,1) + 0x800000UL + (x))
  44.261 +
  44.262 +#define SH1_REMOTE_MMR(n,x)						\
  44.263 +	(SH1_IS_BIG_WINDOW_ADDR(x) ? SH1_REMOTE_BWIN_MMR(n,x) :		\
  44.264 +	 	SH1_REMOTE_SWIN_MMR(n,x))
  44.265 +
  44.266 +/* Shub1 TIO & MMR addressing macros */
  44.267 +#define SH2_TIO_IOSPACE_ADDR(n,x)					\
  44.268 +	((__IA64_UNCACHED_OFFSET | REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2)))
  44.269 +
  44.270 +#define SH2_REMOTE_MMR(n,x)						\
  44.271 +	GLOBAL_MMR_ADDR(n,x)
  44.272 +
  44.273 +
  44.274 +/* TIO & MMR addressing macros that work on both shub1 & shub2 */
  44.275 +#define TIO_IOSPACE_ADDR(n,x)						\
  44.276 +	((u64 *)(is_shub1() ? SH1_TIO_IOSPACE_ADDR(n,x) :		\
  44.277 +		 SH2_TIO_IOSPACE_ADDR(n,x)))
  44.278 +
  44.279 +#define SH_REMOTE_MMR(n,x)						\
  44.280 +	(is_shub1() ? SH1_REMOTE_MMR(n,x) : SH2_REMOTE_MMR(n,x))
  44.281 +
  44.282 +#define REMOTE_HUB_ADDR(n,x)						\
  44.283 +	(IS_TIO_NASID(n) ?  ((volatile u64*)TIO_IOSPACE_ADDR(n,x)) :	\
  44.284 +	 ((volatile u64*)SH_REMOTE_MMR(n,x)))
  44.285 +
  44.286 +
  44.287 +#define HUB_L(x)			(*((volatile typeof(*x) *)x))
  44.288 +#define	HUB_S(x,d)			(*((volatile typeof(*x) *)x) = (d))
  44.289 +
  44.290 +#define REMOTE_HUB_L(n, a)		HUB_L(REMOTE_HUB_ADDR((n), (a)))
  44.291 +#define REMOTE_HUB_S(n, a, d)		HUB_S(REMOTE_HUB_ADDR((n), (a)), (d))
  44.292 +
  44.293 +/*
  44.294 + * Coretalk address breakdown
  44.295 + */
  44.296 +#define CTALK_NASID_SHFT		40
  44.297 +#define CTALK_NASID_MASK		(0x3FFFULL << CTALK_NASID_SHFT)
  44.298 +#define CTALK_CID_SHFT			38
  44.299 +#define CTALK_CID_MASK			(0x3ULL << CTALK_CID_SHFT)
  44.300 +#define CTALK_NODE_OFFSET		0x3FFFFFFFFF
  44.301 +
  44.302 +#endif /* _ASM_IA64_SN_ADDRS_H */
    45.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    45.2 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/arch.h	Wed Dec 20 14:55:02 2006 -0700
    45.3 @@ -0,0 +1,92 @@
    45.4 +/*
    45.5 + * This file is subject to the terms and conditions of the GNU General Public
    45.6 + * License.  See the file "COPYING" in the main directory of this archive
    45.7 + * for more details.
    45.8 + *
    45.9 + * SGI specific setup.
   45.10 + *
   45.11 + * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc.  All rights reserved.
   45.12 + * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
   45.13 + */
   45.14 +#ifndef _ASM_IA64_SN_ARCH_H
   45.15 +#define _ASM_IA64_SN_ARCH_H
   45.16 +
   45.17 +#ifndef XEN
   45.18 +#include <linux/numa.h>
   45.19 +#include <asm/types.h>
   45.20 +#include <asm/percpu.h>
   45.21 +#include <asm/sn/types.h>
   45.22 +#endif
   45.23 +#include <asm/sn/sn_cpuid.h>
   45.24 +
   45.25 +/*
   45.26 + * This is the maximum number of NUMALINK nodes that can be part of a single
   45.27 + * SSI kernel. This number includes C-brick, M-bricks, and TIOs. Nodes in
   45.28 + * remote partitions are NOT included in this number.
   45.29 + * The number of compact nodes cannot exceed size of a coherency domain.
   45.30 + * The purpose of this define is to specify a node count that includes
   45.31 + * all C/M/TIO nodes in an SSI system.
   45.32 + *
   45.33 + * SGI system can currently support up to 256 C/M nodes plus additional TIO nodes.
   45.34 + *
   45.35 + * 	Note: ACPI20 has an architectural limit of 256 nodes. When we upgrade
   45.36 + * 	to ACPI3.0, this limit will be removed. The notion of "compact nodes"
   45.37 + * 	should be deleted and TIOs should be included in MAX_NUMNODES.
   45.38 + */
   45.39 +#define MAX_TIO_NODES		MAX_NUMNODES
   45.40 +#define MAX_COMPACT_NODES	(MAX_NUMNODES + MAX_TIO_NODES)
   45.41 +
   45.42 +/*
   45.43 + * Maximum number of nodes in all partitions and in all coherency domains.
   45.44 + * This is the total number of nodes accessible in the numalink fabric. It
   45.45 + * includes all C & M bricks, plus all TIOs.
   45.46 + *
   45.47 + * This value is also the value of the maximum number of NASIDs in the numalink
   45.48 + * fabric.
   45.49 + */
   45.50 +#define MAX_NUMALINK_NODES	16384
   45.51 +
   45.52 +/*
   45.53 + * The following defines attributes of the HUB chip. These attributes are
   45.54 + * frequently referenced. They are kept in the per-cpu data areas of each cpu.
   45.55 + * They are kept together in a struct to minimize cache misses.
   45.56 + */
   45.57 +struct sn_hub_info_s {
   45.58 +	u8 shub2;
   45.59 +	u8 nasid_shift;
   45.60 +	u8 as_shift;
   45.61 +	u8 shub_1_1_found;
   45.62 +	u16 nasid_bitmask;
   45.63 +};
   45.64 +DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
   45.65 +#define sn_hub_info 	(&__get_cpu_var(__sn_hub_info))
   45.66 +#ifndef XEN
   45.67 +#define is_shub2()	(sn_hub_info->shub2)
   45.68 +#define is_shub1()	(sn_hub_info->shub2 == 0)
   45.69 +#else
   45.70 +#define is_shub2()	0
   45.71 +#define is_shub1()	1
   45.72 +#endif
   45.73 +
   45.74 +/*
   45.75 + * Use this macro to test if shub 1.1 wars should be enabled
   45.76 + */
   45.77 +#define enable_shub_wars_1_1()	(sn_hub_info->shub_1_1_found)
   45.78 +
   45.79 +
   45.80 +/*
   45.81 + * Compact node ID to nasid mappings kept in the per-cpu data areas of each
   45.82 + * cpu.
   45.83 + */
   45.84 +DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
   45.85 +#define sn_cnodeid_to_nasid	(&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
   45.86 +
   45.87 +#ifndef XEN
   45.88 +extern u8 sn_partition_id;
   45.89 +extern u8 sn_system_size;
   45.90 +extern u8 sn_sharing_domain_size;
   45.91 +extern u8 sn_region_size;
   45.92 +
   45.93 +extern void sn_flush_all_caches(long addr, long bytes);
   45.94 +#endif
   45.95 +#endif /* _ASM_IA64_SN_ARCH_H */
    46.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    46.2 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h	Wed Dec 20 14:55:02 2006 -0700
    46.3 @@ -0,0 +1,95 @@
    46.4 +/*
    46.5 + * This file is subject to the terms and conditions of the GNU General Public
    46.6 + * License.  See the file "COPYING" in the main directory of this archive
    46.7 + * for more details.
    46.8 + *
    46.9 + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
   46.10 + */
   46.11 +#ifndef _ASM_IA64_SN_XTALK_HUBDEV_H
   46.12 +#define _ASM_IA64_SN_XTALK_HUBDEV_H
   46.13 +
   46.14 +#ifndef XEN
   46.15 +#include "xtalk/xwidgetdev.h"
   46.16 +#else
   46.17 +#include <asm/sn/xwidgetdev.h>
   46.18 +#endif
   46.19 +
   46.20 +#define HUB_WIDGET_ID_MAX 0xf
   46.21 +#define DEV_PER_WIDGET (2*2*8)
   46.22 +#define IIO_ITTE_WIDGET_BITS    4       /* size of widget field */
   46.23 +#define IIO_ITTE_WIDGET_MASK    ((1<<IIO_ITTE_WIDGET_BITS)-1)
   46.24 +#define IIO_ITTE_WIDGET_SHIFT   8
   46.25 +
   46.26 +#define IIO_ITTE_WIDGET(itte)	\
   46.27 +	(((itte) >> IIO_ITTE_WIDGET_SHIFT) & IIO_ITTE_WIDGET_MASK)
   46.28 +
   46.29 +/*
   46.30 + * Use the top big window as a surrogate for the first small window
   46.31 + */
   46.32 +#define SWIN0_BIGWIN            HUB_NUM_BIG_WINDOW
   46.33 +#define IIO_NUM_ITTES   7
   46.34 +#define HUB_NUM_BIG_WINDOW      (IIO_NUM_ITTES - 1)
   46.35 +
   46.36 +/* This struct is shared between the PROM and the kernel.
   46.37 + * Changes to this struct will require corresponding changes to the kernel.
   46.38 + */
   46.39 +struct sn_flush_device_common {
   46.40 +	int sfdl_bus;
   46.41 +	int sfdl_slot;
   46.42 +	int sfdl_pin;
   46.43 +	struct common_bar_list {
   46.44 +		unsigned long start;
   46.45 +		unsigned long end;
   46.46 +	} sfdl_bar_list[6];
   46.47 +	unsigned long sfdl_force_int_addr;
   46.48 +	unsigned long sfdl_flush_value;
   46.49 +	volatile unsigned long *sfdl_flush_addr;
   46.50 +	u32 sfdl_persistent_busnum;
   46.51 +	u32 sfdl_persistent_segment;
   46.52 +	struct pcibus_info *sfdl_pcibus_info;
   46.53 +};
   46.54 +
   46.55 +/* This struct is kernel only and is not used by the PROM */
   46.56 +struct sn_flush_device_kernel {
   46.57 +	spinlock_t sfdl_flush_lock;
   46.58 +	struct sn_flush_device_common *common;
   46.59 +};
   46.60 +
   46.61 +/* 01/16/06 This struct is the old PROM/kernel struct and needs to be included
   46.62 + * for older official PROMs to function on the new kernel base.  This struct
   46.63 + * will be removed when the next official PROM release occurs. */
   46.64 +
   46.65 +struct sn_flush_device_war {
   46.66 +	struct sn_flush_device_common common;
   46.67 +	u32 filler; /* older PROMs expect the default size of a spinlock_t */
   46.68 +};
   46.69 +
   46.70 +/*
   46.71 + * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
   46.72 + */
   46.73 +struct sn_flush_nasid_entry  {
   46.74 +	struct sn_flush_device_kernel **widget_p; // Used as an array of wid_num
   46.75 +	u64 iio_itte[8];
   46.76 +};
   46.77 +
   46.78 +struct hubdev_info {
   46.79 +	geoid_t				hdi_geoid;
   46.80 +	short				hdi_nasid;
   46.81 +	short				hdi_peer_nasid;   /* Dual Porting Peer */
   46.82 +
   46.83 +	struct sn_flush_nasid_entry	hdi_flush_nasid_list;
   46.84 +	struct xwidget_info		hdi_xwidget_info[HUB_WIDGET_ID_MAX + 1];
   46.85 +
   46.86 +
   46.87 +	void				*hdi_nodepda;
   46.88 +	void				*hdi_node_vertex;
   46.89 +	u32				max_segment_number;
   46.90 +	u32				max_pcibus_number;
   46.91 +};
   46.92 +
   46.93 +extern void hubdev_init_node(nodepda_t *, cnodeid_t);
   46.94 +extern void hub_error_init(struct hubdev_info *);
   46.95 +extern void ice_error_init(struct hubdev_info *);
   46.96 +
   46.97 +
   46.98 +#endif /* _ASM_IA64_SN_XTALK_HUBDEV_H */
    47.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    47.2 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/intr.h	Wed Dec 20 14:55:02 2006 -0700
    47.3 @@ -0,0 +1,73 @@
    47.4 +/*
    47.5 + * This file is subject to the terms and conditions of the GNU General Public
    47.6 + * License.  See the file "COPYING" in the main directory of this archive
    47.7 + * for more details.
    47.8 + *
    47.9 + * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
   47.10 + */
   47.11 +
   47.12 +#ifndef _ASM_IA64_SN_INTR_H
   47.13 +#define _ASM_IA64_SN_INTR_H
   47.14 +
   47.15 +#ifndef XEN
   47.16 +#include <linux/rcupdate.h>
   47.17 +#else
   47.18 +#include <linux/list.h>
   47.19 +#endif
   47.20 +#include <asm/sn/types.h>
   47.21 +
   47.22 +#define SGI_UART_VECTOR		0xe9
   47.23 +
   47.24 +/* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
   47.25 +#define SGI_XPC_ACTIVATE	0x30
   47.26 +#define SGI_II_ERROR		0x31
   47.27 +#define SGI_XBOW_ERROR		0x32
   47.28 +#define SGI_PCIASIC_ERROR	0x33
   47.29 +#define SGI_ACPI_SCI_INT	0x34
   47.30 +#define SGI_TIOCA_ERROR		0x35
   47.31 +#define SGI_TIO_ERROR		0x36
   47.32 +#define SGI_TIOCX_ERROR		0x37
   47.33 +#define SGI_MMTIMER_VECTOR	0x38
   47.34 +#define SGI_XPC_NOTIFY		0xe7
   47.35 +
   47.36 +#define IA64_SN2_FIRST_DEVICE_VECTOR	0x3c
   47.37 +#define IA64_SN2_LAST_DEVICE_VECTOR	0xe6
   47.38 +
   47.39 +#define SN2_IRQ_RESERVED	0x1
   47.40 +#define SN2_IRQ_CONNECTED	0x2
   47.41 +#define SN2_IRQ_SHARED		0x4
   47.42 +
   47.43 +// The SN PROM irq struct
   47.44 +struct sn_irq_info {
   47.45 +	struct sn_irq_info *irq_next;	/* deprecated DO NOT USE     */
   47.46 +	short		irq_nasid;	/* Nasid IRQ is assigned to  */
   47.47 +	int		irq_slice;	/* slice IRQ is assigned to  */
   47.48 +	int		irq_cpuid;	/* kernel logical cpuid	     */
   47.49 +	int		irq_irq;	/* the IRQ number */
   47.50 +	int		irq_int_bit;	/* Bridge interrupt pin */
   47.51 +					/* <0 means MSI */
   47.52 +	u64	irq_xtalkaddr;	/* xtalkaddr IRQ is sent to  */
   47.53 +	int		irq_bridge_type;/* pciio asic type (pciio.h) */
   47.54 +	void	       *irq_bridge;	/* bridge generating irq     */
   47.55 +	void	       *irq_pciioinfo;	/* associated pciio_info_t   */
   47.56 +	int		irq_last_intr;	/* For Shub lb lost intr WAR */
   47.57 +	int		irq_cookie;	/* unique cookie 	     */
   47.58 +	int		irq_flags;	/* flags */
   47.59 +	int		irq_share_cnt;	/* num devices sharing IRQ   */
   47.60 +	struct list_head	list;	/* list of sn_irq_info structs */
   47.61 +#ifndef XEN
   47.62 +	struct rcu_head		rcu;	/* rcu callback list */
   47.63 +#endif
   47.64 +};
   47.65 +
   47.66 +extern void sn_send_IPI_phys(int, long, int, int);
   47.67 +extern u64 sn_intr_alloc(nasid_t, int,
   47.68 +			      struct sn_irq_info *,
   47.69 +			      int, nasid_t, int);
   47.70 +extern void sn_intr_free(nasid_t, int, struct sn_irq_info *);
   47.71 +extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int);
   47.72 +extern struct list_head **sn_irq_lh;
   47.73 +
   47.74 +#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
   47.75 +
   47.76 +#endif /* _ASM_IA64_SN_INTR_H */
    48.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    48.2 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/io.h	Wed Dec 20 14:55:02 2006 -0700
    48.3 @@ -0,0 +1,281 @@
    48.4 +/* 
    48.5 + * This file is subject to the terms and conditions of the GNU General Public
    48.6 + * License.  See the file "COPYING" in the main directory of this archive
    48.7 + * for more details.
    48.8 + *
    48.9 + * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
   48.10 + */
   48.11 +
   48.12 +#ifndef _ASM_SN_IO_H
   48.13 +#define _ASM_SN_IO_H
   48.14 +#include <linux/compiler.h>
   48.15 +#include <asm/intrinsics.h>
   48.16 +
   48.17 +extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */
   48.18 +extern void __sn_mmiowb(void); /* Forward definition */
   48.19 +
   48.20 +extern int num_cnodes;
   48.21 +
   48.22 +#define __sn_mf_a()   ia64_mfa()
   48.23 +
   48.24 +#ifdef XEN
   48.25 +/*
   48.26 + * Xen doesn't deal with any PIC devices directly, it's all handled in dom0
   48.27 + */
   48.28 +#define sn_dma_flush(foo)		do {} while(0)
   48.29 +#else
   48.30 +extern void sn_dma_flush(unsigned long);
   48.31 +#endif
   48.32 +
   48.33 +#define __sn_inb ___sn_inb
   48.34 +#define __sn_inw ___sn_inw
   48.35 +#define __sn_inl ___sn_inl
   48.36 +#define __sn_outb ___sn_outb
   48.37 +#define __sn_outw ___sn_outw
   48.38 +#define __sn_outl ___sn_outl
   48.39 +#define __sn_readb ___sn_readb
   48.40 +#define __sn_readw ___sn_readw
   48.41 +#define __sn_readl ___sn_readl
   48.42 +#define __sn_readq ___sn_readq
   48.43 +#define __sn_readb_relaxed ___sn_readb_relaxed
   48.44 +#define __sn_readw_relaxed ___sn_readw_relaxed
   48.45 +#define __sn_readl_relaxed ___sn_readl_relaxed
   48.46 +#define __sn_readq_relaxed ___sn_readq_relaxed
   48.47 +
   48.48 +/*
   48.49 + * Convenience macros for setting/clearing bits using the above accessors
   48.50 + */
   48.51 +
   48.52 +#define __sn_setq_relaxed(addr, val) \
   48.53 +	writeq((__sn_readq_relaxed(addr) | (val)), (addr))
   48.54 +#define __sn_clrq_relaxed(addr, val) \
   48.55 +	writeq((__sn_readq_relaxed(addr) & ~(val)), (addr))
   48.56 +
   48.57 +/*
   48.58 + * The following routines are SN Platform specific, called when
   48.59 + * a reference is made to inX/outX set macros.  SN Platform
   48.60 + * inX set of macros ensures that Posted DMA writes on the
   48.61 + * Bridge is flushed.
   48.62 + *
   48.64 + * The routines should be self-explanatory.
   48.64 + */
   48.65 +
   48.66 +static inline unsigned int
   48.67 +___sn_inb (unsigned long port)
   48.68 +{
   48.69 +	volatile unsigned char *addr;
   48.70 +	unsigned char ret = -1;
   48.71 +
   48.72 +	if ((addr = sn_io_addr(port))) {
   48.73 +		ret = *addr;
   48.74 +		__sn_mf_a();
   48.75 +		sn_dma_flush((unsigned long)addr);
   48.76 +	}
   48.77 +	return ret;
   48.78 +}
   48.79 +
   48.80 +static inline unsigned int
   48.81 +___sn_inw (unsigned long port)
   48.82 +{
   48.83 +	volatile unsigned short *addr;
   48.84 +	unsigned short ret = -1;
   48.85 +
   48.86 +	if ((addr = sn_io_addr(port))) {
   48.87 +		ret = *addr;
   48.88 +		__sn_mf_a();
   48.89 +		sn_dma_flush((unsigned long)addr);
   48.90 +	}
   48.91 +	return ret;
   48.92 +}
   48.93 +
   48.94 +static inline unsigned int
   48.95 +___sn_inl (unsigned long port)
   48.96 +{
   48.97 +	volatile unsigned int *addr;
   48.98 +	unsigned int ret = -1;
   48.99 +
  48.100 +	if ((addr = sn_io_addr(port))) {
  48.101 +		ret = *addr;
  48.102 +		__sn_mf_a();
  48.103 +		sn_dma_flush((unsigned long)addr);
  48.104 +	}
  48.105 +	return ret;
  48.106 +}
  48.107 +
  48.108 +static inline void
  48.109 +___sn_outb (unsigned char val, unsigned long port)
  48.110 +{
  48.111 +	volatile unsigned char *addr;
  48.112 +
  48.113 +	if ((addr = sn_io_addr(port))) {
  48.114 +		*addr = val;
  48.115 +		__sn_mmiowb();
  48.116 +	}
  48.117 +}
  48.118 +
  48.119 +static inline void
  48.120 +___sn_outw (unsigned short val, unsigned long port)
  48.121 +{
  48.122 +	volatile unsigned short *addr;
  48.123 +
  48.124 +	if ((addr = sn_io_addr(port))) {
  48.125 +		*addr = val;
  48.126 +		__sn_mmiowb();
  48.127 +	}
  48.128 +}
  48.129 +
  48.130 +static inline void
  48.131 +___sn_outl (unsigned int val, unsigned long port)
  48.132 +{
  48.133 +	volatile unsigned int *addr;
  48.134 +
  48.135 +	if ((addr = sn_io_addr(port))) {
  48.136 +		*addr = val;
  48.137 +		__sn_mmiowb();
  48.138 +	}
  48.139 +}
  48.140 +
  48.141 +/*
  48.142 + * The following routines are SN Platform specific, called when 
  48.143 + * a reference is made to readX/writeX set macros.  SN Platform 
  48.144 + * readX set of macros ensures that Posted DMA writes on the 
  48.145 + * Bridge is flushed.
  48.146 + * 
  48.147 + * The routines should be self-explanatory.
  48.148 + */
  48.149 +
  48.150 +static inline unsigned char
  48.151 +___sn_readb (const volatile void __iomem *addr)
  48.152 +{
  48.153 +	unsigned char val;
  48.154 +
  48.155 +	val = *(volatile unsigned char __force *)addr;
  48.156 +	__sn_mf_a();
  48.157 +	sn_dma_flush((unsigned long)addr);
  48.158 +        return val;
  48.159 +}
  48.160 +
  48.161 +static inline unsigned short
  48.162 +___sn_readw (const volatile void __iomem *addr)
  48.163 +{
  48.164 +	unsigned short val;
  48.165 +
  48.166 +	val = *(volatile unsigned short __force *)addr;
  48.167 +	__sn_mf_a();
  48.168 +	sn_dma_flush((unsigned long)addr);
  48.169 +        return val;
  48.170 +}
  48.171 +
  48.172 +static inline unsigned int
  48.173 +___sn_readl (const volatile void __iomem *addr)
  48.174 +{
  48.175 +	unsigned int val;
  48.176 +
  48.177 +	val = *(volatile unsigned int __force *)addr;
  48.178 +	__sn_mf_a();
  48.179 +	sn_dma_flush((unsigned long)addr);
  48.180 +        return val;
  48.181 +}
  48.182 +
  48.183 +static inline unsigned long
  48.184 +___sn_readq (const volatile void __iomem *addr)
  48.185 +{
  48.186 +	unsigned long val;
  48.187 +
  48.188 +	val = *(volatile unsigned long __force *)addr;
  48.189 +	__sn_mf_a();
  48.190 +	sn_dma_flush((unsigned long)addr);
  48.191 +        return val;
  48.192 +}
  48.193 +
  48.194 +/*
  48.195 + * For generic and SN2 kernels, we have a set of fast access
  48.196 + * PIO macros.	These macros are provided on SN Platform
  48.197 + * because the normal inX and readX macros perform an
  48.198 + * additional task of flushing Post DMA request on the Bridge.
  48.199 + *
  48.201 + * These routines should be self-explanatory.
  48.201 + */
  48.202 +
  48.203 +static inline unsigned int
  48.204 +sn_inb_fast (unsigned long port)
  48.205 +{
  48.206 +	volatile unsigned char *addr = (unsigned char *)port;
  48.207 +	unsigned char ret;
  48.208 +
  48.209 +	ret = *addr;
  48.210 +	__sn_mf_a();
  48.211 +	return ret;
  48.212 +}
  48.213 +
  48.214 +static inline unsigned int
  48.215 +sn_inw_fast (unsigned long port)
  48.216 +{
  48.217 +	volatile unsigned short *addr = (unsigned short *)port;
  48.218 +	unsigned short ret;
  48.219 +
  48.220 +	ret = *addr;
  48.221 +	__sn_mf_a();
  48.222 +	return ret;
  48.223 +}
  48.224 +
  48.225 +static inline unsigned int
  48.226 +sn_inl_fast (unsigned long port)
  48.227 +{
  48.228 +	volatile unsigned int *addr = (unsigned int *)port;
  48.229 +	unsigned int ret;
  48.230 +
  48.231 +	ret = *addr;
  48.232 +	__sn_mf_a();
  48.233 +	return ret;
  48.234 +}
  48.235 +
  48.236 +static inline unsigned char
  48.237 +___sn_readb_relaxed (const volatile void __iomem *addr)
  48.238 +{
  48.239 +	return *(volatile unsigned char __force *)addr;
  48.240 +}
  48.241 +
  48.242 +static inline unsigned short
  48.243 +___sn_readw_relaxed (const volatile void __iomem *addr)
  48.244 +{
  48.245 +	return *(volatile unsigned short __force *)addr;
  48.246 +}
  48.247 +
  48.248 +static inline unsigned int
  48.249 +___sn_readl_relaxed (const volatile void __iomem *addr)
  48.250 +{
  48.251 +	return *(volatile unsigned int __force *) addr;
  48.252 +}
  48.253 +
  48.254 +static inline unsigned long
  48.255 +___sn_readq_relaxed (const volatile void __iomem *addr)
  48.256 +{
  48.257 +	return *(volatile unsigned long __force *) addr;
  48.258 +}
  48.259 +
  48.260 +struct pci_dev;
  48.261 +
  48.262 +static inline int
  48.263 +sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan)
  48.264 +{
  48.265 +
  48.266 +	if (vchan > 1) {
  48.267 +		return -1;
  48.268 +	}
  48.269 +
  48.270 +	if (!(*addr >> 32))	/* Using a mask here would be cleaner */
  48.271 +		return 0;	/* but this generates better code */
  48.272 +
  48.273 +	if (vchan == 1) {
  48.274 +		/* Set Bit 57 */
  48.275 +		*addr |= (1UL << 57);
  48.276 +	} else {
  48.277 +		/* Clear Bit 57 */
  48.278 +		*addr &= ~(1UL << 57);
  48.279 +	}
  48.280 +
  48.281 +	return 0;
  48.282 +}
  48.283 +
  48.284 +#endif	/* _ASM_SN_IO_H */
    49.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    49.2 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h	Wed Dec 20 14:55:02 2006 -0700
    49.3 @@ -0,0 +1,87 @@
    49.4 +/*
    49.5 + * This file is subject to the terms and conditions of the GNU General Public
    49.6 + * License.  See the file "COPYING" in the main directory of this archive
    49.7 + * for more details.
    49.8 + *
    49.9 + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
   49.10 + */
   49.11 +#ifndef _ASM_IA64_SN_NODEPDA_H
   49.12 +#define _ASM_IA64_SN_NODEPDA_H
   49.13 +
   49.14 +
   49.15 +#include <asm/semaphore.h>
   49.16 +#include <asm/irq.h>
   49.17 +#include <asm/sn/arch.h>
   49.18 +#include <asm/sn/intr.h>
   49.19 +#ifndef XEN
   49.20 +#include <asm/sn/bte.h>
   49.21 +#endif
   49.22 +
   49.23 +/*
   49.24 + * NUMA Node-Specific Data structures are defined in this file.
   49.25 + * In particular, this is the location of the node PDA.
   49.26 + * A pointer to the right node PDA is saved in each CPU PDA.
   49.27 + */
   49.28 +
   49.29 +/*
   49.30 + * Node-specific data structure.
   49.31 + *
   49.32 + * One of these structures is allocated on each node of a NUMA system.
   49.33 + *
   49.34 + * This structure provides a convenient way of keeping together 
   49.35 + * all per-node data structures. 
   49.36 + */
   49.37 +struct phys_cpuid {
   49.38 +	short			nasid;
   49.39 +	char			subnode;
   49.40 +	char			slice;
   49.41 +};
   49.42 +
   49.43 +struct nodepda_s {
   49.44 +	void 		*pdinfo;	/* Platform-dependent per-node info */
   49.45 +
   49.46 +#ifndef XEN
   49.47 +	/*
   49.48 +	 * The BTEs on this node are shared by the local cpus
   49.49 +	 */
   49.50 +	struct bteinfo_s	bte_if[MAX_BTES_PER_NODE];	/* Virtual Interface */
   49.51 +	struct timer_list	bte_recovery_timer;
   49.52 +	spinlock_t		bte_recovery_lock;
   49.53 +#endif
   49.54 +
   49.55 +	/* 
   49.56 +	 * Array of pointers to the nodepdas for each node.
   49.57 +	 */
   49.58 +	struct nodepda_s	*pernode_pdaindr[MAX_COMPACT_NODES]; 
   49.59 +
   49.60 +	/*
   49.61 +	 * Array of physical cpu identifiers. Indexed by cpuid.
   49.62 +	 */
   49.63 +	struct phys_cpuid	phys_cpuid[NR_CPUS];
   49.64 +	spinlock_t		ptc_lock ____cacheline_aligned_in_smp;
   49.65 +};
   49.66 +
   49.67 +typedef struct nodepda_s nodepda_t;
   49.68 +
   49.69 +/*
   49.70 + * Access Functions for node PDA.
   49.71 + * Since there is one nodepda for each node, we need a convenient mechanism
   49.72 + * to access these nodepdas without cluttering code with #ifdefs.
   49.73 + * The next set of definitions provides this.
   49.74 + * Routines are expected to use 
   49.75 + *
   49.76 + *	sn_nodepda   - to access node PDA for the node on which code is running
   49.77 + *	NODEPDA(cnodeid)   - to access node PDA for cnodeid
   49.78 + */
   49.79 +
   49.80 +DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda);
   49.81 +#define sn_nodepda		(__get_cpu_var(__sn_nodepda))
   49.82 +#define	NODEPDA(cnodeid)	(sn_nodepda->pernode_pdaindr[cnodeid])
   49.83 +
   49.84 +/*
   49.85 + * Check if given a compact node id the corresponding node has all the
   49.86 + * cpus disabled. 
   49.87 + */
   49.88 +#define is_headless_node(cnodeid)	(nr_cpus_node(cnodeid) == 0)
   49.89 +
   49.90 +#endif /* _ASM_IA64_SN_NODEPDA_H */
    50.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    50.2 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h	Wed Dec 20 14:55:02 2006 -0700
    50.3 @@ -0,0 +1,153 @@
    50.4 +/*
    50.5 + * This file is subject to the terms and conditions of the GNU General Public
    50.6 + * License.  See the file "COPYING" in the main directory of this archive
    50.7 + * for more details.
    50.8 + *
    50.9 + * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All rights reserved.
   50.10 + */
   50.11 +#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
   50.12 +#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
   50.13 +
   50.14 +#ifdef XEN
   50.15 +#include <linux/spinlock.h>
   50.16 +#include <linux/pci.h>
   50.17 +#endif
   50.18 +#include <asm/sn/intr.h>
   50.19 +#include <asm/sn/pcibus_provider_defs.h>
   50.20 +
   50.21 +/* Workarounds */
   50.22 +#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */
   50.23 +
   50.24 +#define BUSTYPE_MASK                    0x1
   50.25 +
   50.26 +/* Macros given a pcibus structure */
   50.27 +#define IS_PCIX(ps)     ((ps)->pbi_bridge_mode & BUSTYPE_MASK)
   50.28 +#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \
   50.29 +                asic == PCIIO_ASIC_TYPE_TIOCP)
   50.30 +#define IS_PIC_SOFT(ps)     (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
   50.31 +
   50.32 +
   50.33 +/*
   50.34 + * The different PCI Bridge types supported on the SGI Altix platforms
   50.35 + */
   50.36 +#define PCIBR_BRIDGETYPE_UNKNOWN       -1
   50.37 +#define PCIBR_BRIDGETYPE_PIC            2
   50.38 +#define PCIBR_BRIDGETYPE_TIOCP          3
   50.39 +
   50.40 +/*
   50.41 + * Bridge 64bit Direct Map Attributes
   50.42 + */
   50.43 +#define PCI64_ATTR_PREF                 (1ull << 59)
   50.44 +#define PCI64_ATTR_PREC                 (1ull << 58)
   50.45 +#define PCI64_ATTR_VIRTUAL              (1ull << 57)
   50.46 +#define PCI64_ATTR_BAR                  (1ull << 56)
   50.47 +#define PCI64_ATTR_SWAP                 (1ull << 55)
   50.48 +#define PCI64_ATTR_VIRTUAL1             (1ull << 54)
   50.49 +
   50.50 +#define PCI32_LOCAL_BASE                0
   50.51 +#define PCI32_MAPPED_BASE               0x40000000
   50.52 +#define PCI32_DIRECT_BASE               0x80000000
   50.53 +
   50.54 +#define IS_PCI32_MAPPED(x)              ((u64)(x) < PCI32_DIRECT_BASE && \
   50.55 +                                         (u64)(x) >= PCI32_MAPPED_BASE)
   50.56 +#define IS_PCI32_DIRECT(x)              ((u64)(x) >= PCI32_MAPPED_BASE)
   50.57 +
   50.58 +
   50.59 +/*
   50.60 + * Bridge PMU Address Translation Entry Attributes
   50.61 + */
   50.62 +#define PCI32_ATE_V                     (0x1 << 0)
   50.63 +#define PCI32_ATE_CO                    (0x1 << 1)
   50.64 +#define PCI32_ATE_PREC                  (0x1 << 2)
   50.65 +#define PCI32_ATE_MSI                   (0x1 << 2)
   50.66 +#define PCI32_ATE_PREF                  (0x1 << 3)
   50.67 +#define PCI32_ATE_BAR                   (0x1 << 4)
   50.68 +#define PCI32_ATE_ADDR_SHFT             12
   50.69 +
   50.70 +#define MINIMAL_ATES_REQUIRED(addr, size) \
   50.71 +	(IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))
   50.72 +
   50.73 +#define MINIMAL_ATE_FLAG(addr, size) \
   50.74 +	(MINIMAL_ATES_REQUIRED((u64)addr, size) ? 1 : 0)
   50.75 +
   50.76 +/* bit 29 of the pci address is the SWAP bit */
   50.77 +#define ATE_SWAPSHIFT                   29
   50.78 +#define ATE_SWAP_ON(x)                  ((x) |= (1 << ATE_SWAPSHIFT))
   50.79 +#define ATE_SWAP_OFF(x)                 ((x) &= ~(1 << ATE_SWAPSHIFT))
   50.80 +
   50.81 +/*
   50.82 + * I/O page size
   50.83 + */
   50.84 +#if PAGE_SIZE < 16384
   50.85 +#define IOPFNSHIFT                      12      /* 4K per mapped page */
   50.86 +#else
   50.87 +#define IOPFNSHIFT                      14      /* 16K per mapped page */
   50.88 +#endif
   50.89 +
   50.90 +#define IOPGSIZE                        (1 << IOPFNSHIFT)
   50.91 +#define IOPG(x)                         ((x) >> IOPFNSHIFT)
   50.92 +#define IOPGOFF(x)                      ((x) & (IOPGSIZE-1))
   50.93 +
   50.94 +#define PCIBR_DEV_SWAP_DIR              (1ull << 19)
   50.95 +#define PCIBR_CTRL_PAGE_SIZE            (0x1 << 21)
   50.96 +
   50.97 +/*
   50.98 + * PMU resources.
   50.99 + */
  50.100 +struct ate_resource{
  50.101 +	u64 *ate;
  50.102 +	u64 num_ate;
  50.103 +	u64 lowest_free_index;
  50.104 +};
  50.105 +
  50.106 +struct pcibus_info {
  50.107 +	struct pcibus_bussoft	pbi_buscommon;   /* common header */
  50.108 +	u32                pbi_moduleid;
  50.109 +	short                   pbi_bridge_type;
  50.110 +	short                   pbi_bridge_mode;
  50.111 +
  50.112 +	struct ate_resource     pbi_int_ate_resource;
  50.113 +	u64                pbi_int_ate_size;
  50.114 +
  50.115 +	u64                pbi_dir_xbase;
  50.116 +	char                    pbi_hub_xid;
  50.117 +
  50.118 +	u64                pbi_devreg[8];
  50.119 +
  50.120 +	u32		pbi_valid_devices;
  50.121 +	u32		pbi_enabled_devices;
  50.122 +
  50.123 +	spinlock_t              pbi_lock;
  50.124 +};
  50.125 +
  50.126 +extern int  pcibr_init_provider(void);
  50.127 +extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *);
  50.128 +extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type);
  50.129 +extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type);
  50.130 +extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
  50.131 +
  50.132 +/*
  50.133 + * prototypes for the bridge asic register access routines in pcibr_reg.c
  50.134 + */
  50.135 +extern void             pcireg_control_bit_clr(struct pcibus_info *, u64);
  50.136 +extern void             pcireg_control_bit_set(struct pcibus_info *, u64);
  50.137 +extern u64         pcireg_tflush_get(struct pcibus_info *);
  50.138 +extern u64         pcireg_intr_status_get(struct pcibus_info *);
  50.139 +extern void             pcireg_intr_enable_bit_clr(struct pcibus_info *, u64);
  50.140 +extern void             pcireg_intr_enable_bit_set(struct pcibus_info *, u64);
  50.141 +extern void             pcireg_intr_addr_addr_set(struct pcibus_info *, int, u64);
  50.142 +extern void             pcireg_force_intr_set(struct pcibus_info *, int);
  50.143 +extern u64         pcireg_wrb_flush_get(struct pcibus_info *, int);
  50.144 +extern void             pcireg_int_ate_set(struct pcibus_info *, int, u64);
  50.145 +extern u64 __iomem *	pcireg_int_ate_addr(struct pcibus_info *, int);
  50.146 +extern void 		pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
  50.147 +extern void 		pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info);
  50.148 +extern int 		pcibr_ate_alloc(struct pcibus_info *, int);
  50.149 +extern void 		pcibr_ate_free(struct pcibus_info *, int);
  50.150 +extern void 		ate_write(struct pcibus_info *, int, int, u64);
  50.151 +extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device,
  50.152 +				 void *resp);
  50.153 +extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device,
  50.154 +				  int action, void *resp);
  50.155 +extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus);
  50.156 +#endif
    51.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    51.2 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h	Wed Dec 20 14:55:02 2006 -0700
    51.3 @@ -0,0 +1,32 @@
    51.4 +/*
    51.5 + * This file is subject to the terms and conditions of the GNU General Public
    51.6 + * License.  See the file "COPYING" in the main directory of this archive
    51.7 + * for more details.
    51.8 + *
    51.9 + * Copyright (C) 2002-2006 Silicon Graphics, Inc.  All Rights Reserved.
   51.10 + */
   51.11 +#ifndef _ASM_IA64_SN_RW_MMR_H
   51.12 +#define _ASM_IA64_SN_RW_MMR_H
   51.13 +
   51.14 +
   51.15 +/*
   51.16 + * This file declares routines that access MMRs via uncached physical addresses.
   51.17 + * 	pio_phys_read_mmr  - read an MMR
   51.18 + * 	pio_phys_write_mmr - write an MMR
   51.19 + * 	pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
   51.20 + *		Second MMR will be skipped if address is NULL
   51.21 + *
   51.22 + * Addresses passed to these routines should be uncached physical addresses
   51.23 + * ie., 0x80000....
   51.24 + */
   51.25 +
   51.26 +
   51.27 +extern long pio_phys_read_mmr(volatile long *mmr); 
   51.28 +extern void pio_phys_write_mmr(volatile long *mmr, long val);
   51.29 +#ifndef XEN
   51.30 +extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2); 
   51.31 +#else
   51.32 +extern void pio_atomic_phys_write_mmrs(volatile unsigned long *mmr1, long val1, volatile unsigned long *mmr2, long val2); 
   51.33 +#endif
   51.34 +
   51.35 +#endif /* _ASM_IA64_SN_RW_MMR_H */
    52.1 --- a/xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h	Wed Dec 20 08:53:42 2006 -0700
    52.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    52.3 @@ -1,994 +0,0 @@
    52.4 -#ifndef _ASM_IA64_SN_SN_SAL_H
    52.5 -#define _ASM_IA64_SN_SN_SAL_H
    52.6 -
    52.7 -/*
    52.8 - * System Abstraction Layer definitions for IA64
    52.9 - *
   52.10 - * This file is subject to the terms and conditions of the GNU General Public
   52.11 - * License.  See the file "COPYING" in the main directory of this archive
   52.12 - * for more details.
   52.13 - *
   52.14 - * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All rights reserved.
   52.15 - */
   52.16 -
   52.17 -
   52.18 -#include <linux/config.h>
   52.19 -#include <asm/sal.h>
   52.20 -#include <asm/sn/sn_cpuid.h>
   52.21 -#include <asm/sn/arch.h>
   52.22 -#include <asm/sn/geo.h>
   52.23 -#include <asm/sn/nodepda.h>
   52.24 -
   52.25 -// SGI Specific Calls
   52.26 -#define  SN_SAL_POD_MODE                           0x02000001
   52.27 -#define  SN_SAL_SYSTEM_RESET                       0x02000002
   52.28 -#define  SN_SAL_PROBE                              0x02000003
   52.29 -#define  SN_SAL_GET_MASTER_NASID                   0x02000004
   52.30 -#define	 SN_SAL_GET_KLCONFIG_ADDR		   0x02000005
   52.31 -#define  SN_SAL_LOG_CE				   0x02000006
   52.32 -#define  SN_SAL_REGISTER_CE			   0x02000007
   52.33 -#define  SN_SAL_GET_PARTITION_ADDR		   0x02000009
   52.34 -#define  SN_SAL_XP_ADDR_REGION			   0x0200000f
   52.35 -#define  SN_SAL_NO_FAULT_ZONE_VIRTUAL		   0x02000010
   52.36 -#define  SN_SAL_NO_FAULT_ZONE_PHYSICAL		   0x02000011
   52.37 -#define  SN_SAL_PRINT_ERROR			   0x02000012
   52.38 -#define  SN_SAL_SET_ERROR_HANDLING_FEATURES	   0x0200001a	// reentrant
   52.39 -#define  SN_SAL_GET_FIT_COMPT			   0x0200001b	// reentrant
   52.40 -#define  SN_SAL_GET_HUB_INFO                       0x0200001c
   52.41 -#define  SN_SAL_GET_SAPIC_INFO                     0x0200001d
   52.42 -#define  SN_SAL_CONSOLE_PUTC                       0x02000021
   52.43 -#define  SN_SAL_CONSOLE_GETC                       0x02000022
   52.44 -#define  SN_SAL_CONSOLE_PUTS                       0x02000023
   52.45 -#define  SN_SAL_CONSOLE_GETS                       0x02000024
   52.46 -#define  SN_SAL_CONSOLE_GETS_TIMEOUT               0x02000025
   52.47 -#define  SN_SAL_CONSOLE_POLL                       0x02000026
   52.48 -#define  SN_SAL_CONSOLE_INTR                       0x02000027
   52.49 -#define  SN_SAL_CONSOLE_PUTB			   0x02000028
   52.50 -#define  SN_SAL_CONSOLE_XMIT_CHARS		   0x0200002a
   52.51 -#define  SN_SAL_CONSOLE_READC			   0x0200002b
   52.52 -#define  SN_SAL_SYSCTL_MODID_GET	           0x02000031
   52.53 -#define  SN_SAL_SYSCTL_GET                         0x02000032
   52.54 -#define  SN_SAL_SYSCTL_IOBRICK_MODULE_GET          0x02000033
   52.55 -#define  SN_SAL_SYSCTL_IO_PORTSPEED_GET            0x02000035
   52.56 -#define  SN_SAL_SYSCTL_SLAB_GET                    0x02000036
   52.57 -#define  SN_SAL_BUS_CONFIG		   	   0x02000037
   52.58 -#define  SN_SAL_SYS_SERIAL_GET			   0x02000038
   52.59 -#define  SN_SAL_PARTITION_SERIAL_GET		   0x02000039
   52.60 -#define  SN_SAL_SYSCTL_PARTITION_GET		   0x0200003a
   52.61 -#define  SN_SAL_SYSTEM_POWER_DOWN		   0x0200003b
   52.62 -#define  SN_SAL_GET_MASTER_BASEIO_NASID		   0x0200003c
   52.63 -#define  SN_SAL_COHERENCE                          0x0200003d
   52.64 -#define  SN_SAL_MEMPROTECT                         0x0200003e
   52.65 -#define  SN_SAL_SYSCTL_FRU_CAPTURE		   0x0200003f
   52.66 -
   52.67 -#define  SN_SAL_SYSCTL_IOBRICK_PCI_OP		   0x02000042	// reentrant
   52.68 -#define	 SN_SAL_IROUTER_OP			   0x02000043
   52.69 -#define  SN_SAL_IOIF_INTERRUPT			   0x0200004a
   52.70 -#define  SN_SAL_HWPERF_OP			   0x02000050   // lock
   52.71 -#define  SN_SAL_IOIF_ERROR_INTERRUPT		   0x02000051
   52.72 -
   52.73 -#define  SN_SAL_IOIF_SLOT_ENABLE		   0x02000053
   52.74 -#define  SN_SAL_IOIF_SLOT_DISABLE		   0x02000054
   52.75 -#define  SN_SAL_IOIF_GET_HUBDEV_INFO		   0x02000055
   52.76 -#define  SN_SAL_IOIF_GET_PCIBUS_INFO		   0x02000056
   52.77 -#define  SN_SAL_IOIF_GET_PCIDEV_INFO		   0x02000057
   52.78 -#define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST	   0x02000058
   52.79 -
   52.80 -#define SN_SAL_HUB_ERROR_INTERRUPT		   0x02000060
   52.81 -
   52.82 -
   52.83 -/*
   52.84 - * Service-specific constants
   52.85 - */
   52.86 -
   52.87 -/* Console interrupt manipulation */
   52.88 -	/* action codes */
   52.89 -#define SAL_CONSOLE_INTR_OFF    0       /* turn the interrupt off */
   52.90 -#define SAL_CONSOLE_INTR_ON     1       /* turn the interrupt on */
   52.91 -#define SAL_CONSOLE_INTR_STATUS 2	/* retrieve the interrupt status */
   52.92 -	/* interrupt specification & status return codes */
   52.93 -#define SAL_CONSOLE_INTR_XMIT	1	/* output interrupt */
   52.94 -#define SAL_CONSOLE_INTR_RECV	2	/* input interrupt */
   52.95 -
   52.96 -/* interrupt handling */
   52.97 -#define SAL_INTR_ALLOC		1
   52.98 -#define SAL_INTR_FREE		2
   52.99 -
  52.100 -/*
  52.101 - * IRouter (i.e. generalized system controller) operations
  52.102 - */
  52.103 -#define SAL_IROUTER_OPEN	0	/* open a subchannel */
  52.104 -#define SAL_IROUTER_CLOSE	1	/* close a subchannel */
  52.105 -#define SAL_IROUTER_SEND	2	/* send part of an IRouter packet */
  52.106 -#define SAL_IROUTER_RECV	3	/* receive part of an IRouter packet */
  52.107 -#define SAL_IROUTER_INTR_STATUS	4	/* check the interrupt status for
  52.108 -					 * an open subchannel
  52.109 -					 */
  52.110 -#define SAL_IROUTER_INTR_ON	5	/* enable an interrupt */
  52.111 -#define SAL_IROUTER_INTR_OFF	6	/* disable an interrupt */
  52.112 -#define SAL_IROUTER_INIT	7	/* initialize IRouter driver */
  52.113 -
  52.114 -/* IRouter interrupt mask bits */
  52.115 -#define SAL_IROUTER_INTR_XMIT	SAL_CONSOLE_INTR_XMIT
  52.116 -#define SAL_IROUTER_INTR_RECV	SAL_CONSOLE_INTR_RECV
  52.117 -
  52.118 -
  52.119 -/*
  52.120 - * SAL Error Codes
  52.121 - */
  52.122 -#define SALRET_MORE_PASSES	1
  52.123 -#define SALRET_OK		0
  52.124 -#define SALRET_NOT_IMPLEMENTED	(-1)
  52.125 -#define SALRET_INVALID_ARG	(-2)
  52.126 -#define SALRET_ERROR		(-3)
  52.127 -
  52.128 -
  52.129 -#ifndef XEN
  52.130 -/**
  52.131 - * sn_sal_rev_major - get the major SGI SAL revision number
  52.132 - *
  52.133 - * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
  52.134 - * This routine simply extracts the major value from the
  52.135 - * @ia64_sal_systab structure constructed by ia64_sal_init().
  52.136 - */
  52.137 -static inline int
  52.138 -sn_sal_rev_major(void)
  52.139 -{
  52.140 -	struct ia64_sal_systab *systab = efi.sal_systab;
  52.141 -
  52.142 -	return (int)systab->sal_b_rev_major;
  52.143 -}
  52.144 -
  52.145 -/**
  52.146 - * sn_sal_rev_minor - get the minor SGI SAL revision number
  52.147 - *
  52.148 - * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
  52.149 - * This routine simply extracts the minor value from the
  52.150 - * @ia64_sal_systab structure constructed by ia64_sal_init().
  52.151 - */
  52.152 -static inline int
  52.153 -sn_sal_rev_minor(void)
  52.154 -{
  52.155 -	struct ia64_sal_systab *systab = efi.sal_systab;
  52.156 -	
  52.157 -	return (int)systab->sal_b_rev_minor;
  52.158 -}
  52.159 -
  52.160 -/*
  52.161 - * Specify the minimum PROM revsion required for this kernel.
  52.162 - * Note that they're stored in hex format...
  52.163 - */
  52.164 -#define SN_SAL_MIN_MAJOR	0x4  /* SN2 kernels need at least PROM 4.0 */
  52.165 -#define SN_SAL_MIN_MINOR	0x0
  52.166 -
  52.167 -/*
  52.168 - * Returns the master console nasid, if the call fails, return an illegal
  52.169 - * value.
  52.170 - */
  52.171 -static inline u64
  52.172 -ia64_sn_get_console_nasid(void)
  52.173 -{
  52.174 -	struct ia64_sal_retval ret_stuff;
  52.175 -
  52.176 -	ret_stuff.status = 0;
  52.177 -	ret_stuff.v0 = 0;
  52.178 -	ret_stuff.v1 = 0;
  52.179 -	ret_stuff.v2 = 0;
  52.180 -	SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_NASID, 0, 0, 0, 0, 0, 0, 0);
  52.181 -
  52.182 -	if (ret_stuff.status < 0)
  52.183 -		return ret_stuff.status;
  52.184 -
  52.185 -	/* Master console nasid is in 'v0' */
  52.186 -	return ret_stuff.v0;
  52.187 -}
  52.188 -
  52.189 -/*
  52.190 - * Returns the master baseio nasid, if the call fails, return an illegal
  52.191 - * value.
  52.192 - */
  52.193 -static inline u64
  52.194 -ia64_sn_get_master_baseio_nasid(void)
  52.195 -{
  52.196 -	struct ia64_sal_retval ret_stuff;
  52.197 -
  52.198 -	ret_stuff.status = 0;
  52.199 -	ret_stuff.v0 = 0;
  52.200 -	ret_stuff.v1 = 0;
  52.201 -	ret_stuff.v2 = 0;
  52.202 -	SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_BASEIO_NASID, 0, 0, 0, 0, 0, 0, 0);
  52.203 -
  52.204 -	if (ret_stuff.status < 0)
  52.205 -		return ret_stuff.status;
  52.206 -
  52.207 -	/* Master baseio nasid is in 'v0' */
  52.208 -	return ret_stuff.v0;
  52.209 -}
  52.210 -
  52.211 -static inline char *
  52.212 -ia64_sn_get_klconfig_addr(nasid_t nasid)
  52.213 -{
  52.214 -	struct ia64_sal_retval ret_stuff;
  52.215 -	int cnodeid;
  52.216 -
  52.217 -	cnodeid = nasid_to_cnodeid(nasid);
  52.218 -	ret_stuff.status = 0;
  52.219 -	ret_stuff.v0 = 0;
  52.220 -	ret_stuff.v1 = 0;
  52.221 -	ret_stuff.v2 = 0;
  52.222 -	SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0);
  52.223 -
  52.224 -	/*
  52.225 -	 * We should panic if a valid cnode nasid does not produce
  52.226 -	 * a klconfig address.
  52.227 -	 */
  52.228 -	if (ret_stuff.status != 0) {
  52.229 -		panic("ia64_sn_get_klconfig_addr: Returned error %lx\n", ret_stuff.status);
  52.230 -	}
  52.231 -	return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
  52.232 -}
  52.233 -#endif /* !XEN */
  52.234 -
  52.235 -/*
  52.236 - * Returns the next console character.
  52.237 - */
  52.238 -static inline u64
  52.239 -ia64_sn_console_getc(int *ch)
  52.240 -{
  52.241 -	struct ia64_sal_retval ret_stuff;
  52.242 -
  52.243 -	ret_stuff.status = 0;
  52.244 -	ret_stuff.v0 = 0;
  52.245 -	ret_stuff.v1 = 0;
  52.246 -	ret_stuff.v2 = 0;
  52.247 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_GETC, 0, 0, 0, 0, 0, 0, 0);
  52.248 -
  52.249 -	/* character is in 'v0' */
  52.250 -	*ch = (int)ret_stuff.v0;
  52.251 -
  52.252 -	return ret_stuff.status;
  52.253 -}
  52.254 -
  52.255 -/*
  52.256 - * Read a character from the SAL console device, after a previous interrupt
  52.257 - * or poll operation has given us to know that a character is available
  52.258 - * to be read.
  52.259 - */
  52.260 -static inline u64
  52.261 -ia64_sn_console_readc(void)
  52.262 -{
  52.263 -	struct ia64_sal_retval ret_stuff;
  52.264 -
  52.265 -	ret_stuff.status = 0;
  52.266 -	ret_stuff.v0 = 0;
  52.267 -	ret_stuff.v1 = 0;
  52.268 -	ret_stuff.v2 = 0;
  52.269 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_READC, 0, 0, 0, 0, 0, 0, 0);
  52.270 -
  52.271 -	/* character is in 'v0' */
  52.272 -	return ret_stuff.v0;
  52.273 -}
  52.274 -
  52.275 -/*
  52.276 - * Sends the given character to the console.
  52.277 - */
  52.278 -static inline u64
  52.279 -ia64_sn_console_putc(char ch)
  52.280 -{
  52.281 -	struct ia64_sal_retval ret_stuff;
  52.282 -
  52.283 -	ret_stuff.status = 0;
  52.284 -	ret_stuff.v0 = 0;
  52.285 -	ret_stuff.v1 = 0;
  52.286 -	ret_stuff.v2 = 0;
  52.287 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (uint64_t)ch, 0, 0, 0, 0, 0, 0);
  52.288 -
  52.289 -	return ret_stuff.status;
  52.290 -}
  52.291 -
  52.292 -/*
  52.293 - * Sends the given buffer to the console.
  52.294 - */
  52.295 -static inline u64
  52.296 -ia64_sn_console_putb(const char *buf, int len)
  52.297 -{
  52.298 -	struct ia64_sal_retval ret_stuff;
  52.299 -
  52.300 -	ret_stuff.status = 0;
  52.301 -	ret_stuff.v0 = 0; 
  52.302 -	ret_stuff.v1 = 0;
  52.303 -	ret_stuff.v2 = 0;
  52.304 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (uint64_t)buf, (uint64_t)len, 0, 0, 0, 0, 0);
  52.305 -
  52.306 -	if ( ret_stuff.status == 0 ) {
  52.307 -		return ret_stuff.v0;
  52.308 -	}
  52.309 -	return (u64)0;
  52.310 -}
  52.311 -
  52.312 -#ifndef XEN
  52.313 -/*
  52.314 - * Print a platform error record
  52.315 - */
  52.316 -static inline u64
  52.317 -ia64_sn_plat_specific_err_print(int (*hook)(const char*, ...), char *rec)
  52.318 -{
  52.319 -	struct ia64_sal_retval ret_stuff;
  52.320 -
  52.321 -	ret_stuff.status = 0;
  52.322 -	ret_stuff.v0 = 0;
  52.323 -	ret_stuff.v1 = 0;
  52.324 -	ret_stuff.v2 = 0;
  52.325 -	SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (uint64_t)hook, (uint64_t)rec, 0, 0, 0, 0, 0);
  52.326 -
  52.327 -	return ret_stuff.status;
  52.328 -}
  52.329 -
  52.330 -/*
  52.331 - * Check for Platform errors
  52.332 - */
  52.333 -static inline u64
  52.334 -ia64_sn_plat_cpei_handler(void)
  52.335 -{
  52.336 -	struct ia64_sal_retval ret_stuff;
  52.337 -
  52.338 -	ret_stuff.status = 0;
  52.339 -	ret_stuff.v0 = 0;
  52.340 -	ret_stuff.v1 = 0;
  52.341 -	ret_stuff.v2 = 0;
  52.342 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0);
  52.343 -
  52.344 -	return ret_stuff.status;
  52.345 -}
  52.346 -
  52.347 -/*
  52.348 - * Checks for console input.
  52.349 - */
  52.350 -static inline u64
  52.351 -ia64_sn_console_check(int *result)
  52.352 -{
  52.353 -	struct ia64_sal_retval ret_stuff;
  52.354 -
  52.355 -	ret_stuff.status = 0;
  52.356 -	ret_stuff.v0 = 0;
  52.357 -	ret_stuff.v1 = 0;
  52.358 -	ret_stuff.v2 = 0;
  52.359 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_POLL, 0, 0, 0, 0, 0, 0, 0);
  52.360 -
  52.361 -	/* result is in 'v0' */
  52.362 -	*result = (int)ret_stuff.v0;
  52.363 -
  52.364 -	return ret_stuff.status;
  52.365 -}
  52.366 -
  52.367 -/*
  52.368 - * Checks console interrupt status
  52.369 - */
  52.370 -static inline u64
  52.371 -ia64_sn_console_intr_status(void)
  52.372 -{
  52.373 -	struct ia64_sal_retval ret_stuff;
  52.374 -
  52.375 -	ret_stuff.status = 0;
  52.376 -	ret_stuff.v0 = 0;
  52.377 -	ret_stuff.v1 = 0;
  52.378 -	ret_stuff.v2 = 0;
  52.379 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
  52.380 -		 0, SAL_CONSOLE_INTR_STATUS,
  52.381 -		 0, 0, 0, 0, 0);
  52.382 -
  52.383 -	if (ret_stuff.status == 0) {
  52.384 -	    return ret_stuff.v0;
  52.385 -	}
  52.386 -	
  52.387 -	return 0;
  52.388 -}
  52.389 -
  52.390 -/*
  52.391 - * Enable an interrupt on the SAL console device.
  52.392 - */
  52.393 -static inline void
  52.394 -ia64_sn_console_intr_enable(uint64_t intr)
  52.395 -{
  52.396 -	struct ia64_sal_retval ret_stuff;
  52.397 -
  52.398 -	ret_stuff.status = 0;
  52.399 -	ret_stuff.v0 = 0;
  52.400 -	ret_stuff.v1 = 0;
  52.401 -	ret_stuff.v2 = 0;
  52.402 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
  52.403 -		 intr, SAL_CONSOLE_INTR_ON,
  52.404 -		 0, 0, 0, 0, 0);
  52.405 -}
  52.406 -
  52.407 -/*
  52.408 - * Disable an interrupt on the SAL console device.
  52.409 - */
  52.410 -static inline void
  52.411 -ia64_sn_console_intr_disable(uint64_t intr)
  52.412 -{
  52.413 -	struct ia64_sal_retval ret_stuff;
  52.414 -
  52.415 -	ret_stuff.status = 0;
  52.416 -	ret_stuff.v0 = 0;
  52.417 -	ret_stuff.v1 = 0;
  52.418 -	ret_stuff.v2 = 0;
  52.419 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
  52.420 -		 intr, SAL_CONSOLE_INTR_OFF,
  52.421 -		 0, 0, 0, 0, 0);
  52.422 -}
  52.423 -
  52.424 -/*
  52.425 - * Sends a character buffer to the console asynchronously.
  52.426 - */
  52.427 -static inline u64
  52.428 -ia64_sn_console_xmit_chars(char *buf, int len)
  52.429 -{
  52.430 -	struct ia64_sal_retval ret_stuff;
  52.431 -
  52.432 -	ret_stuff.status = 0;
  52.433 -	ret_stuff.v0 = 0;
  52.434 -	ret_stuff.v1 = 0;
  52.435 -	ret_stuff.v2 = 0;
  52.436 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_XMIT_CHARS,
  52.437 -		 (uint64_t)buf, (uint64_t)len,
  52.438 -		 0, 0, 0, 0, 0);
  52.439 -
  52.440 -	if (ret_stuff.status == 0) {
  52.441 -	    return ret_stuff.v0;
  52.442 -	}
  52.443 -
  52.444 -	return 0;
  52.445 -}
  52.446 -
  52.447 -/*
  52.448 - * Returns the iobrick module Id
  52.449 - */
  52.450 -static inline u64
  52.451 -ia64_sn_sysctl_iobrick_module_get(nasid_t nasid, int *result)
  52.452 -{
  52.453 -	struct ia64_sal_retval ret_stuff;
  52.454 -
  52.455 -	ret_stuff.status = 0;
  52.456 -	ret_stuff.v0 = 0;
  52.457 -	ret_stuff.v1 = 0;
  52.458 -	ret_stuff.v2 = 0;
  52.459 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYSCTL_IOBRICK_MODULE_GET, nasid, 0, 0, 0, 0, 0, 0);
  52.460 -
  52.461 -	/* result is in 'v0' */
  52.462 -	*result = (int)ret_stuff.v0;
  52.463 -
  52.464 -	return ret_stuff.status;
  52.465 -}
  52.466 -
  52.467 -/**
  52.468 - * ia64_sn_pod_mode - call the SN_SAL_POD_MODE function
  52.469 - *
  52.470 - * SN_SAL_POD_MODE actually takes an argument, but it's always
  52.471 - * 0 when we call it from the kernel, so we don't have to expose
  52.472 - * it to the caller.
  52.473 - */
  52.474 -static inline u64
  52.475 -ia64_sn_pod_mode(void)
  52.476 -{
  52.477 -	struct ia64_sal_retval isrv;
  52.478 -	SAL_CALL(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0);
  52.479 -	if (isrv.status)
  52.480 -		return 0;
  52.481 -	return isrv.v0;
  52.482 -}
  52.483 -
  52.484 -/**
  52.485 - * ia64_sn_probe_mem - read from memory safely
  52.486 - * @addr: address to probe
  52.487 - * @size: number bytes to read (1,2,4,8)
  52.488 - * @data_ptr: address to store value read by probe (-1 returned if probe fails)
  52.489 - *
  52.490 - * Call into the SAL to do a memory read.  If the read generates a machine
  52.491 - * check, this routine will recover gracefully and return -1 to the caller.
  52.492 - * @addr is usually a kernel virtual address in uncached space (i.e. the
  52.493 - * address starts with 0xc), but if called in physical mode, @addr should
  52.494 - * be a physical address.
  52.495 - *
  52.496 - * Return values:
  52.497 - *  0 - probe successful
  52.498 - *  1 - probe failed (generated MCA)
  52.499 - *  2 - Bad arg
  52.500 - * <0 - PAL error
  52.501 - */
  52.502 -static inline u64
  52.503 -ia64_sn_probe_mem(long addr, long size, void *data_ptr)
  52.504 -{
  52.505 -	struct ia64_sal_retval isrv;
  52.506 -
  52.507 -	SAL_CALL(isrv, SN_SAL_PROBE, addr, size, 0, 0, 0, 0, 0);
  52.508 -
  52.509 -	if (data_ptr) {
  52.510 -		switch (size) {
  52.511 -		case 1:
  52.512 -			*((u8*)data_ptr) = (u8)isrv.v0;
  52.513 -			break;
  52.514 -		case 2:
  52.515 -			*((u16*)data_ptr) = (u16)isrv.v0;
  52.516 -			break;
  52.517 -		case 4:
  52.518 -			*((u32*)data_ptr) = (u32)isrv.v0;
  52.519 -			break;
  52.520 -		case 8:
  52.521 -			*((u64*)data_ptr) = (u64)isrv.v0;
  52.522 -			break;
  52.523 -		default:
  52.524 -			isrv.status = 2;
  52.525 -		}
  52.526 -	}
  52.527 -	return isrv.status;
  52.528 -}
  52.529 -
  52.530 -/*
  52.531 - * Retrieve the system serial number as an ASCII string.
  52.532 - */
  52.533 -static inline u64
  52.534 -ia64_sn_sys_serial_get(char *buf)
  52.535 -{
  52.536 -	struct ia64_sal_retval ret_stuff;
  52.537 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYS_SERIAL_GET, buf, 0, 0, 0, 0, 0, 0);
  52.538 -	return ret_stuff.status;
  52.539 -}
  52.540 -
  52.541 -extern char sn_system_serial_number_string[];
  52.542 -extern u64 sn_partition_serial_number;
  52.543 -
  52.544 -static inline char *
  52.545 -sn_system_serial_number(void) {
  52.546 -	if (sn_system_serial_number_string[0]) {
  52.547 -		return(sn_system_serial_number_string);
  52.548 -	} else {
  52.549 -		ia64_sn_sys_serial_get(sn_system_serial_number_string);
  52.550 -		return(sn_system_serial_number_string);
  52.551 -	}
  52.552 -}
  52.553 -	
  52.554 -
  52.555 -/*
  52.556 - * Returns a unique id number for this system and partition (suitable for
  52.557 - * use with license managers), based in part on the system serial number.
  52.558 - */
  52.559 -static inline u64
  52.560 -ia64_sn_partition_serial_get(void)
  52.561 -{
  52.562 -	struct ia64_sal_retval ret_stuff;
  52.563 -	SAL_CALL(ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, 0, 0, 0, 0, 0, 0);
  52.564 -	if (ret_stuff.status != 0)
  52.565 -	    return 0;
  52.566 -	return ret_stuff.v0;
  52.567 -}
  52.568 -
  52.569 -static inline u64
  52.570 -sn_partition_serial_number_val(void) {
  52.571 -	if (sn_partition_serial_number) {
  52.572 -		return(sn_partition_serial_number);
  52.573 -	} else {
  52.574 -		return(sn_partition_serial_number = ia64_sn_partition_serial_get());
  52.575 -	}
  52.576 -}
  52.577 -
  52.578 -/*
  52.579 - * Returns the partition id of the nasid passed in as an argument,
  52.580 - * or INVALID_PARTID if the partition id cannot be retrieved.
  52.581 - */
  52.582 -static inline partid_t
  52.583 -ia64_sn_sysctl_partition_get(nasid_t nasid)
  52.584 -{
  52.585 -	struct ia64_sal_retval ret_stuff;
  52.586 -	SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
  52.587 -		 0, 0, 0, 0, 0, 0);
  52.588 -	if (ret_stuff.status != 0)
  52.589 -	    return INVALID_PARTID;
  52.590 -	return ((partid_t)ret_stuff.v0);
  52.591 -}
  52.592 -
  52.593 -/*
  52.594 - * Returns the partition id of the current processor.
  52.595 - */
  52.596 -
  52.597 -extern partid_t sn_partid;
  52.598 -
  52.599 -static inline partid_t
  52.600 -sn_local_partid(void) {
  52.601 -	if (sn_partid < 0) {
  52.602 -		return (sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id())));
  52.603 -	} else {
  52.604 -		return sn_partid;
  52.605 -	}
  52.606 -}
  52.607 -
  52.608 -/*
  52.609 - * Register or unregister a physical address range being referenced across
  52.610 - * a partition boundary for which certain SAL errors should be scanned for,
  52.611 - * cleaned up and ignored.  This is of value for kernel partitioning code only.
  52.612 - * Values for the operation argument:
  52.613 - *	1 = register this address range with SAL
  52.614 - *	0 = unregister this address range with SAL
  52.615 - * 
  52.616 - * SAL maintains a reference count on an address range in case it is registered
  52.617 - * multiple times.
  52.618 - * 
  52.619 - * On success, returns the reference count of the address range after the SAL
  52.620 - * call has performed the current registration/unregistration.  Returns a
  52.621 - * negative value if an error occurred.
  52.622 - */
  52.623 -static inline int
  52.624 -sn_register_xp_addr_region(u64 paddr, u64 len, int operation)
  52.625 -{
  52.626 -	struct ia64_sal_retval ret_stuff;
  52.627 -	SAL_CALL(ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, (u64)operation,
  52.628 -		 0, 0, 0, 0);
  52.629 -	return ret_stuff.status;
  52.630 -}
  52.631 -
  52.632 -/*
  52.633 - * Register or unregister an instruction range for which SAL errors should
  52.634 - * be ignored.  If an error occurs while in the registered range, SAL jumps
  52.635 - * to return_addr after ignoring the error.  Values for the operation argument:
  52.636 - *	1 = register this instruction range with SAL
  52.637 - *	0 = unregister this instruction range with SAL
  52.638 - *
  52.639 - * Returns 0 on success, or a negative value if an error occurred.
  52.640 - */
  52.641 -static inline int
  52.642 -sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr,
  52.643 -			 int virtual, int operation)
  52.644 -{
  52.645 -	struct ia64_sal_retval ret_stuff;
  52.646 -	u64 call;
  52.647 -	if (virtual) {
  52.648 -		call = SN_SAL_NO_FAULT_ZONE_VIRTUAL;
  52.649 -	} else {
  52.650 -		call = SN_SAL_NO_FAULT_ZONE_PHYSICAL;
  52.651 -	}
  52.652 -	SAL_CALL(ret_stuff, call, start_addr, end_addr, return_addr, (u64)1,
  52.653 -		 0, 0, 0);
  52.654 -	return ret_stuff.status;
  52.655 -}
  52.656 -
  52.657 -/*
  52.658 - * Change or query the coherence domain for this partition. Each cpu-based
  52.659 - * nasid is represented by a bit in an array of 64-bit words:
  52.660 - *      0 = not in this partition's coherency domain
  52.661 - *      1 = in this partition's coherency domain
  52.662 - *
  52.663 - * It is not possible for the local system's nasids to be removed from
  52.664 - * the coherency domain.  Purpose of the domain arguments:
  52.665 - *      new_domain = set the coherence domain to the given nasids
  52.666 - *      old_domain = return the current coherence domain
  52.667 - *
  52.668 - * Returns 0 on success, or a negative value if an error occurred.
  52.669 - */
  52.670 -static inline int
  52.671 -sn_change_coherence(u64 *new_domain, u64 *old_domain)
  52.672 -{
  52.673 -	struct ia64_sal_retval ret_stuff;
  52.674 -	SAL_CALL(ret_stuff, SN_SAL_COHERENCE, new_domain, old_domain, 0, 0,
  52.675 -		 0, 0, 0);
  52.676 -	return ret_stuff.status;
  52.677 -}
  52.678 -
  52.679 -/*
  52.680 - * Change memory access protections for a physical address range.
  52.681 - * nasid_array is not used on Altix, but may be in future architectures.
  52.682 - * Available memory protection access classes are defined after the function.
  52.683 - */
  52.684 -static inline int
  52.685 -sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
  52.686 -{
  52.687 -	struct ia64_sal_retval ret_stuff;
  52.688 -	int cnodeid;
  52.689 -	unsigned long irq_flags;
  52.690 -
  52.691 -	cnodeid = nasid_to_cnodeid(get_node_number(paddr));
  52.692 -	// spin_lock(&NODEPDA(cnodeid)->bist_lock);
  52.693 -	local_irq_save(irq_flags);
  52.694 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_MEMPROTECT, paddr, len, nasid_array,
  52.695 -		 perms, 0, 0, 0);
  52.696 -	local_irq_restore(irq_flags);
  52.697 -	// spin_unlock(&NODEPDA(cnodeid)->bist_lock);
  52.698 -	return ret_stuff.status;
  52.699 -}
  52.700 -#define SN_MEMPROT_ACCESS_CLASS_0		0x14a080
  52.701 -#define SN_MEMPROT_ACCESS_CLASS_1		0x2520c2
  52.702 -#define SN_MEMPROT_ACCESS_CLASS_2		0x14a1ca
  52.703 -#define SN_MEMPROT_ACCESS_CLASS_3		0x14a290
  52.704 -#define SN_MEMPROT_ACCESS_CLASS_6		0x084080
  52.705 -#define SN_MEMPROT_ACCESS_CLASS_7		0x021080
  52.706 -
  52.707 -/*
  52.708 - * Turns off system power.
  52.709 - */
  52.710 -static inline void
  52.711 -ia64_sn_power_down(void)
  52.712 -{
  52.713 -	struct ia64_sal_retval ret_stuff;
  52.714 -	SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0);
  52.715 -	while(1);
  52.716 -	/* never returns */
  52.717 -}
  52.718 -
  52.719 -/**
  52.720 - * ia64_sn_fru_capture - tell the system controller to capture hw state
  52.721 - *
  52.722 - * This routine will call the SAL which will tell the system controller(s)
  52.723 - * to capture hw mmr information from each SHub in the system.
  52.724 - */
  52.725 -static inline u64
  52.726 -ia64_sn_fru_capture(void)
  52.727 -{
  52.728 -        struct ia64_sal_retval isrv;
  52.729 -        SAL_CALL(isrv, SN_SAL_SYSCTL_FRU_CAPTURE, 0, 0, 0, 0, 0, 0, 0);
  52.730 -        if (isrv.status)
  52.731 -                return 0;
  52.732 -        return isrv.v0;
  52.733 -}
  52.734 -
  52.735 -/*
  52.736 - * Performs an operation on a PCI bus or slot -- power up, power down
  52.737 - * or reset.
  52.738 - */
  52.739 -static inline u64
  52.740 -ia64_sn_sysctl_iobrick_pci_op(nasid_t n, u64 connection_type, 
  52.741 -			      u64 bus, char slot, 
  52.742 -			      u64 action)
  52.743 -{
  52.744 -	struct ia64_sal_retval rv = {0, 0, 0, 0};
  52.745 -
  52.746 -	SAL_CALL_NOLOCK(rv, SN_SAL_SYSCTL_IOBRICK_PCI_OP, connection_type, n, action,
  52.747 -		 bus, (u64) slot, 0, 0);
  52.748 -	if (rv.status)
  52.749 -	    	return rv.v0;
  52.750 -	return 0;
  52.751 -}
  52.752 -
  52.753 -
  52.754 -/*
  52.755 - * Open a subchannel for sending arbitrary data to the system
  52.756 - * controller network via the system controller device associated with
  52.757 - * 'nasid'.  Return the subchannel number or a negative error code.
  52.758 - */
  52.759 -static inline int
  52.760 -ia64_sn_irtr_open(nasid_t nasid)
  52.761 -{
  52.762 -	struct ia64_sal_retval rv;
  52.763 -	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_OPEN, nasid,
  52.764 -			   0, 0, 0, 0, 0);
  52.765 -	return (int) rv.v0;
  52.766 -}
  52.767 -
  52.768 -/*
  52.769 - * Close system controller subchannel 'subch' previously opened on 'nasid'.
  52.770 - */
  52.771 -static inline int
  52.772 -ia64_sn_irtr_close(nasid_t nasid, int subch)
  52.773 -{
  52.774 -	struct ia64_sal_retval rv;
  52.775 -	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_CLOSE,
  52.776 -			   (u64) nasid, (u64) subch, 0, 0, 0, 0);
  52.777 -	return (int) rv.status;
  52.778 -}
  52.779 -
  52.780 -/*
  52.781 - * Read data from system controller associated with 'nasid' on
  52.782 - * subchannel 'subch'.  The buffer to be filled is pointed to by
  52.783 - * 'buf', and its capacity is in the integer pointed to by 'len'.  The
  52.784 - * referent of 'len' is set to the number of bytes read by the SAL
  52.785 - * call.  The return value is either SALRET_OK (for bytes read) or
  52.786 - * SALRET_ERROR (for error or "no data available").
  52.787 - */
  52.788 -static inline int
  52.789 -ia64_sn_irtr_recv(nasid_t nasid, int subch, char *buf, int *len)
  52.790 -{
  52.791 -	struct ia64_sal_retval rv;
  52.792 -	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_RECV,
  52.793 -			   (u64) nasid, (u64) subch, (u64) buf, (u64) len,
  52.794 -			   0, 0);
  52.795 -	return (int) rv.status;
  52.796 -}
  52.797 -
  52.798 -/*
  52.799 - * Write data to the system controller network via the system
  52.800 - * controller associated with 'nasid' on suchannel 'subch'.  The
  52.801 - * buffer to be written out is pointed to by 'buf', and 'len' is the
  52.802 - * number of bytes to be written.  The return value is either the
  52.803 - * number of bytes written (which could be zero) or a negative error
  52.804 - * code.
  52.805 - */
  52.806 -static inline int
  52.807 -ia64_sn_irtr_send(nasid_t nasid, int subch, char *buf, int len)
  52.808 -{
  52.809 -	struct ia64_sal_retval rv;
  52.810 -	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_SEND,
  52.811 -			   (u64) nasid, (u64) subch, (u64) buf, (u64) len,
  52.812 -			   0, 0);
  52.813 -	return (int) rv.v0;
  52.814 -}
  52.815 -
  52.816 -/*
  52.817 - * Check whether any interrupts are pending for the system controller
  52.818 - * associated with 'nasid' and its subchannel 'subch'.  The return
  52.819 - * value is a mask of pending interrupts (SAL_IROUTER_INTR_XMIT and/or
  52.820 - * SAL_IROUTER_INTR_RECV).
  52.821 - */
  52.822 -static inline int
  52.823 -ia64_sn_irtr_intr(nasid_t nasid, int subch)
  52.824 -{
  52.825 -	struct ia64_sal_retval rv;
  52.826 -	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_STATUS,
  52.827 -			   (u64) nasid, (u64) subch, 0, 0, 0, 0);
  52.828 -	return (int) rv.v0;
  52.829 -}
  52.830 -
  52.831 -/*
  52.832 - * Enable the interrupt indicated by the intr parameter (either
  52.833 - * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
  52.834 - */
  52.835 -static inline int
  52.836 -ia64_sn_irtr_intr_enable(nasid_t nasid, int subch, u64 intr)
  52.837 -{
  52.838 -	struct ia64_sal_retval rv;
  52.839 -	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_ON,
  52.840 -			   (u64) nasid, (u64) subch, intr, 0, 0, 0);
  52.841 -	return (int) rv.v0;
  52.842 -}
  52.843 -
  52.844 -/*
  52.845 - * Disable the interrupt indicated by the intr parameter (either
  52.846 - * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
  52.847 - */
  52.848 -static inline int
  52.849 -ia64_sn_irtr_intr_disable(nasid_t nasid, int subch, u64 intr)
  52.850 -{
  52.851 -	struct ia64_sal_retval rv;
  52.852 -	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_OFF,
  52.853 -			   (u64) nasid, (u64) subch, intr, 0, 0, 0);
  52.854 -	return (int) rv.v0;
  52.855 -}
  52.856 -
  52.857 -/**
  52.858 - * ia64_sn_get_fit_compt - read a FIT entry from the PROM header
  52.859 - * @nasid: NASID of node to read
  52.860 - * @index: FIT entry index to be retrieved (0..n)
  52.861 - * @fitentry: 16 byte buffer where FIT entry will be stored.
  52.862 - * @banbuf: optional buffer for retrieving banner
  52.863 - * @banlen: length of banner buffer
  52.864 - *
  52.865 - * Access to the physical PROM chips needs to be serialized since reads and
  52.866 - * writes can't occur at the same time, so we need to call into the SAL when
  52.867 - * we want to look at the FIT entries on the chips.
  52.868 - *
  52.869 - * Returns:
  52.870 - *	%SALRET_OK if ok
  52.871 - *	%SALRET_INVALID_ARG if index too big
  52.872 - *	%SALRET_NOT_IMPLEMENTED if running on older PROM
  52.873 - *	??? if nasid invalid OR banner buffer not large enough
  52.874 - */
  52.875 -static inline int
  52.876 -ia64_sn_get_fit_compt(u64 nasid, u64 index, void *fitentry, void *banbuf,
  52.877 -		      u64 banlen)
  52.878 -{
  52.879 -	struct ia64_sal_retval rv;
  52.880 -	SAL_CALL_NOLOCK(rv, SN_SAL_GET_FIT_COMPT, nasid, index, fitentry,
  52.881 -			banbuf, banlen, 0, 0);
  52.882 -	return (int) rv.status;
  52.883 -}
  52.884 -
  52.885 -/*
  52.886 - * Initialize the SAL components of the system controller
  52.887 - * communication driver; specifically pass in a sizable buffer that
  52.888 - * can be used for allocation of subchannel queues as new subchannels
  52.889 - * are opened.  "buf" points to the buffer, and "len" specifies its
  52.890 - * length.
  52.891 - */
  52.892 -static inline int
  52.893 -ia64_sn_irtr_init(nasid_t nasid, void *buf, int len)
  52.894 -{
  52.895 -	struct ia64_sal_retval rv;
  52.896 -	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INIT,
  52.897 -			   (u64) nasid, (u64) buf, (u64) len, 0, 0, 0);
  52.898 -	return (int) rv.status;
  52.899 -}
  52.900 -
  52.901 -/*
  52.902 - * Returns the nasid, subnode & slice corresponding to a SAPIC ID
  52.903 - *
  52.904 - *  In:
  52.905 - *	arg0 - SN_SAL_GET_SAPIC_INFO
  52.906 - *	arg1 - sapicid (lid >> 16) 
  52.907 - *  Out:
  52.908 - *	v0 - nasid
  52.909 - *	v1 - subnode
  52.910 - *	v2 - slice
  52.911 - */
  52.912 -static inline u64
  52.913 -ia64_sn_get_sapic_info(int sapicid, int *nasid, int *subnode, int *slice)
  52.914 -{
  52.915 -	struct ia64_sal_retval ret_stuff;
  52.916 -
  52.917 -	ret_stuff.status = 0;
  52.918 -	ret_stuff.v0 = 0;
  52.919 -	ret_stuff.v1 = 0;
  52.920 -	ret_stuff.v2 = 0;
  52.921 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO, sapicid, 0, 0, 0, 0, 0, 0);
  52.922 -
  52.923 -/***** BEGIN HACK - temp til old proms no longer supported ********/
  52.924 -	if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
  52.925 -		if (nasid) *nasid = sapicid & 0xfff;
  52.926 -		if (subnode) *subnode = (sapicid >> 13) & 1;
  52.927 -		if (slice) *slice = (sapicid >> 12) & 3;
  52.928 -		return 0;
  52.929 -	}
  52.930 -/***** END HACK *******/
  52.931 -
  52.932 -	if (ret_stuff.status < 0)
  52.933 -		return ret_stuff.status;
  52.934 -
  52.935 -	if (nasid) *nasid = (int) ret_stuff.v0;
  52.936 -	if (subnode) *subnode = (int) ret_stuff.v1;
  52.937 -	if (slice) *slice = (int) ret_stuff.v2;
  52.938 -	return 0;
  52.939 -}
  52.940 - 
  52.941 -/*
  52.942 - * Returns information about the HUB/SHUB.
  52.943 - *  In:
  52.944 - *	arg0 - SN_SAL_GET_HUB_INFO
  52.945 - * 	arg1 - 0 (other values reserved for future use)
  52.946 - *  Out:
  52.947 - *	v0 - shub type (0=shub1, 1=shub2)
  52.948 - *	v1 - masid mask (ex., 0x7ff for 11 bit nasid)
  52.949 - *	v2 - bit position of low nasid bit
  52.950 - */
  52.951 -static inline u64
  52.952 -ia64_sn_get_hub_info(int fc, u64 *arg1, u64 *arg2, u64 *arg3)
  52.953 -{
  52.954 -	struct ia64_sal_retval ret_stuff;
  52.955 -
  52.956 -	ret_stuff.status = 0;
  52.957 -	ret_stuff.v0 = 0;
  52.958 -	ret_stuff.v1 = 0;
  52.959 -	ret_stuff.v2 = 0;
  52.960 -	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_HUB_INFO, fc, 0, 0, 0, 0, 0, 0);
  52.961 -
  52.962 -/***** BEGIN HACK - temp til old proms no longer supported ********/
  52.963 -	if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
  52.964 -		if (arg1) *arg1 = 0;
  52.965 -		if (arg2) *arg2 = 0x7ff;
  52.966 -		if (arg3) *arg3 = 38;
  52.967 -		return 0;
  52.968 -	}
  52.969 -/***** END HACK *******/
  52.970 -
  52.971 -	if (ret_stuff.status < 0)
  52.972 -		return ret_stuff.status;
  52.973 -
  52.974 -	if (arg1) *arg1 = ret_stuff.v0;
  52.975 -	if (arg2) *arg2 = ret_stuff.v1;
  52.976 -	if (arg3) *arg3 = ret_stuff.v2;
  52.977 -	return 0;
  52.978 -}
  52.979 - 
  52.980 -/*
  52.981 - * This is the access point to the Altix PROM hardware performance
  52.982 - * and status monitoring interface. For info on using this, see
  52.983 - * include/asm-ia64/sn/sn2/sn_hwperf.h
  52.984 - */
  52.985 -static inline int
  52.986 -ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2,
  52.987 -                  u64 a3, u64 a4, int *v0)
  52.988 -{
  52.989 -	struct ia64_sal_retval rv;
  52.990 -	SAL_CALL_NOLOCK(rv, SN_SAL_HWPERF_OP, (u64)nasid,
  52.991 -		opcode, a0, a1, a2, a3, a4);
  52.992 -	if (v0)
  52.993 -		*v0 = (int) rv.v0;
  52.994 -	return (int) rv.status;
  52.995 -}
  52.996 -#endif /* !XEN */
  52.997 -#endif /* _ASM_IA64_SN_SN_SAL_H */
    53.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    53.2 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/types.h	Wed Dec 20 14:55:02 2006 -0700
    53.3 @@ -0,0 +1,28 @@
    53.4 +/*
    53.5 + * This file is subject to the terms and conditions of the GNU General Public
    53.6 + * License.  See the file "COPYING" in the main directory of this archive
    53.7 + * for more details.
    53.8 + *
    53.9 + * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc.  All Rights Reserved.
   53.10 + * Copyright (C) 1999 by Ralf Baechle
   53.11 + */
   53.12 +#ifndef _ASM_IA64_SN_TYPES_H
   53.13 +#define _ASM_IA64_SN_TYPES_H
   53.14 +
   53.15 +#include <linux/types.h>
   53.16 +
   53.17 +typedef unsigned long 	cpuid_t;
   53.18 +typedef signed short	nasid_t;	/* node id in numa-as-id space */
   53.19 +typedef signed char	partid_t;	/* partition ID type */
   53.20 +typedef unsigned int    moduleid_t;     /* user-visible module number type */
   53.21 +typedef unsigned int    cmoduleid_t;    /* kernel compact module id type */
   53.22 +typedef unsigned char	slotid_t;	/* slot (blade) within module */
   53.23 +typedef unsigned char	slabid_t;	/* slab (asic) within slot */
   53.24 +typedef u64 nic_t;
   53.25 +typedef unsigned long iopaddr_t;
   53.26 +#ifndef XEN
   53.27 +typedef unsigned long paddr_t;
   53.28 +#endif
   53.29 +typedef short cnodeid_t;
   53.30 +
   53.31 +#endif /* _ASM_IA64_SN_TYPES_H */
    54.1 --- a/xen/include/asm-ia64/linux-xen/asm/system.h	Wed Dec 20 08:53:42 2006 -0700
    54.2 +++ b/xen/include/asm-ia64/linux-xen/asm/system.h	Wed Dec 20 14:55:02 2006 -0700
    54.3 @@ -190,6 +190,7 @@ do {								\
    54.4  #ifdef XEN
    54.5  #define local_irq_is_enabled() (!irqs_disabled())
    54.6  extern struct vcpu *ia64_switch_to(struct vcpu *next_task);
    54.7 +#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
    54.8  #else
    54.9  #ifdef __KERNEL__
   54.10  
    55.1 --- a/xen/include/asm-ia64/linux-xen/asm/types.h	Wed Dec 20 08:53:42 2006 -0700
    55.2 +++ b/xen/include/asm-ia64/linux-xen/asm/types.h	Wed Dec 20 14:55:02 2006 -0700
    55.3 @@ -74,6 +74,14 @@ typedef unsigned short kmem_bufctl_t;
    55.4  
    55.5  #ifdef XEN
    55.6  #include <asm/xentypes.h>
    55.7 +
    55.8 +#ifndef __ASSEMBLY__
    55.9 +typedef unsigned int gfp_t;
   55.10 +typedef u64 resource_size_t;
   55.11 +typedef u32 dev_t;
   55.12 +typedef unsigned int mode_t;
   55.13 +#define THIS_MODULE	NULL
   55.14 +#endif
   55.15  #endif
   55.16  
   55.17  #endif /* _ASM_IA64_TYPES_H */
    56.1 --- a/xen/include/asm-ia64/linux-xen/linux/README.origin	Wed Dec 20 08:53:42 2006 -0700
    56.2 +++ b/xen/include/asm-ia64/linux-xen/linux/README.origin	Wed Dec 20 14:55:02 2006 -0700
    56.3 @@ -12,3 +12,8 @@ interrupt.h 		-> linux/include/linux/int
    56.4  
    56.5  # The files below are from Linux-2.6.16.33
    56.6  oprofile.h		-> linux/include/linux/oprofile.h
    56.7 +
    56.8 +# The files below are from Linux-2.6.19
    56.9 +pci.h			-> linux/include/linux/pci.h
   56.10 +kobject.h		-> linux/include/linux/kobject.h
   56.11 +device.h		-> linux/include/linux/device.h
    57.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    57.2 +++ b/xen/include/asm-ia64/linux-xen/linux/device.h	Wed Dec 20 14:55:02 2006 -0700
    57.3 @@ -0,0 +1,489 @@
    57.4 +/*
    57.5 + * device.h - generic, centralized driver model
    57.6 + *
    57.7 + * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
    57.8 + *
    57.9 + * This file is released under the GPLv2
   57.10 + *
   57.11 + * See Documentation/driver-model/ for more information.
   57.12 + */
   57.13 +
   57.14 +#ifndef _DEVICE_H_
   57.15 +#define _DEVICE_H_
   57.16 +
   57.17 +#include <linux/ioport.h>
   57.18 +#include <linux/kobject.h>
   57.19 +#include <linux/klist.h>
   57.20 +#include <linux/list.h>
   57.21 +#include <linux/compiler.h>
   57.22 +#include <linux/types.h>
   57.23 +#include <linux/module.h>
   57.24 +#include <linux/pm.h>
   57.25 +#include <asm/semaphore.h>
   57.26 +#include <asm/atomic.h>
   57.27 +
   57.28 +#define DEVICE_NAME_SIZE	50
   57.29 +#define DEVICE_NAME_HALF	__stringify(20)	/* Less than half to accommodate slop */
   57.30 +#define DEVICE_ID_SIZE		32
   57.31 +#define BUS_ID_SIZE		KOBJ_NAME_LEN
   57.32 +
   57.33 +
   57.34 +struct device;
   57.35 +struct device_driver;
   57.36 +struct class;
   57.37 +struct class_device;
   57.38 +
   57.39 +struct bus_type {
   57.40 +	const char		* name;
   57.41 +
   57.42 +	struct subsystem	subsys;
   57.43 +	struct kset		drivers;
   57.44 +	struct kset		devices;
   57.45 +	struct klist		klist_devices;
   57.46 +	struct klist		klist_drivers;
   57.47 +
   57.48 +	struct bus_attribute	* bus_attrs;
   57.49 +	struct device_attribute	* dev_attrs;
   57.50 +	struct driver_attribute	* drv_attrs;
   57.51 +
   57.52 +	int		(*match)(struct device * dev, struct device_driver * drv);
   57.53 +	int		(*uevent)(struct device *dev, char **envp,
   57.54 +				  int num_envp, char *buffer, int buffer_size);
   57.55 +	int		(*probe)(struct device * dev);
   57.56 +	int		(*remove)(struct device * dev);
   57.57 +	void		(*shutdown)(struct device * dev);
   57.58 +
   57.59 +	int (*suspend)(struct device * dev, pm_message_t state);
   57.60 +	int (*suspend_late)(struct device * dev, pm_message_t state);
   57.61 +	int (*resume_early)(struct device * dev);
   57.62 +	int (*resume)(struct device * dev);
   57.63 +};
   57.64 +
   57.65 +extern int __must_check bus_register(struct bus_type * bus);
   57.66 +extern void bus_unregister(struct bus_type * bus);
   57.67 +
   57.68 +extern int __must_check bus_rescan_devices(struct bus_type * bus);
   57.69 +
   57.70 +/* iterator helpers for buses */
   57.71 +
   57.72 +int bus_for_each_dev(struct bus_type * bus, struct device * start, void * data,
   57.73 +		     int (*fn)(struct device *, void *));
   57.74 +struct device * bus_find_device(struct bus_type *bus, struct device *start,
   57.75 +				void *data, int (*match)(struct device *, void *));
   57.76 +
   57.77 +int __must_check bus_for_each_drv(struct bus_type *bus,
   57.78 +		struct device_driver *start, void *data,
   57.79 +		int (*fn)(struct device_driver *, void *));
   57.80 +
   57.81 +/* driverfs interface for exporting bus attributes */
   57.82 +
   57.83 +struct bus_attribute {
   57.84 +#ifndef XEN
   57.85 +	struct attribute	attr;
   57.86 +#endif
   57.87 +	ssize_t (*show)(struct bus_type *, char * buf);
   57.88 +	ssize_t (*store)(struct bus_type *, const char * buf, size_t count);
   57.89 +};
   57.90 +
   57.91 +#define BUS_ATTR(_name,_mode,_show,_store)	\
   57.92 +struct bus_attribute bus_attr_##_name = __ATTR(_name,_mode,_show,_store)
   57.93 +
   57.94 +extern int __must_check bus_create_file(struct bus_type *,
   57.95 +					struct bus_attribute *);
   57.96 +extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
   57.97 +
   57.98 +struct device_driver {
   57.99 +	const char		* name;
  57.100 +	struct bus_type		* bus;
  57.101 +
  57.102 +	struct completion	unloaded;
  57.103 +	struct kobject		kobj;
  57.104 +	struct klist		klist_devices;
  57.105 +	struct klist_node	knode_bus;
  57.106 +
  57.107 +	struct module		* owner;
  57.108 +
  57.109 +	int	(*probe)	(struct device * dev);
  57.110 +	int	(*remove)	(struct device * dev);
  57.111 +	void	(*shutdown)	(struct device * dev);
  57.112 +	int	(*suspend)	(struct device * dev, pm_message_t state);
  57.113 +	int	(*resume)	(struct device * dev);
  57.114 +
  57.115 +	unsigned int multithread_probe:1;
  57.116 +};
  57.117 +
  57.118 +
  57.119 +extern int __must_check driver_register(struct device_driver * drv);
  57.120 +extern void driver_unregister(struct device_driver * drv);
  57.121 +
  57.122 +extern struct device_driver * get_driver(struct device_driver * drv);
  57.123 +extern void put_driver(struct device_driver * drv);
  57.124 +extern struct device_driver *driver_find(const char *name, struct bus_type *bus);
  57.125 +extern int driver_probe_done(void);
  57.126 +
  57.127 +/* driverfs interface for exporting driver attributes */
  57.128 +
  57.129 +struct driver_attribute {
  57.130 +#ifndef XEN
  57.131 +	struct attribute	attr;
  57.132 +#endif
  57.133 +	ssize_t (*show)(struct device_driver *, char * buf);
  57.134 +	ssize_t (*store)(struct device_driver *, const char * buf, size_t count);
  57.135 +};
  57.136 +
  57.137 +#define DRIVER_ATTR(_name,_mode,_show,_store)	\
  57.138 +struct driver_attribute driver_attr_##_name = __ATTR(_name,_mode,_show,_store)
  57.139 +
  57.140 +extern int __must_check driver_create_file(struct device_driver *,
  57.141 +					struct driver_attribute *);
  57.142 +extern void driver_remove_file(struct device_driver *, struct driver_attribute *);
  57.143 +
  57.144 +extern int __must_check driver_for_each_device(struct device_driver * drv,
  57.145 +		struct device *start, void *data,
  57.146 +		int (*fn)(struct device *, void *));
  57.147 +struct device * driver_find_device(struct device_driver *drv,
  57.148 +				   struct device *start, void *data,
  57.149 +				   int (*match)(struct device *, void *));
  57.150 +
  57.151 +/*
  57.152 + * device classes
  57.153 + */
  57.154 +struct class {
  57.155 +	const char		* name;
  57.156 +	struct module		* owner;
  57.157 +
  57.158 +	struct subsystem	subsys;
  57.159 +	struct list_head	children;
  57.160 +	struct list_head	devices;
  57.161 +	struct list_head	interfaces;
  57.162 +#ifdef XEN
  57.163 +	spinlock_t		sem;
  57.164 +#else
  57.165 +	struct semaphore	sem;	/* locks both the children and interfaces lists */
  57.166 +#endif
  57.167 +
  57.168 +	struct kobject		*virtual_dir;
  57.169 +
  57.170 +	struct class_attribute		* class_attrs;
  57.171 +	struct class_device_attribute	* class_dev_attrs;
  57.172 +	struct device_attribute		* dev_attrs;
  57.173 +
  57.174 +	int	(*uevent)(struct class_device *dev, char **envp,
  57.175 +			   int num_envp, char *buffer, int buffer_size);
  57.176 +	int	(*dev_uevent)(struct device *dev, char **envp, int num_envp,
  57.177 +				char *buffer, int buffer_size);
  57.178 +
  57.179 +	void	(*release)(struct class_device *dev);
  57.180 +	void	(*class_release)(struct class *class);
  57.181 +	void	(*dev_release)(struct device *dev);
  57.182 +
  57.183 +	int	(*suspend)(struct device *, pm_message_t state);
  57.184 +	int	(*resume)(struct device *);
  57.185 +};
  57.186 +
  57.187 +extern int __must_check class_register(struct class *);
  57.188 +extern void class_unregister(struct class *);
  57.189 +
  57.190 +
  57.191 +struct class_attribute {
  57.192 +#ifndef XEN
  57.193 +	struct attribute	attr;
  57.194 +#endif
  57.195 +	ssize_t (*show)(struct class *, char * buf);
  57.196 +	ssize_t (*store)(struct class *, const char * buf, size_t count);
  57.197 +};
  57.198 +
  57.199 +#define CLASS_ATTR(_name,_mode,_show,_store)			\
  57.200 +struct class_attribute class_attr_##_name = __ATTR(_name,_mode,_show,_store) 
  57.201 +
  57.202 +extern int __must_check class_create_file(struct class *,
  57.203 +					const struct class_attribute *);
  57.204 +extern void class_remove_file(struct class *, const struct class_attribute *);
  57.205 +
  57.206 +struct class_device_attribute {
  57.207 +#ifndef XEN
  57.208 +	struct attribute	attr;
  57.209 +#endif
  57.210 +	ssize_t (*show)(struct class_device *, char * buf);
  57.211 +	ssize_t (*store)(struct class_device *, const char * buf, size_t count);
  57.212 +};
  57.213 +
  57.214 +#define CLASS_DEVICE_ATTR(_name,_mode,_show,_store)		\
  57.215 +struct class_device_attribute class_device_attr_##_name = 	\
  57.216 +	__ATTR(_name,_mode,_show,_store)
  57.217 +
  57.218 +extern int __must_check class_device_create_file(struct class_device *,
  57.219 +				    const struct class_device_attribute *);
  57.220 +
  57.221 +/**
  57.222 + * struct class_device - class devices
  57.223 + * @class: pointer to the parent class for this class device.  This is required.
  57.224 + * @devt: for internal use by the driver core only.
  57.225 + * @node: for internal use by the driver core only.
  57.226 + * @kobj: for internal use by the driver core only.
  57.227 + * @devt_attr: for internal use by the driver core only.
  57.228 + * @groups: optional additional groups to be created
  57.229 + * @dev: if set, a symlink to the struct device is created in the sysfs
  57.230 + * directory for this struct class device.
  57.231 + * @class_data: pointer to whatever you want to store here for this struct
  57.232 + * class_device.  Use class_get_devdata() and class_set_devdata() to get and
  57.233 + * set this pointer.
  57.234 + * @parent: pointer to a struct class_device that is the parent of this struct
  57.235 + * class_device.  If NULL, this class_device will show up at the root of the
  57.236 + * struct class in sysfs (which is probably what you want to have happen.)
  57.237 + * @release: pointer to a release function for this struct class_device.  If
  57.238 + * set, this will be called instead of the class specific release function.
  57.239 + * Only use this if you want to override the default release function, like
  57.240 + * when you are nesting class_device structures.
  57.241 + * @uevent: pointer to a uevent function for this struct class_device.  If
  57.242 + * set, this will be called instead of the class specific uevent function.
  57.243 + * Only use this if you want to override the default uevent function, like
  57.244 + * when you are nesting class_device structures.
  57.245 + */
  57.246 +struct class_device {
  57.247 +	struct list_head	node;
  57.248 +
  57.249 +	struct kobject		kobj;
  57.250 +	struct class		* class;	/* required */
  57.251 +	dev_t			devt;		/* dev_t, creates the sysfs "dev" */
  57.252 +	struct class_device_attribute *devt_attr;
  57.253 +	struct class_device_attribute uevent_attr;
  57.254 +	struct device		* dev;		/* not necessary, but nice to have */
  57.255 +	void			* class_data;	/* class-specific data */
  57.256 +	struct class_device	*parent;	/* parent of this child device, if there is one */
  57.257 +	struct attribute_group  ** groups;	/* optional groups */
  57.258 +
  57.259 +	void	(*release)(struct class_device *dev);
  57.260 +	int	(*uevent)(struct class_device *dev, char **envp,
  57.261 +			   int num_envp, char *buffer, int buffer_size);
  57.262 +	char	class_id[BUS_ID_SIZE];	/* unique to this class */
  57.263 +};
  57.264 +
  57.265 +static inline void *
  57.266 +class_get_devdata (struct class_device *dev)
  57.267 +{
  57.268 +	return dev->class_data;
  57.269 +}
  57.270 +
  57.271 +static inline void
  57.272 +class_set_devdata (struct class_device *dev, void *data)
  57.273 +{
  57.274 +	dev->class_data = data;
  57.275 +}
  57.276 +
  57.277 +
  57.278 +extern int __must_check class_device_register(struct class_device *);
  57.279 +extern void class_device_unregister(struct class_device *);
  57.280 +extern void class_device_initialize(struct class_device *);
  57.281 +extern int __must_check class_device_add(struct class_device *);
  57.282 +extern void class_device_del(struct class_device *);
  57.283 +
  57.284 +extern int class_device_rename(struct class_device *, char *);
  57.285 +
  57.286 +extern struct class_device * class_device_get(struct class_device *);
  57.287 +extern void class_device_put(struct class_device *);
  57.288 +
  57.289 +extern void class_device_remove_file(struct class_device *, 
  57.290 +				     const struct class_device_attribute *);
  57.291 +extern int __must_check class_device_create_bin_file(struct class_device *,
  57.292 +					struct bin_attribute *);
  57.293 +extern void class_device_remove_bin_file(struct class_device *,
  57.294 +					 struct bin_attribute *);
  57.295 +
  57.296 +struct class_interface {
  57.297 +	struct list_head	node;
  57.298 +	struct class		*class;
  57.299 +
  57.300 +	int (*add)	(struct class_device *, struct class_interface *);
  57.301 +	void (*remove)	(struct class_device *, struct class_interface *);
  57.302 +	int (*add_dev)		(struct device *, struct class_interface *);
  57.303 +	void (*remove_dev)	(struct device *, struct class_interface *);
  57.304 +};
  57.305 +
  57.306 +extern int __must_check class_interface_register(struct class_interface *);
  57.307 +extern void class_interface_unregister(struct class_interface *);
  57.308 +
  57.309 +extern struct class *class_create(struct module *owner, const char *name);
  57.310 +extern void class_destroy(struct class *cls);
  57.311 +extern struct class_device *class_device_create(struct class *cls,
  57.312 +						struct class_device *parent,
  57.313 +						dev_t devt,
  57.314 +						struct device *device,
  57.315 +						const char *fmt, ...)
  57.316 +					__attribute__((format(printf,5,6)));
  57.317 +extern void class_device_destroy(struct class *cls, dev_t devt);
  57.318 +
  57.319 +/* interface for exporting device attributes */
  57.320 +struct device_attribute {
  57.321 +	struct attribute	attr;
  57.322 +	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
  57.323 +			char *buf);
  57.324 +	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
  57.325 +			 const char *buf, size_t count);
  57.326 +};
  57.327 +
  57.328 +#define DEVICE_ATTR(_name,_mode,_show,_store) \
  57.329 +struct device_attribute dev_attr_##_name = __ATTR(_name,_mode,_show,_store)
  57.330 +
  57.331 +extern int __must_check device_create_file(struct device *device,
  57.332 +					struct device_attribute * entry);
  57.333 +extern void device_remove_file(struct device * dev, struct device_attribute * attr);
  57.334 +extern int __must_check device_create_bin_file(struct device *dev,
  57.335 +					       struct bin_attribute *attr);
  57.336 +extern void device_remove_bin_file(struct device *dev,
  57.337 +				   struct bin_attribute *attr);
  57.338 +struct device {
  57.339 +	struct klist		klist_children;
  57.340 +	struct klist_node	knode_parent;		/* node in sibling list */
  57.341 +	struct klist_node	knode_driver;
  57.342 +	struct klist_node	knode_bus;
  57.343 +	struct device 	* parent;
  57.344 +
  57.345 +	struct kobject kobj;
  57.346 +	char	bus_id[BUS_ID_SIZE];	/* position on parent bus */
  57.347 +	unsigned		is_registered:1;
  57.348 +	struct device_attribute uevent_attr;
  57.349 +	struct device_attribute *devt_attr;
  57.350 +
  57.351 +#ifdef XEN
  57.352 +	spinlock_t		sem;
  57.353 +#else
  57.354 +	struct semaphore	sem;	/* semaphore to synchronize calls to
  57.355 +					 * its driver.
  57.356 +					 */
  57.357 +#endif
  57.358 +
  57.359 +	struct bus_type	* bus;		/* type of bus device is on */
  57.360 +	struct device_driver *driver;	/* which driver has allocated this
  57.361 +					   device */
  57.362 +	void		*driver_data;	/* data private to the driver */
  57.363 +	void		*platform_data;	/* Platform specific data, device
  57.364 +					   core doesn't touch it */
  57.365 +	void		*firmware_data; /* Firmware specific data (e.g. ACPI,
  57.366 +					   BIOS data),reserved for device core*/
  57.367 +	struct dev_pm_info	power;
  57.368 +
  57.369 +	u64		*dma_mask;	/* dma mask (if dma'able device) */
  57.370 +	u64		coherent_dma_mask;/* Like dma_mask, but for
  57.371 +					     alloc_coherent mappings as
  57.372 +					     not all hardware supports
  57.373 +					     64 bit addresses for consistent
  57.374 +					     allocations such descriptors. */
  57.375 +
  57.376 +	struct list_head	dma_pools;	/* dma pools (if dma'ble) */
  57.377 +
  57.378 +	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
  57.379 +					     override */
  57.380 +
  57.381 +	/* class_device migration path */
  57.382 +	struct list_head	node;
  57.383 +	struct class		*class;		/* optional*/
  57.384 +	dev_t			devt;		/* dev_t, creates the sysfs "dev" */
  57.385 +	struct attribute_group	**groups;	/* optional groups */
  57.386 +
  57.387 +	void	(*release)(struct device * dev);
  57.388 +};
  57.389 +
  57.390 +static inline void *
  57.391 +dev_get_drvdata (struct device *dev)
  57.392 +{
  57.393 +	return dev->driver_data;
  57.394 +}
  57.395 +
  57.396 +static inline void
  57.397 +dev_set_drvdata (struct device *dev, void *data)
  57.398 +{
  57.399 +	dev->driver_data = data;
  57.400 +}
  57.401 +
  57.402 +static inline int device_is_registered(struct device *dev)
  57.403 +{
  57.404 +	return dev->is_registered;
  57.405 +}
  57.406 +
  57.407 +/*
  57.408 + * High level routines for use by the bus drivers
  57.409 + */
  57.410 +extern int __must_check device_register(struct device * dev);
  57.411 +extern void device_unregister(struct device * dev);
  57.412 +extern void device_initialize(struct device * dev);
  57.413 +extern int __must_check device_add(struct device * dev);
  57.414 +extern void device_del(struct device * dev);
  57.415 +extern int device_for_each_child(struct device *, void *,
  57.416 +		     int (*fn)(struct device *, void *));
  57.417 +extern int device_rename(struct device *dev, char *new_name);
  57.418 +
  57.419 +/*
  57.420 + * Manual binding of a device to driver. See drivers/base/bus.c
  57.421 + * for information on use.
  57.422 + */
  57.423 +extern int __must_check device_bind_driver(struct device *dev);
  57.424 +extern void device_release_driver(struct device * dev);
  57.425 +extern int  __must_check device_attach(struct device * dev);
  57.426 +extern int __must_check driver_attach(struct device_driver *drv);
  57.427 +extern int __must_check device_reprobe(struct device *dev);
  57.428 +
  57.429 +/*
  57.430 + * Easy functions for dynamically creating devices on the fly
  57.431 + */
  57.432 +extern struct device *device_create(struct class *cls, struct device *parent,
  57.433 +				    dev_t devt, const char *fmt, ...)
  57.434 +				    __attribute__((format(printf,4,5)));
  57.435 +extern void device_destroy(struct class *cls, dev_t devt);
  57.436 +
  57.437 +extern int virtual_device_parent(struct device *dev);
  57.438 +
  57.439 +/*
  57.440 + * Platform "fixup" functions - allow the platform to have their say
  57.441 + * about devices and actions that the general device layer doesn't
  57.442 + * know about.
  57.443 + */
  57.444 +/* Notify platform of device discovery */
  57.445 +extern int (*platform_notify)(struct device * dev);
  57.446 +
  57.447 +extern int (*platform_notify_remove)(struct device * dev);
  57.448 +
  57.449 +
  57.450 +/**
  57.451 + * get_device - atomically increment the reference count for the device.
  57.452 + *
  57.453 + */
  57.454 +extern struct device * get_device(struct device * dev);
  57.455 +extern void put_device(struct device * dev);
  57.456 +
  57.457 +
  57.458 +/* drivers/base/power/shutdown.c */
  57.459 +extern void device_shutdown(void);
  57.460 +
  57.461 +
  57.462 +/* drivers/base/firmware.c */
  57.463 +extern int __must_check firmware_register(struct subsystem *);
  57.464 +extern void firmware_unregister(struct subsystem *);
  57.465 +
  57.466 +/* debugging and troubleshooting/diagnostic helpers. */
  57.467 +extern const char *dev_driver_string(struct device *dev);
  57.468 +#define dev_printk(level, dev, format, arg...)	\
  57.469 +	printk(level "%s %s: " format , dev_driver_string(dev) , (dev)->bus_id , ## arg)
  57.470 +
  57.471 +#ifdef DEBUG
  57.472 +#define dev_dbg(dev, format, arg...)		\
  57.473 +	dev_printk(KERN_DEBUG , dev , format , ## arg)
  57.474 +#else
  57.475 +#define dev_dbg(dev, format, arg...) do { (void)(dev); } while (0)
  57.476 +#endif
  57.477 +
  57.478 +#define dev_err(dev, format, arg...)		\
  57.479 +	dev_printk(KERN_ERR , dev , format , ## arg)
  57.480 +#define dev_info(dev, format, arg...)		\
  57.481 +	dev_printk(KERN_INFO , dev , format , ## arg)
  57.482 +#define dev_warn(dev, format, arg...)		\
  57.483 +	dev_printk(KERN_WARNING , dev , format , ## arg)
  57.484 +#define dev_notice(dev, format, arg...)		\
  57.485 +	dev_printk(KERN_NOTICE , dev , format , ## arg)
  57.486 +
  57.487 +/* Create alias, so I can be autoloaded. */
  57.488 +#define MODULE_ALIAS_CHARDEV(major,minor) \
  57.489 +	MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
  57.490 +#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
  57.491 +	MODULE_ALIAS("char-major-" __stringify(major) "-*")
  57.492 +#endif /* _DEVICE_H_ */
    58.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    58.2 +++ b/xen/include/asm-ia64/linux-xen/linux/kobject.h	Wed Dec 20 14:55:02 2006 -0700
    58.3 @@ -0,0 +1,286 @@
    58.4 +/*
    58.5 + * kobject.h - generic kernel object infrastructure.
    58.6 + *
    58.7 + * Copyright (c) 2002-2003	Patrick Mochel
    58.8 + * Copyright (c) 2002-2003	Open Source Development Labs
    58.9 + *
   58.10 + * This file is released under the GPLv2.
   58.11 + *
   58.12 + * 
   58.13 + * Please read Documentation/kobject.txt before using the kobject
   58.14 + * interface, ESPECIALLY the parts about reference counts and object
   58.15 + * destructors. 
   58.16 + */
   58.17 +
   58.18 +#ifndef _KOBJECT_H_
   58.19 +#define _KOBJECT_H_
   58.20 +
   58.21 +#ifdef __KERNEL__
   58.22 +
   58.23 +#include <linux/types.h>
   58.24 +#include <linux/list.h>
   58.25 +#include <linux/sysfs.h>
   58.26 +#include <linux/compiler.h>
   58.27 +#include <linux/spinlock.h>
   58.28 +#include <linux/rwsem.h>
   58.29 +#include <linux/kref.h>
   58.30 +#include <linux/kernel.h>
   58.31 +#include <linux/wait.h>
   58.32 +#include <asm/atomic.h>
   58.33 +
   58.34 +#define KOBJ_NAME_LEN			20
   58.35 +#define UEVENT_HELPER_PATH_LEN		256
   58.36 +
   58.37 +/* path to the userspace helper executed on an event */
   58.38 +extern char uevent_helper[];
   58.39 +
   58.40 +/* counter to tag the uevent, read only except for the kobject core */
   58.41 +extern u64 uevent_seqnum;
   58.42 +
   58.43 +/* the actions here must match the proper string in lib/kobject_uevent.c */
   58.44 +typedef int __bitwise kobject_action_t;
   58.45 +enum kobject_action {
   58.46 +	KOBJ_ADD	= (__force kobject_action_t) 0x01,	/* exclusive to core */
   58.47 +	KOBJ_REMOVE	= (__force kobject_action_t) 0x02,	/* exclusive to core */
   58.48 +	KOBJ_CHANGE	= (__force kobject_action_t) 0x03,	/* device state change */
   58.49 +	KOBJ_MOUNT	= (__force kobject_action_t) 0x04,	/* mount event for block devices (broken) */
   58.50 +	KOBJ_UMOUNT	= (__force kobject_action_t) 0x05,	/* umount event for block devices (broken) */
   58.51 +	KOBJ_OFFLINE	= (__force kobject_action_t) 0x06,	/* device offline */
   58.52 +	KOBJ_ONLINE	= (__force kobject_action_t) 0x07,	/* device online */
   58.53 +};
   58.54 +
   58.55 +struct kobject {
   58.56 +	const char		* k_name;
   58.57 +	char			name[KOBJ_NAME_LEN];
   58.58 +	struct kref		kref;
   58.59 +	struct list_head	entry;
   58.60 +	struct kobject		* parent;
   58.61 +	struct kset		* kset;
   58.62 +	struct kobj_type	* ktype;
   58.63 +	struct dentry		* dentry;
   58.64 +	wait_queue_head_t	poll;
   58.65 +};
   58.66 +
   58.67 +extern int kobject_set_name(struct kobject *, const char *, ...)
   58.68 +	__attribute__((format(printf,2,3)));
   58.69 +
   58.70 +static inline const char * kobject_name(const struct kobject * kobj)
   58.71 +{
   58.72 +	return kobj->k_name;
   58.73 +}
   58.74 +
   58.75 +extern void kobject_init(struct kobject *);
   58.76 +extern void kobject_cleanup(struct kobject *);
   58.77 +
   58.78 +extern int __must_check kobject_add(struct kobject *);
   58.79 +extern void kobject_del(struct kobject *);
   58.80 +
   58.81 +extern int __must_check kobject_rename(struct kobject *, const char *new_name);
   58.82 +
   58.83 +extern int __must_check kobject_register(struct kobject *);
   58.84 +extern void kobject_unregister(struct kobject *);
   58.85 +
   58.86 +extern struct kobject * kobject_get(struct kobject *);
   58.87 +extern void kobject_put(struct kobject *);
   58.88 +
   58.89 +extern struct kobject *kobject_add_dir(struct kobject *, const char *);
   58.90 +
   58.91 +extern char * kobject_get_path(struct kobject *, gfp_t);
   58.92 +
   58.93 +struct kobj_type {
   58.94 +	void (*release)(struct kobject *);
   58.95 +	struct sysfs_ops	* sysfs_ops;
   58.96 +	struct attribute	** default_attrs;
   58.97 +};
   58.98 +
   58.99 +
  58.100 +/**
  58.101 + *	kset - a set of kobjects of a specific type, belonging
  58.102 + *	to a specific subsystem.
  58.103 + *
  58.104 + *	All kobjects of a kset should be embedded in an identical 
  58.105 + *	type. This type may have a descriptor, which the kset points
  58.106 + *	to. This allows there to exist sets of objects of the same
  58.107 + *	type in different subsystems.
  58.108 + *
  58.109 + *	A subsystem does not have to be a list of only one type 
  58.110 + *	of object; multiple ksets can belong to one subsystem. All 
  58.111 + *	ksets of a subsystem share the subsystem's lock.
  58.112 + *
  58.113 + *	Each kset can support specific event variables; it can
  58.114 + *	supress the event generation or add subsystem specific
  58.115 + *	variables carried with the event.
  58.116 + */
  58.117 +struct kset_uevent_ops {
  58.118 +	int (*filter)(struct kset *kset, struct kobject *kobj);
  58.119 +	const char *(*name)(struct kset *kset, struct kobject *kobj);
  58.120 +	int (*uevent)(struct kset *kset, struct kobject *kobj, char **envp,
  58.121 +			int num_envp, char *buffer, int buffer_size);
  58.122 +};
  58.123 +
  58.124 +struct kset {
  58.125 +	struct subsystem	* subsys;
  58.126 +	struct kobj_type	* ktype;
  58.127 +	struct list_head	list;
  58.128 +	spinlock_t		list_lock;
  58.129 +	struct kobject		kobj;
  58.130 +	struct kset_uevent_ops	* uevent_ops;
  58.131 +};
  58.132 +
  58.133 +
  58.134 +extern void kset_init(struct kset * k);
  58.135 +extern int __must_check kset_add(struct kset * k);
  58.136 +extern int __must_check kset_register(struct kset * k);
  58.137 +extern void kset_unregister(struct kset * k);
  58.138 +
  58.139 +static inline struct kset * to_kset(struct kobject * kobj)
  58.140 +{
  58.141 +	return kobj ? container_of(kobj,struct kset,kobj) : NULL;
  58.142 +}
  58.143 +
  58.144 +static inline struct kset * kset_get(struct kset * k)
  58.145 +{
  58.146 +	return k ? to_kset(kobject_get(&k->kobj)) : NULL;
  58.147 +}
  58.148 +
  58.149 +static inline void kset_put(struct kset * k)
  58.150 +{
  58.151 +	kobject_put(&k->kobj);
  58.152 +}
  58.153 +
  58.154 +static inline struct kobj_type * get_ktype(struct kobject * k)
  58.155 +{
  58.156 +	if (k->kset && k->kset->ktype)
  58.157 +		return k->kset->ktype;
  58.158 +	else 
  58.159 +		return k->ktype;
  58.160 +}
  58.161 +
  58.162 +extern struct kobject * kset_find_obj(struct kset *, const char *);
  58.163 +
  58.164 +
  58.165 +/**
  58.166 + * Use this when initializing an embedded kset with no other 
  58.167 + * fields to initialize.
  58.168 + */
  58.169 +#define set_kset_name(str)	.kset = { .kobj = { .name = str } }
  58.170 +
  58.171 +
  58.172 +
  58.173 +struct subsystem {
  58.174 +	struct kset		kset;
  58.175 +#ifndef XEN
  58.176 +	struct rw_semaphore	rwsem;
  58.177 +#endif
  58.178 +};
  58.179 +
  58.180 +#define decl_subsys(_name,_type,_uevent_ops) \
  58.181 +struct subsystem _name##_subsys = { \
  58.182 +	.kset = { \
  58.183 +		.kobj = { .name = __stringify(_name) }, \
  58.184 +		.ktype = _type, \
  58.185 +		.uevent_ops =_uevent_ops, \
  58.186 +	} \
  58.187 +}
  58.188 +#define decl_subsys_name(_varname,_name,_type,_uevent_ops) \
  58.189 +struct subsystem _varname##_subsys = { \
  58.190 +	.kset = { \
  58.191 +		.kobj = { .name = __stringify(_name) }, \
  58.192 +		.ktype = _type, \
  58.193 +		.uevent_ops =_uevent_ops, \
  58.194 +	} \
  58.195 +}
  58.196 +
  58.197 +/* The global /sys/kernel/ subsystem for people to chain off of */
  58.198 +extern struct subsystem kernel_subsys;
  58.199 +/* The global /sys/hypervisor/ subsystem  */
  58.200 +extern struct subsystem hypervisor_subsys;
  58.201 +
  58.202 +/**
  58.203 + * Helpers for setting the kset of registered objects.
  58.204 + * Often, a registered object belongs to a kset embedded in a 
  58.205 + * subsystem. These do no magic, just make the resulting code
  58.206 + * easier to follow. 
  58.207 + */
  58.208 +
  58.209 +/**
  58.210 + *	kobj_set_kset_s(obj,subsys) - set kset for embedded kobject.
  58.211 + *	@obj:		ptr to some object type.
  58.212 + *	@subsys:	a subsystem object (not a ptr).
  58.213 + *
  58.214 + *	Can be used for any object type with an embedded ->kobj.
  58.215 + */
  58.216 +
  58.217 +#define kobj_set_kset_s(obj,subsys) \
  58.218 +	(obj)->kobj.kset = &(subsys).kset
  58.219 +
  58.220 +/**
  58.221 + *	kset_set_kset_s(obj,subsys) - set kset for embedded kset.
  58.222 + *	@obj:		ptr to some object type.
  58.223 + *	@subsys:	a subsystem object (not a ptr).
  58.224 + *
  58.225 + *	Can be used for any object type with an embedded ->kset.
  58.226 + *	Sets the kset of @obj's  embedded kobject (via its embedded
  58.227 + *	kset) to @subsys.kset. This makes @obj a member of that 
  58.228 + *	kset.
  58.229 + */
  58.230 +
  58.231 +#define kset_set_kset_s(obj,subsys) \
  58.232 +	(obj)->kset.kobj.kset = &(subsys).kset
  58.233 +
  58.234 +/**
  58.235 + *	subsys_set_kset(obj,subsys) - set kset for subsystem
  58.236 + *	@obj:		ptr to some object type.
  58.237 + *	@subsys:	a subsystem object (not a ptr).
  58.238 + *
  58.239 + *	Can be used for any object type with an embedded ->subsys.
  58.240 + *	Sets the kset of @obj's kobject to @subsys.kset. This makes
  58.241 + *	the object a member of that kset.
  58.242 + */
  58.243 +
  58.244 +#define subsys_set_kset(obj,_subsys) \
  58.245 +	(obj)->subsys.kset.kobj.kset = &(_subsys).kset
  58.246 +
  58.247 +extern void subsystem_init(struct subsystem *);
  58.248 +extern int __must_check subsystem_register(struct subsystem *);
  58.249 +extern void subsystem_unregister(struct subsystem *);
  58.250 +
  58.251 +static inline struct subsystem * subsys_get(struct subsystem * s)
  58.252 +{
  58.253 +	return s ? container_of(kset_get(&s->kset),struct subsystem,kset) : NULL;
  58.254 +}
  58.255 +
  58.256 +static inline void subsys_put(struct subsystem * s)
  58.257 +{
  58.258 +	kset_put(&s->kset);
  58.259 +}
  58.260 +
  58.261 +struct subsys_attribute {
  58.262 +#ifndef XEN
  58.263 +	struct attribute attr;
  58.264 +#endif
  58.265 +	ssize_t (*show)(struct subsystem *, char *);
  58.266 +	ssize_t (*store)(struct subsystem *, const char *, size_t); 
  58.267 +};
  58.268 +
  58.269 +extern int __must_check subsys_create_file(struct subsystem * ,
  58.270 +					struct subsys_attribute *);
  58.271 +
  58.272 +#if defined(CONFIG_HOTPLUG)
  58.273 +void kobject_uevent(struct kobject *kobj, enum kobject_action action);
  58.274 +
  58.275 +int add_uevent_var(char **envp, int num_envp, int *cur_index,
  58.276 +			char *buffer, int buffer_size, int *cur_len,
  58.277 +			const char *format, ...)
  58.278 +	__attribute__((format (printf, 7, 8)));
  58.279 +#else
  58.280 +static inline void kobject_uevent(struct kobject *kobj, enum kobject_action action) { }
  58.281 +
  58.282 +static inline int add_uevent_var(char **envp, int num_envp, int *cur_index,
  58.283 +				      char *buffer, int buffer_size, int *cur_len, 
  58.284 +				      const char *format, ...)
  58.285 +{ return 0; }
  58.286 +#endif
  58.287 +
  58.288 +#endif /* __KERNEL__ */
  58.289 +#endif /* _KOBJECT_H_ */
    59.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    59.2 +++ b/xen/include/asm-ia64/linux-xen/linux/pci.h	Wed Dec 20 14:55:02 2006 -0700
    59.3 @@ -0,0 +1,820 @@
    59.4 +/*
    59.5 + *	pci.h
    59.6 + *
    59.7 + *	PCI defines and function prototypes
    59.8 + *	Copyright 1994, Drew Eckhardt
    59.9 + *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
   59.10 + *
   59.11 + *	For more information, please consult the following manuals (look at
   59.12 + *	http://www.pcisig.com/ for how to get them):
   59.13 + *
   59.14 + *	PCI BIOS Specification
   59.15 + *	PCI Local Bus Specification
   59.16 + *	PCI to PCI Bridge Specification
   59.17 + *	PCI System Design Guide
   59.18 + */
   59.19 +
   59.20 +#ifndef LINUX_PCI_H
   59.21 +#define LINUX_PCI_H
   59.22 +
   59.23 +/* Include the pci register defines */
   59.24 +#include <linux/pci_regs.h>
   59.25 +
   59.26 +/* Include the ID list */
   59.27 +#include <linux/pci_ids.h>
   59.28 +#ifdef XEN
   59.29 +#include <asm/processor.h>
   59.30 +#endif
   59.31 +
   59.32 +/*
   59.33 + * The PCI interface treats multi-function devices as independent
   59.34 + * devices.  The slot/function address of each device is encoded
   59.35 + * in a single byte as follows:
   59.36 + *
   59.37 + *	7:3 = slot
   59.38 + *	2:0 = function
   59.39 + */
   59.40 +#define PCI_DEVFN(slot,func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
   59.41 +#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
   59.42 +#define PCI_FUNC(devfn)		((devfn) & 0x07)
   59.43 +
   59.44 +/* Ioctls for /proc/bus/pci/X/Y nodes. */
   59.45 +#define PCIIOC_BASE		('P' << 24 | 'C' << 16 | 'I' << 8)
   59.46 +#define PCIIOC_CONTROLLER	(PCIIOC_BASE | 0x00)	/* Get controller for PCI device. */
   59.47 +#define PCIIOC_MMAP_IS_IO	(PCIIOC_BASE | 0x01)	/* Set mmap state to I/O space. */
   59.48 +#define PCIIOC_MMAP_IS_MEM	(PCIIOC_BASE | 0x02)	/* Set mmap state to MEM space. */
   59.49 +#define PCIIOC_WRITE_COMBINE	(PCIIOC_BASE | 0x03)	/* Enable/disable write-combining. */
   59.50 +
   59.51 +#ifdef __KERNEL__
   59.52 +
   59.53 +#include <linux/mod_devicetable.h>
   59.54 +
   59.55 +#include <linux/types.h>
   59.56 +#include <linux/ioport.h>
   59.57 +#include <linux/list.h>
   59.58 +#include <linux/compiler.h>
   59.59 +#include <linux/errno.h>
   59.60 +#include <linux/device.h>
   59.61 +
   59.62 +/* File state for mmap()s on /proc/bus/pci/X/Y */
   59.63 +enum pci_mmap_state {
   59.64 +	pci_mmap_io,
   59.65 +	pci_mmap_mem
   59.66 +};
   59.67 +
   59.68 +/* This defines the direction arg to the DMA mapping routines. */
   59.69 +#define PCI_DMA_BIDIRECTIONAL	0
   59.70 +#define PCI_DMA_TODEVICE	1
   59.71 +#define PCI_DMA_FROMDEVICE	2
   59.72 +#define PCI_DMA_NONE		3
   59.73 +
   59.74 +#define DEVICE_COUNT_COMPATIBLE	4
   59.75 +#define DEVICE_COUNT_RESOURCE	12
   59.76 +
   59.77 +typedef int __bitwise pci_power_t;
   59.78 +
   59.79 +#define PCI_D0		((pci_power_t __force) 0)
   59.80 +#define PCI_D1		((pci_power_t __force) 1)
   59.81 +#define PCI_D2		((pci_power_t __force) 2)
   59.82 +#define PCI_D3hot	((pci_power_t __force) 3)
   59.83 +#define PCI_D3cold	((pci_power_t __force) 4)
   59.84 +#define PCI_UNKNOWN	((pci_power_t __force) 5)
   59.85 +#define PCI_POWER_ERROR	((pci_power_t __force) -1)
   59.86 +
   59.87 +/** The pci_channel state describes connectivity between the CPU and
   59.88 + *  the pci device.  If some PCI bus between here and the pci device
   59.89 + *  has crashed or locked up, this info is reflected here.
   59.90 + */
   59.91 +typedef unsigned int __bitwise pci_channel_state_t;
   59.92 +
   59.93 +enum pci_channel_state {
   59.94 +	/* I/O channel is in normal state */
   59.95 +	pci_channel_io_normal = (__force pci_channel_state_t) 1,
   59.96 +
   59.97 +	/* I/O to channel is blocked */
   59.98 +	pci_channel_io_frozen = (__force pci_channel_state_t) 2,
   59.99 +
  59.100 +	/* PCI card is dead */
  59.101 +	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
  59.102 +};
  59.103 +
  59.104 +typedef unsigned short __bitwise pci_bus_flags_t;
  59.105 +enum pci_bus_flags {
  59.106 +	PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
  59.107 +};
  59.108 +
  59.109 +struct pci_cap_saved_state {
  59.110 +	struct hlist_node next;
  59.111 +	char cap_nr;
  59.112 +	u32 data[0];
  59.113 +};
  59.114 +
  59.115 +/*
  59.116 + * The pci_dev structure is used to describe PCI devices.
  59.117 + */
  59.118 +struct pci_dev {
  59.119 +	struct list_head global_list;	/* node in list of all PCI devices */
  59.120 +	struct list_head bus_list;	/* node in per-bus list */
  59.121 +	struct pci_bus	*bus;		/* bus this device is on */
  59.122 +	struct pci_bus	*subordinate;	/* bus this device bridges to */
  59.123 +
  59.124 +	void		*sysdata;	/* hook for sys-specific extension */
  59.125 +	struct proc_dir_entry *procent;	/* device entry in /proc/bus/pci */
  59.126 +
  59.127 +	unsigned int	devfn;		/* encoded device & function index */
  59.128 +	unsigned short	vendor;
  59.129 +	unsigned short	device;
  59.130 +	unsigned short	subsystem_vendor;
  59.131 +	unsigned short	subsystem_device;
  59.132 +	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
  59.133 +	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
  59.134 +	u8		rom_base_reg;	/* which config register controls the ROM */
  59.135 +	u8		pin;  		/* which interrupt pin this device uses */
  59.136 +
  59.137 +	struct pci_driver *driver;	/* which driver has allocated this device */
  59.138 +	u64		dma_mask;	/* Mask of the bits of bus address this
  59.139 +					   device implements.  Normally this is
  59.140 +					   0xffffffff.  You only need to change
  59.141 +					   this if your device has broken DMA
  59.142 +					   or supports 64-bit transfers.  */
  59.143 +
  59.144 +	pci_power_t     current_state;  /* Current operating state. In ACPI-speak,
  59.145 +					   this is D0-D3, D0 being fully functional,
  59.146 +					   and D3 being off. */
  59.147 +
  59.148 +	pci_channel_state_t error_state;	/* current connectivity state */
  59.149 +	struct	device	dev;		/* Generic device interface */
  59.150 +
  59.151 +	/* device is compatible with these IDs */
  59.152 +	unsigned short vendor_compatible[DEVICE_COUNT_COMPATIBLE];
  59.153 +	unsigned short device_compatible[DEVICE_COUNT_COMPATIBLE];
  59.154 +
  59.155 +	int		cfg_size;	/* Size of configuration space */
  59.156 +
  59.157 +	/*
  59.158 +	 * Instead of touching interrupt line and base address registers
  59.159 +	 * directly, use the values stored here. They might be different!
  59.160 +	 */
  59.161 +	unsigned int	irq;
  59.162 +	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
  59.163 +
  59.164 +	/* These fields are used by common fixups */
  59.165 +	unsigned int	transparent:1;	/* Transparent PCI bridge */
  59.166 +	unsigned int	multifunction:1;/* Part of multi-function device */
  59.167 +	/* keep track of device state */
  59.168 +	unsigned int	is_enabled:1;	/* pci_enable_device has been called */
  59.169 +	unsigned int	is_busmaster:1; /* device is busmaster */
  59.170 +	unsigned int	no_msi:1;	/* device may not use msi */
  59.171 +	unsigned int	no_d1d2:1;   /* only allow d0 or d3 */
  59.172 +	unsigned int	block_ucfg_access:1;	/* userspace config space access is blocked */
  59.173 +	unsigned int	broken_parity_status:1;	/* Device generates false positive parity */
  59.174 +	unsigned int 	msi_enabled:1;
  59.175 +	unsigned int	msix_enabled:1;
  59.176 +
  59.177 +	u32		saved_config_space[16]; /* config space saved at suspend time */
  59.178 +	struct hlist_head saved_cap_space;
  59.179 +	struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
  59.180 +	int rom_attr_enabled;		/* has display of the rom attribute been enabled? */
  59.181 +	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
  59.182 +};
  59.183 +
  59.184 +#define pci_dev_g(n) list_entry(n, struct pci_dev, global_list)
  59.185 +#define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list)
  59.186 +#define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
  59.187 +#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
  59.188 +
  59.189 +static inline struct pci_cap_saved_state *pci_find_saved_cap(
  59.190 +	struct pci_dev *pci_dev,char cap)
  59.191 +{
  59.192 +	struct pci_cap_saved_state *tmp;
  59.193 +	struct hlist_node *pos;
  59.194 +
  59.195 +	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
  59.196 +		if (tmp->cap_nr == cap)
  59.197 +			return tmp;
  59.198 +	}
  59.199 +	return NULL;
  59.200 +}
  59.201 +
  59.202 +static inline void pci_add_saved_cap(struct pci_dev *pci_dev,
  59.203 +	struct pci_cap_saved_state *new_cap)
  59.204 +{
  59.205 +	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
  59.206 +}
  59.207 +
  59.208 +static inline void pci_remove_saved_cap(struct pci_cap_saved_state *cap)
  59.209 +{
  59.210 +	hlist_del(&cap->next);
  59.211 +}
  59.212 +
  59.213 +/*
  59.214 + *  For PCI devices, the region numbers are assigned this way:
  59.215 + *
  59.216 + *	0-5	standard PCI regions
  59.217 + *	6	expansion ROM
  59.218 + *	7-10	bridges: address space assigned to buses behind the bridge
  59.219 + */
  59.220 +
  59.221 +#define PCI_ROM_RESOURCE	6
  59.222 +#define PCI_BRIDGE_RESOURCES	7
  59.223 +#define PCI_NUM_RESOURCES	11
  59.224 +
  59.225 +#ifndef PCI_BUS_NUM_RESOURCES
  59.226 +#define PCI_BUS_NUM_RESOURCES	8
  59.227 +#endif
  59.228 +
  59.229 +#define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
  59.230 +
  59.231 +struct pci_bus {
  59.232 +	struct list_head node;		/* node in list of buses */
  59.233 +	struct pci_bus	*parent;	/* parent bus this bridge is on */
  59.234 +	struct list_head children;	/* list of child buses */
  59.235 +	struct list_head devices;	/* list of devices on this bus */
  59.236 +	struct pci_dev	*self;		/* bridge device as seen by parent */
  59.237 +	struct resource	*resource[PCI_BUS_NUM_RESOURCES];
  59.238 +					/* address space routed to this bus */
  59.239 +
  59.240 +	struct pci_ops	*ops;		/* configuration access functions */
  59.241 +	void		*sysdata;	/* hook for sys-specific extension */
  59.242 +	struct proc_dir_entry *procdir;	/* directory entry in /proc/bus/pci */
  59.243 +
  59.244 +	unsigned char	number;		/* bus number */
  59.245 +	unsigned char	primary;	/* number of primary bridge */
  59.246 +	unsigned char	secondary;	/* number of secondary bridge */
  59.247 +	unsigned char	subordinate;	/* max number of subordinate buses */
  59.248 +
  59.249 +	char		name[48];
  59.250 +
  59.251 +	unsigned short  bridge_ctl;	/* manage NO_ISA/FBB/et al behaviors */
  59.252 +	pci_bus_flags_t bus_flags;	/* Inherited by child busses */
  59.253 +	struct device		*bridge;
  59.254 +	struct class_device	class_dev;
  59.255 +	struct bin_attribute	*legacy_io; /* legacy I/O for this bus */
  59.256 +	struct bin_attribute	*legacy_mem; /* legacy mem */
  59.257 +};
  59.258 +
  59.259 +#define pci_bus_b(n)	list_entry(n, struct pci_bus, node)
  59.260 +#define to_pci_bus(n)	container_of(n, struct pci_bus, class_dev)
  59.261 +
  59.262 +/*
  59.263 + * Error values that may be returned by PCI functions.
  59.264 + */
  59.265 +#define PCIBIOS_SUCCESSFUL		0x00
  59.266 +#define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
  59.267 +#define PCIBIOS_BAD_VENDOR_ID		0x83
  59.268 +#define PCIBIOS_DEVICE_NOT_FOUND	0x86
  59.269 +#define PCIBIOS_BAD_REGISTER_NUMBER	0x87
  59.270 +#define PCIBIOS_SET_FAILED		0x88
  59.271 +#define PCIBIOS_BUFFER_TOO_SMALL	0x89
  59.272 +
  59.273 +/* Low-level architecture-dependent routines */
  59.274 +
  59.275 +struct pci_ops {
  59.276 +	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
  59.277 +	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
  59.278 +};
  59.279 +
  59.280 +struct pci_raw_ops {
  59.281 +	int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
  59.282 +		    int reg, int len, u32 *val);
  59.283 +	int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
  59.284 +		     int reg, int len, u32 val);
  59.285 +};
  59.286 +
  59.287 +extern struct pci_raw_ops *raw_pci_ops;
  59.288 +
  59.289 +struct pci_bus_region {
  59.290 +	unsigned long start;
  59.291 +	unsigned long end;
  59.292 +};
  59.293 +
  59.294 +struct pci_dynids {
  59.295 +	spinlock_t lock;            /* protects list, index */
  59.296 +	struct list_head list;      /* for IDs added at runtime */
  59.297 +	unsigned int use_driver_data:1; /* pci_driver->driver_data is used */
  59.298 +};
  59.299 +
  59.300 +/* ---------------------------------------------------------------- */
  59.301 +/** PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
  59.302 + *  a set of callbacks in struct pci_error_handlers, then that device driver
  59.303 + *  will be notified of PCI bus errors, and will be driven to recovery
  59.304 + *  when an error occurs.
  59.305 + */
  59.306 +
  59.307 +typedef unsigned int __bitwise pci_ers_result_t;
  59.308 +
  59.309 +enum pci_ers_result {
  59.310 +	/* no result/none/not supported in device driver */
  59.311 +	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
  59.312 +
  59.313 +	/* Device driver can recover without slot reset */
  59.314 +	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
  59.315 +
  59.316 +	/* Device driver wants slot to be reset. */
  59.317 +	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
  59.318 +
  59.319 +	/* Device has completely failed, is unrecoverable */
  59.320 +	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
  59.321 +
  59.322 +	/* Device driver is fully recovered and operational */
  59.323 +	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
  59.324 +};
  59.325 +
  59.326 +/* PCI bus error event callbacks */
  59.327 +struct pci_error_handlers
  59.328 +{
  59.329 +	/* PCI bus error detected on this device */
  59.330 +	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
  59.331 +	                      enum pci_channel_state error);
  59.332 +
  59.333 +	/* MMIO has been re-enabled, but not DMA */
  59.334 +	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
  59.335 +
  59.336 +	/* PCI Express link has been reset */
  59.337 +	pci_ers_result_t (*link_reset)(struct pci_dev *dev);
  59.338 +
  59.339 +	/* PCI slot has been reset */
  59.340 +	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
  59.341 +
  59.342 +	/* Device driver may resume normal operations */
  59.343 +	void (*resume)(struct pci_dev *dev);
  59.344 +};
  59.345 +
  59.346 +/* ---------------------------------------------------------------- */
  59.347 +
  59.348 +struct module;
  59.349 +struct pci_driver {
  59.350 +	struct list_head node;
  59.351 +	char *name;
  59.352 +	const struct pci_device_id *id_table;	/* must be non-NULL for probe to be called */
  59.353 +	int  (*probe)  (struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
  59.354 +	void (*remove) (struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
  59.355 +	int  (*suspend) (struct pci_dev *dev, pm_message_t state);	/* Device suspended */
  59.356 +	int  (*suspend_late) (struct pci_dev *dev, pm_message_t state);
  59.357 +	int  (*resume_early) (struct pci_dev *dev);
  59.358 +	int  (*resume) (struct pci_dev *dev);	                /* Device woken up */
  59.359 +	int  (*enable_wake) (struct pci_dev *dev, pci_power_t state, int enable);   /* Enable wake event */
  59.360 +	void (*shutdown) (struct pci_dev *dev);
  59.361 +
  59.362 +	struct pci_error_handlers *err_handler;
  59.363 +	struct device_driver	driver;
  59.364 +	struct pci_dynids dynids;
  59.365 +
  59.366 +	int multithread_probe;
  59.367 +};
  59.368 +
  59.369 +#define	to_pci_driver(drv) container_of(drv,struct pci_driver, driver)
  59.370 +
  59.371 +/**
  59.372 + * PCI_DEVICE - macro used to describe a specific pci device
  59.373 + * @vend: the 16 bit PCI Vendor ID
  59.374 + * @dev: the 16 bit PCI Device ID
  59.375 + *
  59.376 + * This macro is used to create a struct pci_device_id that matches a
  59.377 + * specific device.  The subvendor and subdevice fields will be set to
  59.378 + * PCI_ANY_ID.
  59.379 + */
  59.380 +#define PCI_DEVICE(vend,dev) \
  59.381 +	.vendor = (vend), .device = (dev), \
  59.382 +	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
  59.383 +
  59.384 +/**
  59.385 + * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
  59.386 + * @dev_class: the class, subclass, prog-if triple for this device
  59.387 + * @dev_class_mask: the class mask for this device
  59.388 + *
  59.389 + * This macro is used to create a struct pci_device_id that matches a
  59.390 + * specific PCI class.  The vendor, device, subvendor, and subdevice
  59.391 + * fields will be set to PCI_ANY_ID.
  59.392 + */
  59.393 +#define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
  59.394 +	.class = (dev_class), .class_mask = (dev_class_mask), \
  59.395 +	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
  59.396 +	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
  59.397 +
  59.398 +/*
  59.399 + * pci_module_init is obsolete, this stays here till we fix up all usages of it
  59.400 + * in the tree.
  59.401 + */
  59.402 +#define pci_module_init	pci_register_driver
  59.403 +
  59.404 +/* these external functions are only available when PCI support is enabled */
  59.405 +#ifdef CONFIG_PCI
  59.406 +
  59.407 +extern struct bus_type pci_bus_type;
  59.408 +
  59.409 +/* Do NOT directly access these two variables, unless you are arch specific pci
  59.410 + * code, or pci core code. */
  59.411 +extern struct list_head pci_root_buses;	/* list of all known PCI buses */
  59.412 +extern struct list_head pci_devices;	/* list of all devices */
  59.413 +
  59.414 +void pcibios_fixup_bus(struct pci_bus *);
  59.415 +int __must_check pcibios_enable_device(struct pci_dev *, int mask);
  59.416 +char *pcibios_setup (char *str);
  59.417 +
  59.418 +/* Used only when drivers/pci/setup.c is used */
  59.419 +void pcibios_align_resource(void *, struct resource *, resource_size_t,
  59.420 +				resource_size_t);
  59.421 +void pcibios_update_irq(struct pci_dev *, int irq);
  59.422 +
  59.423 +/* Generic PCI functions used internally */
  59.424 +
  59.425 +extern struct pci_bus *pci_find_bus(int domain, int busnr);
  59.426 +void pci_bus_add_devices(struct pci_bus *bus);
  59.427 +struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, struct pci_ops *ops, void *sysdata);
  59.428 +static inline struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata)
  59.429 +{
  59.430 +	struct pci_bus *root_bus;
  59.431 +	root_bus = pci_scan_bus_parented(NULL, bus, ops, sysdata);
  59.432 +	if (root_bus)
  59.433 +		pci_bus_add_devices(root_bus);
  59.434 +	return root_bus;
  59.435 +}
  59.436 +struct pci_bus *pci_create_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata);
  59.437 +struct pci_bus * pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr);
  59.438 +int pci_scan_slot(struct pci_bus *bus, int devfn);
  59.439 +struct pci_dev * pci_scan_single_device(struct pci_bus *bus, int devfn);
  59.440 +void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
  59.441 +unsigned int pci_scan_child_bus(struct pci_bus *bus);
  59.442 +int __must_check pci_bus_add_device(struct pci_dev *dev);
  59.443 +void pci_read_bridge_bases(struct pci_bus *child);
  59.444 +struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res);
  59.445 +int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
  59.446 +extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
  59.447 +extern void pci_dev_put(struct pci_dev *dev);
  59.448 +extern void pci_remove_bus(struct pci_bus *b);
  59.449 +extern void pci_remove_bus_device(struct pci_dev *dev);
  59.450 +extern void pci_stop_bus_device(struct pci_dev *dev);
  59.451 +void pci_setup_cardbus(struct pci_bus *bus);
  59.452 +extern void pci_sort_breadthfirst(void);
  59.453 +
  59.454 +/* Generic PCI functions exported to card drivers */
  59.455 +
  59.456 +struct pci_dev *pci_find_device (unsigned int vendor, unsigned int device, const struct pci_dev *from);
  59.457 +struct pci_dev *pci_find_device_reverse (unsigned int vendor, unsigned int device, const struct pci_dev *from);
  59.458 +struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn);
  59.459 +int pci_find_capability (struct pci_dev *dev, int cap);
  59.460 +int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap);
  59.461 +int pci_find_ext_capability (struct pci_dev *dev, int cap);
  59.462 +struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
  59.463 +
  59.464 +struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
  59.465 +				struct pci_dev *from);
  59.466 +struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device,
  59.467 +				struct pci_dev *from);
  59.468 +
  59.469 +struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device,
  59.470 +				unsigned int ss_vendor, unsigned int ss_device,
  59.471 +				struct pci_dev *from);
  59.472 +struct pci_dev *pci_get_slot (struct pci_bus *bus, unsigned int devfn);
  59.473 +struct pci_dev *pci_get_bus_and_slot (unsigned int bus, unsigned int devfn);
  59.474 +struct pci_dev *pci_get_class (unsigned int class, struct pci_dev *from);
  59.475 +int pci_dev_present(const struct pci_device_id *ids);
  59.476 +
  59.477 +int pci_bus_read_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 *val);
  59.478 +int pci_bus_read_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 *val);
  59.479 +int pci_bus_read_config_dword (struct pci_bus *bus, unsigned int devfn, int where, u32 *val);
  59.480 +int pci_bus_write_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 val);
  59.481 +int pci_bus_write_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 val);
  59.482 +int pci_bus_write_config_dword (struct pci_bus *bus, unsigned int devfn, int where, u32 val);
  59.483 +
  59.484 +static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val)
  59.485 +{
  59.486 +	return pci_bus_read_config_byte (dev->bus, dev->devfn, where, val);
  59.487 +}
  59.488 +static inline int pci_read_config_word(struct pci_dev *dev, int where, u16 *val)
  59.489 +{
  59.490 +	return pci_bus_read_config_word (dev->bus, dev->devfn, where, val);
  59.491 +}
  59.492 +static inline int pci_read_config_dword(struct pci_dev *dev, int where, u32 *val)
  59.493 +{
  59.494 +	return pci_bus_read_config_dword (dev->bus, dev->devfn, where, val);
  59.495 +}
  59.496 +static inline int pci_write_config_byte(struct pci_dev *dev, int where, u8 val)
  59.497 +{
  59.498 +	return pci_bus_write_config_byte (dev->bus, dev->devfn, where, val);
  59.499 +}
  59.500 +static inline int pci_write_config_word(struct pci_dev *dev, int where, u16 val)
  59.501 +{
  59.502 +	return pci_bus_write_config_word (dev->bus, dev->devfn, where, val);
  59.503 +}
  59.504 +static inline int pci_write_config_dword(struct pci_dev *dev, int where, u32 val)
  59.505 +{
  59.506 +	return pci_bus_write_config_dword (dev->bus, dev->devfn, where, val);
  59.507 +}
  59.508 +
  59.509 +int __must_check pci_enable_device(struct pci_dev *dev);
  59.510 +int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask);
  59.511 +void pci_disable_device(struct pci_dev *dev);
  59.512 +void pci_set_master(struct pci_dev *dev);
  59.513 +#define HAVE_PCI_SET_MWI
  59.514 +int __must_check pci_set_mwi(struct pci_dev *dev);
  59.515 +void pci_clear_mwi(struct pci_dev *dev);
  59.516 +void pci_intx(struct pci_dev *dev, int enable);
  59.517 +int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
  59.518 +int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask);
  59.519 +void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
  59.520 +int __must_check pci_assign_resource(struct pci_dev *dev, int i);
  59.521 +int __must_check pci_assign_resource_fixed(struct pci_dev *dev, int i);
  59.522 +void pci_restore_bars(struct pci_dev *dev);
  59.523 +
  59.524 +/* ROM control related routines */
  59.525 +void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
  59.526 +void __iomem __must_check *pci_map_rom_copy(struct pci_dev *pdev, size_t *size);
  59.527 +void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
  59.528 +void pci_remove_rom(struct pci_dev *pdev);
  59.529 +
  59.530 +/* Power management related routines */
  59.531 +int pci_save_state(struct pci_dev *dev);
  59.532 +int pci_restore_state(struct pci_dev *dev);
  59.533 +int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
  59.534 +pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
  59.535 +int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
  59.536 +
  59.537 +/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
  59.538 +void pci_bus_assign_resources(struct pci_bus *bus);
  59.539 +void pci_bus_size_bridges(struct pci_bus *bus);
  59.540 +int pci_claim_resource(struct pci_dev *, int);
  59.541 +void pci_assign_unassigned_resources(void);
  59.542 +void pdev_enable_device(struct pci_dev *);
  59.543 +void pdev_sort_resources(struct pci_dev *, struct resource_list *);
  59.544 +void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
  59.545 +		    int (*)(struct pci_dev *, u8, u8));
  59.546 +#define HAVE_PCI_REQ_REGIONS	2
  59.547 +int __must_check pci_request_regions(struct pci_dev *, const char *);
  59.548 +void pci_release_regions(struct pci_dev *);
  59.549 +int __must_check pci_request_region(struct pci_dev *, int, const char *);
  59.550 +void pci_release_region(struct pci_dev *, int);
  59.551 +
  59.552 +/* drivers/pci/bus.c */
  59.553 +int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
  59.554 +			struct resource *res, resource_size_t size,
  59.555 +			resource_size_t align, resource_size_t min,
  59.556 +			unsigned int type_mask,
  59.557 +			void (*alignf)(void *, struct resource *,
  59.558 +				resource_size_t, resource_size_t),
  59.559 +			void *alignf_data);
  59.560 +void pci_enable_bridges(struct pci_bus *bus);
  59.561 +
  59.562 +/* Proper probing supporting hot-pluggable devices */
  59.563 +int __must_check __pci_register_driver(struct pci_driver *, struct module *);
  59.564 +static inline int __must_check pci_register_driver(struct pci_driver *driver)
  59.565 +{
  59.566 +	return __pci_register_driver(driver, THIS_MODULE);
  59.567 +}
  59.568 +
  59.569 +void pci_unregister_driver(struct pci_driver *);
  59.570 +void pci_remove_behind_bridge(struct pci_dev *);
  59.571 +struct pci_driver *pci_dev_driver(const struct pci_dev *);
  59.572 +const struct pci_device_id *pci_match_device(struct pci_driver *drv, struct pci_dev *dev);
  59.573 +const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, struct pci_dev *dev);
  59.574 +int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass);
  59.575 +
  59.576 +void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
  59.577 +		  void *userdata);
  59.578 +int pci_cfg_space_size(struct pci_dev *dev);
  59.579 +unsigned char pci_bus_max_busnr(struct pci_bus* bus);
  59.580 +
  59.581 +/* kmem_cache style wrapper around pci_alloc_consistent() */
  59.582 +
  59.583 +#include <linux/dmapool.h>
  59.584 +
  59.585 +#define	pci_pool dma_pool
  59.586 +#define pci_pool_create(name, pdev, size, align, allocation) \
  59.587 +		dma_pool_create(name, &pdev->dev, size, align, allocation)
  59.588 +#define	pci_pool_destroy(pool) dma_pool_destroy(pool)
  59.589 +#define	pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
  59.590 +#define	pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
  59.591 +
  59.592 +enum pci_dma_burst_strategy {
  59.593 +	PCI_DMA_BURST_INFINITY,	/* make bursts as large as possible,
  59.594 +				   strategy_parameter is N/A */
  59.595 +	PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
  59.596 +				   byte boundaries */
  59.597 +	PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
  59.598 +				   strategy_parameter byte boundaries */
  59.599 +};
  59.600 +
  59.601 +#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
  59.602 +extern struct pci_dev *isa_bridge;
  59.603 +#endif
  59.604 +
  59.605 +struct msix_entry {
  59.606 +	u16 	vector;	/* kernel uses to write allocated vector */
  59.607 +	u16	entry;	/* driver uses to specify entry, OS writes */
  59.608 +};
  59.609 +
  59.610 +
  59.611 +#ifndef CONFIG_PCI_MSI
  59.612 +static inline void pci_scan_msi_device(struct pci_dev *dev) {}
  59.613 +static inline int pci_enable_msi(struct pci_dev *dev) {return -1;}
  59.614 +static inline void pci_disable_msi(struct pci_dev *dev) {}
  59.615 +static inline int pci_enable_msix(struct pci_dev* dev,
  59.616 +	struct msix_entry *entries, int nvec) {return -1;}
  59.617 +static inline void pci_disable_msix(struct pci_dev *dev) {}
  59.618 +static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
  59.619 +#else
  59.620 +extern void pci_scan_msi_device(struct pci_dev *dev);
  59.621 +extern int pci_enable_msi(struct pci_dev *dev);
  59.622 +extern void pci_disable_msi(struct pci_dev *dev);
  59.623 +extern int pci_enable_msix(struct pci_dev* dev,
  59.624 +	struct msix_entry *entries, int nvec);
  59.625 +extern void pci_disable_msix(struct pci_dev *dev);
  59.626 +extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
  59.627 +#endif
  59.628 +
  59.629 +#ifdef CONFIG_HT_IRQ
  59.630 +/* The functions a driver should call */
  59.631 +int  ht_create_irq(struct pci_dev *dev, int idx);
  59.632 +void ht_destroy_irq(unsigned int irq);
  59.633 +#endif /* CONFIG_HT_IRQ */
  59.634 +
  59.635 +extern void pci_block_user_cfg_access(struct pci_dev *dev);
  59.636 +extern void pci_unblock_user_cfg_access(struct pci_dev *dev);
  59.637 +
  59.638 +/*
  59.639 + * PCI domain support.  Sometimes called PCI segment (eg by ACPI),
  59.640 + * a PCI domain is defined to be a set of PCI busses which share
  59.641 + * configuration space.
  59.642 + */
  59.643 +#ifndef CONFIG_PCI_DOMAINS
  59.644 +static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
  59.645 +static inline int pci_proc_domain(struct pci_bus *bus)
  59.646 +{
  59.647 +	return 0;
  59.648 +}
  59.649 +#endif
  59.650 +
  59.651 +#else /* CONFIG_PCI is not enabled */
  59.652 +
  59.653 +/*
  59.654 + *  If the system does not have PCI, clearly these return errors.  Define
  59.655 + *  these as simple inline functions to avoid hair in drivers.
  59.656 + */
  59.657 +
  59.658 +#define _PCI_NOP(o,s,t) \
  59.659 +	static inline int pci_##o##_config_##s (struct pci_dev *dev, int where, t val) \
  59.660 +		{ return PCIBIOS_FUNC_NOT_SUPPORTED; }
  59.661 +#define _PCI_NOP_ALL(o,x)	_PCI_NOP(o,byte,u8 x) \
  59.662 +				_PCI_NOP(o,word,u16 x) \
  59.663 +				_PCI_NOP(o,dword,u32 x)
  59.664 +_PCI_NOP_ALL(read, *)
  59.665 +_PCI_NOP_ALL(write,)
  59.666 +
  59.667 +static inline struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *from)
  59.668 +{ return NULL; }
  59.669 +
  59.670 +static inline struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn)
  59.671 +{ return NULL; }
  59.672 +
  59.673 +static inline struct pci_dev *pci_get_device(unsigned int vendor,
  59.674 +				unsigned int device, struct pci_dev *from)
  59.675 +{ return NULL; }
  59.676 +
  59.677 +static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor,
  59.678 +				unsigned int device, struct pci_dev *from)
  59.679 +{ return NULL; }
  59.680 +
  59.681 +static inline struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device,
  59.682 +unsigned int ss_vendor, unsigned int ss_device, struct pci_dev *from)
  59.683 +{ return NULL; }
  59.684 +
  59.685 +static inline struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
  59.686 +{ return NULL; }
  59.687 +
  59.688 +#define pci_dev_present(ids)	(0)
  59.689 +#define pci_dev_put(dev)	do { } while (0)
  59.690 +
  59.691 +static inline void pci_set_master(struct pci_dev *dev) { }
  59.692 +static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
  59.693 +static inline void pci_disable_device(struct pci_dev *dev) { }
  59.694 +static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; }
  59.695 +static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY;}
  59.696 +static inline int __pci_register_driver(struct pci_driver *drv, struct module *owner) { return 0;}
  59.697 +static inline int pci_register_driver(struct pci_driver *drv) { return 0;}
  59.698 +static inline void pci_unregister_driver(struct pci_driver *drv) { }
  59.699 +static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; }
  59.700 +static inline int pci_find_next_capability (struct pci_dev *dev, u8 post, int cap) { return 0; }
  59.701 +static inline int pci_find_ext_capability (struct pci_dev *dev, int cap) {return 0; }
  59.702 +static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; }
  59.703 +
  59.704 +/* Power management related routines */
  59.705 +static inline int pci_save_state(struct pci_dev *dev) { return 0; }
  59.706 +static inline int pci_restore_state(struct pci_dev *dev) { return 0; }
  59.707 +static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { return 0; }
  59.708 +static inline pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) { return PCI_D0; }
  59.709 +static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { return 0; }
  59.710 +
  59.711 +#define	isa_bridge	((struct pci_dev *)NULL)
  59.712 +
  59.713 +#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
  59.714 +
  59.715 +static inline void pci_block_user_cfg_access(struct pci_dev *dev) { }
  59.716 +static inline void pci_unblock_user_cfg_access(struct pci_dev *dev) { }
  59.717 +
  59.718 +#endif /* CONFIG_PCI */
  59.719 +
  59.720 +/* Include architecture-dependent settings and functions */
  59.721 +
  59.722 +#include <asm/pci.h>
  59.723 +
  59.724 +/* these helpers provide future and backwards compatibility
  59.725 + * for accessing popular PCI BAR info */
  59.726 +#define pci_resource_start(dev,bar)   ((dev)->resource[(bar)].start)
  59.727 +#define pci_resource_end(dev,bar)     ((dev)->resource[(bar)].end)
  59.728 +#define pci_resource_flags(dev,bar)   ((dev)->resource[(bar)].flags)
  59.729 +#define pci_resource_len(dev,bar) \
  59.730 +	((pci_resource_start((dev),(bar)) == 0 &&	\
  59.731 +	  pci_resource_end((dev),(bar)) ==		\
  59.732 +	  pci_resource_start((dev),(bar))) ? 0 :	\
  59.733 +	  						\
  59.734 +	 (pci_resource_end((dev),(bar)) -		\
  59.735 +	  pci_resource_start((dev),(bar)) + 1))
  59.736 +
  59.737 +/* Similar to the helpers above, these manipulate per-pci_dev
  59.738 + * driver-specific data.  They are really just a wrapper around
  59.739 + * the generic device structure functions of these calls.
  59.740 + */
  59.741 +static inline void *pci_get_drvdata (struct pci_dev *pdev)
  59.742 +{
  59.743 +	return dev_get_drvdata(&pdev->dev);
  59.744 +}
  59.745 +
  59.746 +static inline void pci_set_drvdata (struct pci_dev *pdev, void *data)
  59.747 +{
  59.748 +	dev_set_drvdata(&pdev->dev, data);
  59.749 +}
  59.750 +
  59.751 +/* If you want to know what to call your pci_dev, ask this function.
  59.752 + * Again, it's a wrapper around the generic device.
  59.753 + */
  59.754 +static inline char *pci_name(struct pci_dev *pdev)
  59.755 +{
  59.756 +	return pdev->dev.bus_id;
  59.757 +}
  59.758 +
  59.759 +
  59.760 +/* Some archs don't want to expose struct resource to userland as-is
  59.761 + * in sysfs and /proc
  59.762 + */
  59.763 +#ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER
  59.764 +static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
  59.765 +                const struct resource *rsrc, resource_size_t *start,
  59.766 +		resource_size_t *end)
  59.767 +{
  59.768 +	*start = rsrc->start;
  59.769 +	*end = rsrc->end;
  59.770 +}
  59.771 +#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
  59.772 +
  59.773 +
  59.774 +/*
  59.775 + *  The world is not perfect and supplies us with broken PCI devices.
  59.776 + *  For at least a part of these bugs we need a work-around, so both
  59.777 + *  generic (drivers/pci/quirks.c) and per-architecture code can define
  59.778 + *  fixup hooks to be called for particular buggy devices.
  59.779 + */
  59.780 +
  59.781 +struct pci_fixup {
  59.782 +	u16 vendor, device;	/* You can use PCI_ANY_ID here of course */
  59.783 +	void (*hook)(struct pci_dev *dev);
  59.784 +};
  59.785 +
  59.786 +enum pci_fixup_pass {
  59.787 +	pci_fixup_early,	/* Before probing BARs */
  59.788 +	pci_fixup_header,	/* After reading configuration header */
  59.789 +	pci_fixup_final,	/* Final phase of device fixups */
  59.790 +	pci_fixup_enable,	/* pci_enable_device() time */
  59.791 +};
  59.792 +
  59.793 +/* Anonymous variables would be nice... */
  59.794 +#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, hook)	\
  59.795 +	static const struct pci_fixup __pci_fixup_##name __attribute_used__ \
  59.796 +	__attribute__((__section__(#section))) = { vendor, device, hook };
  59.797 +#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\
  59.798 +	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
  59.799 +			vendor##device##hook, vendor, device, hook)
  59.800 +#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)			\
  59.801 +	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
  59.802 +			vendor##device##hook, vendor, device, hook)
  59.803 +#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)			\
  59.804 +	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
  59.805 +			vendor##device##hook, vendor, device, hook)
  59.806 +#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)			\
  59.807 +	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
  59.808 +			vendor##device##hook, vendor, device, hook)
  59.809 +
  59.810 +
  59.811 +void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
  59.812 +
  59.813 +extern int pci_pci_problems;
  59.814 +#define PCIPCI_FAIL		1	/* No PCI PCI DMA */
  59.815 +#define PCIPCI_TRITON		2
  59.816 +#define PCIPCI_NATOMA		4
  59.817 +#define PCIPCI_VIAETBF		8
  59.818 +#define PCIPCI_VSFX		16
  59.819 +#define PCIPCI_ALIMAGIK		32	/* Need low latency setting */
  59.820 +#define PCIAGP_FAIL		64	/* No PCI to AGP DMA */
  59.821 +
  59.822 +#endif /* __KERNEL__ */
  59.823 +#endif /* LINUX_PCI_H */
    60.1 --- a/xen/include/asm-ia64/linux/README.origin	Wed Dec 20 08:53:42 2006 -0700
    60.2 +++ b/xen/include/asm-ia64/linux/README.origin	Wed Dec 20 14:55:02 2006 -0700
    60.3 @@ -24,3 +24,15 @@ time.h			-> linux/include/linux/time.h
    60.4  timex.h			-> linux/include/linux/timex.h
    60.5  topology.h		-> linux/include/linux/topology.h
    60.6  wait.h			-> linux/include/linux/wait.h
    60.7 +
    60.8 +# The files below are from Linux-2.6.19
    60.9 +completion.h		-> linux/include/linux/completion.h
   60.10 +ioport.h		-> linux/include/linux/ioport.h
   60.11 +klist.h			-> linux/include/linux/klist.h
   60.12 +kref.h			-> linux/include/linux/kref.h
   60.13 +mod_devicetable.h	-> linux/include/linux/mod_devicetable.h
   60.14 +pci_ids.h		-> linux/include/linux/pci_ids.h
   60.15 +pci_regs.h		-> linux/include/linux/pci_regs.h
   60.16 +pm.h			-> linux/include/linux/pm.h
   60.17 +sysfs.h			-> linux/include/linux/sysfs.h
   60.18 +
    61.1 --- a/xen/include/asm-ia64/linux/asm/README.origin	Wed Dec 20 08:53:42 2006 -0700
    61.2 +++ b/xen/include/asm-ia64/linux/asm/README.origin	Wed Dec 20 14:55:02 2006 -0700
    61.3 @@ -21,7 +21,6 @@ intrinsics.h		-> linux/include/asm-ia64/
    61.4  ioctl.h			-> linux/include/asm-ia64/ioctl.h
    61.5  irq.h			-> linux/include/asm-ia64/irq.h
    61.6  linkage.h		-> linux/include/asm-ia64/linkage.h
    61.7 -machvec.h		-> linux/include/asm-ia64/machvec.h
    61.8  machvec_hpsim.h		-> linux/include/asm-ia64/machvec_hpsim.h
    61.9  mca.h			-> linux/include/asm-ia64/mca.h
   61.10  nodedata.h		-> linux/include/asm-ia64/nodedate.h
   61.11 @@ -41,3 +40,6 @@ topology.h		-> linux/include/asm-ia64/to
   61.12  unaligned.h		-> linux/include/asm-ia64/unaligned.h
   61.13  unistd.h		-> linux/include/asm-ia64/unistd.h
   61.14  unwind.h		-> linux/include/asm-ia64/unwind.h
   61.15 +
   61.16 +# The files below are from Linux-2.6.19
   61.17 +machvec_init.h		-> linux/include/asm-ia64/machvec_init.h
    62.1 --- a/xen/include/asm-ia64/linux/asm/machvec.h	Wed Dec 20 08:53:42 2006 -0700
    62.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    62.3 @@ -1,390 +0,0 @@
    62.4 -/*
    62.5 - * Machine vector for IA-64.
    62.6 - *
    62.7 - * Copyright (C) 1999 Silicon Graphics, Inc.
    62.8 - * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
    62.9 - * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
   62.10 - * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
   62.11 - *	David Mosberger-Tang <davidm@hpl.hp.com>
   62.12 - */
   62.13 -#ifndef _ASM_IA64_MACHVEC_H
   62.14 -#define _ASM_IA64_MACHVEC_H
   62.15 -
   62.16 -#include <linux/config.h>
   62.17 -#include <linux/types.h>
   62.18 -
   62.19 -/* forward declarations: */
   62.20 -struct device;
   62.21 -struct pt_regs;
   62.22 -struct scatterlist;
   62.23 -struct page;
   62.24 -struct mm_struct;
   62.25 -struct pci_bus;
   62.26 -
   62.27 -typedef void ia64_mv_setup_t (char **);
   62.28 -typedef void ia64_mv_cpu_init_t (void);
   62.29 -typedef void ia64_mv_irq_init_t (void);
   62.30 -typedef void ia64_mv_send_ipi_t (int, int, int, int);
   62.31 -typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
   62.32 -typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
   62.33 -typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
   62.34 -typedef unsigned int ia64_mv_local_vector_to_irq (u8);
   62.35 -typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
   62.36 -typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
   62.37 -				       u8 size);
   62.38 -typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
   62.39 -					u8 size);
   62.40 -
   62.41 -/* DMA-mapping interface: */
   62.42 -typedef void ia64_mv_dma_init (void);
   62.43 -typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
   62.44 -typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
   62.45 -typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
   62.46 -typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
   62.47 -typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
   62.48 -typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
   62.49 -typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
   62.50 -typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
   62.51 -typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
   62.52 -typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
   62.53 -typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
   62.54 -typedef int ia64_mv_dma_supported (struct device *, u64);
   62.55 -
   62.56 -/*
   62.57 - * WARNING: The legacy I/O space is _architected_.  Platforms are
   62.58 - * expected to follow this architected model (see Section 10.7 in the
   62.59 - * IA-64 Architecture Software Developer's Manual).  Unfortunately,
   62.60 - * some broken machines do not follow that model, which is why we have
   62.61 - * to make the inX/outX operations part of the machine vector.
   62.62 - * Platform designers should follow the architected model whenever
   62.63 - * possible.
   62.64 - */
   62.65 -typedef unsigned int ia64_mv_inb_t (unsigned long);
   62.66 -typedef unsigned int ia64_mv_inw_t (unsigned long);
   62.67 -typedef unsigned int ia64_mv_inl_t (unsigned long);
   62.68 -typedef void ia64_mv_outb_t (unsigned char, unsigned long);
   62.69 -typedef void ia64_mv_outw_t (unsigned short, unsigned long);
   62.70 -typedef void ia64_mv_outl_t (unsigned int, unsigned long);
   62.71 -typedef void ia64_mv_mmiowb_t (void);
   62.72 -typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
   62.73 -typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
   62.74 -typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
   62.75 -typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
   62.76 -typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
   62.77 -typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
   62.78 -typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
   62.79 -typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
   62.80 -
   62.81 -static inline void
   62.82 -machvec_noop (void)
   62.83 -{
   62.84 -}
   62.85 -
   62.86 -static inline void
   62.87 -machvec_noop_mm (struct mm_struct *mm)
   62.88 -{
   62.89 -}
   62.90 -
   62.91 -extern void machvec_setup (char **);
   62.92 -extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
   62.93 -extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
   62.94 -extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
   62.95 -extern void machvec_tlb_migrate_finish (struct mm_struct *);
   62.96 -
   62.97 -# if defined (CONFIG_IA64_HP_SIM)
   62.98 -#  include <asm/machvec_hpsim.h>
   62.99 -# elif defined (CONFIG_IA64_DIG)
  62.100 -#  include <asm/machvec_dig.h>
  62.101 -# elif defined (CONFIG_IA64_HP_ZX1)
  62.102 -#  include <asm/machvec_hpzx1.h>
  62.103 -# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
  62.104 -#  include <asm/machvec_hpzx1_swiotlb.h>
  62.105 -# elif defined (CONFIG_IA64_SGI_SN2)
  62.106 -#  include <asm/machvec_sn2.h>
  62.107 -# elif defined (CONFIG_IA64_GENERIC)
  62.108 -
  62.109 -# ifdef MACHVEC_PLATFORM_HEADER
  62.110 -#  include MACHVEC_PLATFORM_HEADER
  62.111 -# else
  62.112 -#  define platform_name		ia64_mv.name
  62.113 -#  define platform_setup	ia64_mv.setup
  62.114 -#  define platform_cpu_init	ia64_mv.cpu_init
  62.115 -#  define platform_irq_init	ia64_mv.irq_init
  62.116 -#  define platform_send_ipi	ia64_mv.send_ipi
  62.117 -#  define platform_timer_interrupt	ia64_mv.timer_interrupt
  62.118 -#  define platform_global_tlb_purge	ia64_mv.global_tlb_purge
  62.119 -#  define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
  62.120 -#  define platform_dma_init		ia64_mv.dma_init
  62.121 -#  define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
  62.122 -#  define platform_dma_free_coherent	ia64_mv.dma_free_coherent
  62.123 -#  define platform_dma_map_single	ia64_mv.dma_map_single
  62.124 -#  define platform_dma_unmap_single	ia64_mv.dma_unmap_single
  62.125 -#  define platform_dma_map_sg		ia64_mv.dma_map_sg
  62.126 -#  define platform_dma_unmap_sg		ia64_mv.dma_unmap_sg
  62.127 -#  define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
  62.128 -#  define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
  62.129 -#  define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
  62.130 -#  define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
  62.131 -#  define platform_dma_mapping_error		ia64_mv.dma_mapping_error
  62.132 -#  define platform_dma_supported	ia64_mv.dma_supported
  62.133 -#  define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
  62.134 -#  define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
  62.135 -#  define platform_pci_legacy_read	ia64_mv.pci_legacy_read
  62.136 -#  define platform_pci_legacy_write	ia64_mv.pci_legacy_write
  62.137 -#  define platform_inb		ia64_mv.inb
  62.138 -#  define platform_inw		ia64_mv.inw
  62.139 -#  define platform_inl		ia64_mv.inl
  62.140 -#  define platform_outb		ia64_mv.outb
  62.141 -#  define platform_outw		ia64_mv.outw
  62.142 -#  define platform_outl		ia64_mv.outl
  62.143 -#  define platform_mmiowb	ia64_mv.mmiowb
  62.144 -#  define platform_readb        ia64_mv.readb
  62.145 -#  define platform_readw        ia64_mv.readw
  62.146 -#  define platform_readl        ia64_mv.readl
  62.147 -#  define platform_readq        ia64_mv.readq
  62.148 -#  define platform_readb_relaxed        ia64_mv.readb_relaxed
  62.149 -#  define platform_readw_relaxed        ia64_mv.readw_relaxed
  62.150 -#  define platform_readl_relaxed        ia64_mv.readl_relaxed
  62.151 -#  define platform_readq_relaxed        ia64_mv.readq_relaxed
  62.152 -# endif
  62.153 -
  62.154 -/* __attribute__((__aligned__(16))) is required to make size of the
  62.155 - * structure multiple of 16 bytes.
  62.156 - * This will fillup the holes created because of section 3.3.1 in
  62.157 - * Software Conventions guide.
  62.158 - */
  62.159 -struct ia64_machine_vector {
  62.160 -	const char *name;
  62.161 -	ia64_mv_setup_t *setup;
  62.162 -	ia64_mv_cpu_init_t *cpu_init;
  62.163 -	ia64_mv_irq_init_t *irq_init;
  62.164 -	ia64_mv_send_ipi_t *send_ipi;
  62.165 -	ia64_mv_timer_interrupt_t *timer_interrupt;
  62.166 -	ia64_mv_global_tlb_purge_t *global_tlb_purge;
  62.167 -	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
  62.168 -	ia64_mv_dma_init *dma_init;
  62.169 -	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
  62.170 -	ia64_mv_dma_free_coherent *dma_free_coherent;
  62.171 -	ia64_mv_dma_map_single *dma_map_single;
  62.172 -	ia64_mv_dma_unmap_single *dma_unmap_single;
  62.173 -	ia64_mv_dma_map_sg *dma_map_sg;
  62.174 -	ia64_mv_dma_unmap_sg *dma_unmap_sg;
  62.175 -	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
  62.176 -	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
  62.177 -	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
  62.178 -	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
  62.179 -	ia64_mv_dma_mapping_error *dma_mapping_error;
  62.180 -	ia64_mv_dma_supported *dma_supported;
  62.181 -	ia64_mv_local_vector_to_irq *local_vector_to_irq;
  62.182 -	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
  62.183 -	ia64_mv_pci_legacy_read_t *pci_legacy_read;
  62.184 -	ia64_mv_pci_legacy_write_t *pci_legacy_write;
  62.185 -	ia64_mv_inb_t *inb;
  62.186 -	ia64_mv_inw_t *inw;
  62.187 -	ia64_mv_inl_t *inl;
  62.188 -	ia64_mv_outb_t *outb;
  62.189 -	ia64_mv_outw_t *outw;
  62.190 -	ia64_mv_outl_t *outl;
  62.191 -	ia64_mv_mmiowb_t *mmiowb;
  62.192 -	ia64_mv_readb_t *readb;
  62.193 -	ia64_mv_readw_t *readw;
  62.194 -	ia64_mv_readl_t *readl;
  62.195 -	ia64_mv_readq_t *readq;
  62.196 -	ia64_mv_readb_relaxed_t *readb_relaxed;
  62.197 -	ia64_mv_readw_relaxed_t *readw_relaxed;
  62.198 -	ia64_mv_readl_relaxed_t *readl_relaxed;
  62.199 -	ia64_mv_readq_relaxed_t *readq_relaxed;
  62.200 -} __attribute__((__aligned__(16))); /* align attrib? see above comment */
  62.201 -
  62.202 -#define MACHVEC_INIT(name)			\
  62.203 -{						\
  62.204 -	#name,					\
  62.205 -	platform_setup,				\
  62.206 -	platform_cpu_init,			\
  62.207 -	platform_irq_init,			\
  62.208 -	platform_send_ipi,			\
  62.209 -	platform_timer_interrupt,		\
  62.210 -	platform_global_tlb_purge,		\
  62.211 -	platform_tlb_migrate_finish,		\
  62.212 -	platform_dma_init,			\
  62.213 -	platform_dma_alloc_coherent,		\
  62.214 -	platform_dma_free_coherent,		\
  62.215 -	platform_dma_map_single,		\
  62.216 -	platform_dma_unmap_single,		\
  62.217 -	platform_dma_map_sg,			\
  62.218 -	platform_dma_unmap_sg,			\
  62.219 -	platform_dma_sync_single_for_cpu,	\
  62.220 -	platform_dma_sync_sg_for_cpu,		\
  62.221 -	platform_dma_sync_single_for_device,	\
  62.222 -	platform_dma_sync_sg_for_device,	\
  62.223 -	platform_dma_mapping_error,			\
  62.224 -	platform_dma_supported,			\
  62.225 -	platform_local_vector_to_irq,		\
  62.226 -	platform_pci_get_legacy_mem,		\
  62.227 -	platform_pci_legacy_read,		\
  62.228 -	platform_pci_legacy_write,		\
  62.229 -	platform_inb,				\
  62.230 -	platform_inw,				\
  62.231 -	platform_inl,				\
  62.232 -	platform_outb,				\
  62.233 -	platform_outw,				\
  62.234 -	platform_outl,				\
  62.235 -	platform_mmiowb,			\
  62.236 -	platform_readb,				\
  62.237 -	platform_readw,				\
  62.238 -	platform_readl,				\
  62.239 -	platform_readq,				\
  62.240 -	platform_readb_relaxed,			\
  62.241 -	platform_readw_relaxed,			\
  62.242 -	platform_readl_relaxed,			\
  62.243 -	platform_readq_relaxed,			\
  62.244 -}
  62.245 -
  62.246 -extern struct ia64_machine_vector ia64_mv;
  62.247 -extern void machvec_init (const char *name);
  62.248 -
  62.249 -# else
  62.250 -#  error Unknown configuration.  Update asm-ia64/machvec.h.
  62.251 -# endif /* CONFIG_IA64_GENERIC */
  62.252 -
  62.253 -/*
  62.254 - * Declare default routines which aren't declared anywhere else:
  62.255 - */
  62.256 -extern ia64_mv_dma_init			swiotlb_init;
  62.257 -extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
  62.258 -extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
  62.259 -extern ia64_mv_dma_map_single		swiotlb_map_single;
  62.260 -extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
  62.261 -extern ia64_mv_dma_map_sg		swiotlb_map_sg;
  62.262 -extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
  62.263 -extern ia64_mv_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu;
  62.264 -extern ia64_mv_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu;
  62.265 -extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
  62.266 -extern ia64_mv_dma_sync_sg_for_device	swiotlb_sync_sg_for_device;
  62.267 -extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
  62.268 -extern ia64_mv_dma_supported		swiotlb_dma_supported;
  62.269 -
  62.270 -/*
  62.271 - * Define default versions so we can extend machvec for new platforms without having
  62.272 - * to update the machvec files for all existing platforms.
  62.273 - */
  62.274 -#ifndef platform_setup
  62.275 -# define platform_setup			machvec_setup
  62.276 -#endif
  62.277 -#ifndef platform_cpu_init
  62.278 -# define platform_cpu_init		machvec_noop
  62.279 -#endif
  62.280 -#ifndef platform_irq_init
  62.281 -# define platform_irq_init		machvec_noop
  62.282 -#endif
  62.283 -
  62.284 -#ifndef platform_send_ipi
  62.285 -# define platform_send_ipi		ia64_send_ipi	/* default to architected version */
  62.286 -#endif
  62.287 -#ifndef platform_timer_interrupt
  62.288 -# define platform_timer_interrupt 	machvec_timer_interrupt
  62.289 -#endif
  62.290 -#ifndef platform_global_tlb_purge
  62.291 -# define platform_global_tlb_purge	ia64_global_tlb_purge /* default to architected version */
  62.292 -#endif
  62.293 -#ifndef platform_tlb_migrate_finish
  62.294 -# define platform_tlb_migrate_finish	machvec_noop_mm
  62.295 -#endif
  62.296 -#ifndef platform_dma_init
  62.297 -# define platform_dma_init		swiotlb_init
  62.298 -#endif
  62.299 -#ifndef platform_dma_alloc_coherent
  62.300 -# define platform_dma_alloc_coherent	swiotlb_alloc_coherent
  62.301 -#endif
  62.302 -#ifndef platform_dma_free_coherent
  62.303 -# define platform_dma_free_coherent	swiotlb_free_coherent
  62.304 -#endif
  62.305 -#ifndef platform_dma_map_single
  62.306 -# define platform_dma_map_single	swiotlb_map_single
  62.307 -#endif
  62.308 -#ifndef platform_dma_unmap_single
  62.309 -# define platform_dma_unmap_single	swiotlb_unmap_single
  62.310 -#endif
  62.311 -#ifndef platform_dma_map_sg
  62.312 -# define platform_dma_map_sg		swiotlb_map_sg
  62.313 -#endif
  62.314 -#ifndef platform_dma_unmap_sg
  62.315 -# define platform_dma_unmap_sg		swiotlb_unmap_sg
  62.316 -#endif
  62.317 -#ifndef platform_dma_sync_single_for_cpu
  62.318 -# define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
  62.319 -#endif
  62.320 -#ifndef platform_dma_sync_sg_for_cpu
  62.321 -# define platform_dma_sync_sg_for_cpu		swiotlb_sync_sg_for_cpu
  62.322 -#endif
  62.323 -#ifndef platform_dma_sync_single_for_device
  62.324 -# define platform_dma_sync_single_for_device	swiotlb_sync_single_for_device
  62.325 -#endif
  62.326 -#ifndef platform_dma_sync_sg_for_device
  62.327 -# define platform_dma_sync_sg_for_device	swiotlb_sync_sg_for_device
  62.328 -#endif
  62.329 -#ifndef platform_dma_mapping_error
  62.330 -# define platform_dma_mapping_error		swiotlb_dma_mapping_error
  62.331 -#endif
  62.332 -#ifndef platform_dma_supported
  62.333 -# define  platform_dma_supported	swiotlb_dma_supported
  62.334 -#endif
  62.335 -#ifndef platform_local_vector_to_irq
  62.336 -# define platform_local_vector_to_irq	__ia64_local_vector_to_irq
  62.337 -#endif
  62.338 -#ifndef platform_pci_get_legacy_mem
  62.339 -# define platform_pci_get_legacy_mem	ia64_pci_get_legacy_mem
  62.340 -#endif
  62.341 -#ifndef platform_pci_legacy_read
  62.342 -# define platform_pci_legacy_read	ia64_pci_legacy_read
  62.343 -#endif
  62.344 -#ifndef platform_pci_legacy_write
  62.345 -# define platform_pci_legacy_write	ia64_pci_legacy_write
  62.346 -#endif
  62.347 -#ifndef platform_inb
  62.348 -# define platform_inb		__ia64_inb
  62.349 -#endif
  62.350 -#ifndef platform_inw
  62.351 -# define platform_inw		__ia64_inw
  62.352 -#endif
  62.353 -#ifndef platform_inl
  62.354 -# define platform_inl		__ia64_inl
  62.355 -#endif
  62.356 -#ifndef platform_outb
  62.357 -# define platform_outb		__ia64_outb
  62.358 -#endif
  62.359 -#ifndef platform_outw
  62.360 -# define platform_outw		__ia64_outw
  62.361 -#endif
  62.362 -#ifndef platform_outl
  62.363 -# define platform_outl		__ia64_outl
  62.364 -#endif
  62.365 -#ifndef platform_mmiowb
  62.366 -# define platform_mmiowb	__ia64_mmiowb
  62.367 -#endif
  62.368 -#ifndef platform_readb
  62.369 -# define platform_readb		__ia64_readb
  62.370 -#endif
  62.371 -#ifndef platform_readw
  62.372 -# define platform_readw		__ia64_readw
  62.373 -#endif
  62.374 -#ifndef platform_readl
  62.375 -# define platform_readl		__ia64_readl
  62.376 -#endif
  62.377 -#ifndef platform_readq
  62.378 -# define platform_readq		__ia64_readq
  62.379 -#endif
  62.380 -#ifndef platform_readb_relaxed
  62.381 -# define platform_readb_relaxed	__ia64_readb_relaxed
  62.382 -#endif
  62.383 -#ifndef platform_readw_relaxed
  62.384 -# define platform_readw_relaxed	__ia64_readw_relaxed
  62.385 -#endif
  62.386 -#ifndef platform_readl_relaxed
  62.387 -# define platform_readl_relaxed	__ia64_readl_relaxed
  62.388 -#endif
  62.389 -#ifndef platform_readq_relaxed
  62.390 -# define platform_readq_relaxed	__ia64_readq_relaxed
  62.391 -#endif
  62.392 -
  62.393 -#endif /* _ASM_IA64_MACHVEC_H */
    63.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    63.2 +++ b/xen/include/asm-ia64/linux/asm/machvec_init.h	Wed Dec 20 14:55:02 2006 -0700
    63.3 @@ -0,0 +1,32 @@
    63.4 +#include <asm/machvec.h>
    63.5 +
    63.6 +extern ia64_mv_send_ipi_t ia64_send_ipi;
    63.7 +extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
    63.8 +extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
    63.9 +extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
   63.10 +extern ia64_mv_pci_legacy_read_t ia64_pci_legacy_read;
   63.11 +extern ia64_mv_pci_legacy_write_t ia64_pci_legacy_write;
   63.12 +
   63.13 +extern ia64_mv_inb_t __ia64_inb;
   63.14 +extern ia64_mv_inw_t __ia64_inw;
   63.15 +extern ia64_mv_inl_t __ia64_inl;
   63.16 +extern ia64_mv_outb_t __ia64_outb;
   63.17 +extern ia64_mv_outw_t __ia64_outw;
   63.18 +extern ia64_mv_outl_t __ia64_outl;
   63.19 +extern ia64_mv_mmiowb_t __ia64_mmiowb;
   63.20 +extern ia64_mv_readb_t __ia64_readb;
   63.21 +extern ia64_mv_readw_t __ia64_readw;
   63.22 +extern ia64_mv_readl_t __ia64_readl;
   63.23 +extern ia64_mv_readq_t __ia64_readq;
   63.24 +extern ia64_mv_readb_t __ia64_readb_relaxed;
   63.25 +extern ia64_mv_readw_t __ia64_readw_relaxed;
   63.26 +extern ia64_mv_readl_t __ia64_readl_relaxed;
   63.27 +extern ia64_mv_readq_t __ia64_readq_relaxed;
   63.28 +
   63.29 +#define MACHVEC_HELPER(name)									\
   63.30 + struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec")))	\
   63.31 +	= MACHVEC_INIT(name);
   63.32 +
   63.33 +#define MACHVEC_DEFINE(name)	MACHVEC_HELPER(name)
   63.34 +
   63.35 +MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME)
    64.1 --- a/xen/include/asm-ia64/linux/asm/pci.h	Wed Dec 20 08:53:42 2006 -0700
    64.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    64.3 @@ -1,161 +0,0 @@
    64.4 -#ifndef _ASM_IA64_PCI_H
    64.5 -#define _ASM_IA64_PCI_H
    64.6 -
    64.7 -#include <linux/mm.h>
    64.8 -#include <linux/slab.h>
    64.9 -#include <linux/spinlock.h>
   64.10 -#include <linux/string.h>
   64.11 -#include <linux/types.h>
   64.12 -
   64.13 -#include <asm/io.h>
   64.14 -#include <asm/scatterlist.h>
   64.15 -
   64.16 -/*
   64.17 - * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
   64.18 - * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
   64.19 - * loader.
   64.20 - */
   64.21 -#define pcibios_assign_all_busses()     0
   64.22 -#define pcibios_scan_all_fns(a, b)	0
   64.23 -
   64.24 -#define PCIBIOS_MIN_IO		0x1000
   64.25 -#define PCIBIOS_MIN_MEM		0x10000000
   64.26 -
   64.27 -void pcibios_config_init(void);
   64.28 -
   64.29 -struct pci_dev;
   64.30 -
   64.31 -/*
   64.32 - * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence
   64.33 - * between device bus addresses and CPU physical addresses.  Platforms with a hardware I/O
   64.34 - * MMU _must_ turn this off to suppress the bounce buffer handling code in the block and
   64.35 - * network device layers.  Platforms with separate bus address spaces _must_ turn this off
   64.36 - * and provide a device DMA mapping implementation that takes care of the necessary
   64.37 - * address translation.
   64.38 - *
   64.39 - * For now, the ia64 platforms which may have separate/multiple bus address spaces all
   64.40 - * have I/O MMUs which support the merging of physically discontiguous buffers, so we can
   64.41 - * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
   64.42 - */
   64.43 -extern unsigned long ia64_max_iommu_merge_mask;
   64.44 -#define PCI_DMA_BUS_IS_PHYS	(ia64_max_iommu_merge_mask == ~0UL)
   64.45 -
   64.46 -static inline void
   64.47 -pcibios_set_master (struct pci_dev *dev)
   64.48 -{
   64.49 -	/* No special bus mastering setup handling */
   64.50 -}
   64.51 -
   64.52 -static inline void
   64.53 -pcibios_penalize_isa_irq (int irq, int active)
   64.54 -{
   64.55 -	/* We don't do dynamic PCI IRQ allocation */
   64.56 -}
   64.57 -
   64.58 -#define HAVE_ARCH_PCI_MWI 1
   64.59 -extern int pcibios_prep_mwi (struct pci_dev *);
   64.60 -
   64.61 -#include <asm-generic/pci-dma-compat.h>
   64.62 -
   64.63 -/* pci_unmap_{single,page} is not a nop, thus... */
   64.64 -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
   64.65 -	dma_addr_t ADDR_NAME;
   64.66 -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
   64.67 -	__u32 LEN_NAME;
   64.68 -#define pci_unmap_addr(PTR, ADDR_NAME)			\
   64.69 -	((PTR)->ADDR_NAME)
   64.70 -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
   64.71 -	(((PTR)->ADDR_NAME) = (VAL))
   64.72 -#define pci_unmap_len(PTR, LEN_NAME)			\
   64.73 -	((PTR)->LEN_NAME)
   64.74 -#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
   64.75 -	(((PTR)->LEN_NAME) = (VAL))
   64.76 -
   64.77 -/* The ia64 platform always supports 64-bit addressing. */
   64.78 -#define pci_dac_dma_supported(pci_dev, mask)		(1)
   64.79 -#define pci_dac_page_to_dma(dev,pg,off,dir)		((dma_addr_t) page_to_bus(pg) + (off))
   64.80 -#define pci_dac_dma_to_page(dev,dma_addr)		(virt_to_page(bus_to_virt(dma_addr)))
   64.81 -#define pci_dac_dma_to_offset(dev,dma_addr)		offset_in_page(dma_addr)
   64.82 -#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir)	do { } while (0)
   64.83 -#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir)	do { mb(); } while (0)
   64.84 -
   64.85 -#define sg_dma_len(sg)		((sg)->dma_length)
   64.86 -#define sg_dma_address(sg)	((sg)->dma_address)
   64.87 -
   64.88 -#ifdef CONFIG_PCI
   64.89 -static inline void pci_dma_burst_advice(struct pci_dev *pdev,
   64.90 -					enum pci_dma_burst_strategy *strat,
   64.91 -					unsigned long *strategy_parameter)
   64.92 -{
   64.93 -	unsigned long cacheline_size;
   64.94 -	u8 byte;
   64.95 -
   64.96 -	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
   64.97 -	if (byte == 0)
   64.98 -		cacheline_size = 1024;
   64.99 -	else
  64.100 -		cacheline_size = (int) byte * 4;
  64.101 -
  64.102 -	*strat = PCI_DMA_BURST_MULTIPLE;
  64.103 -	*strategy_parameter = cacheline_size;
  64.104 -}
  64.105 -#endif
  64.106 -
  64.107 -#define HAVE_PCI_MMAP
  64.108 -extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
  64.109 -				enum pci_mmap_state mmap_state, int write_combine);
  64.110 -#define HAVE_PCI_LEGACY
  64.111 -extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
  64.112 -				      struct vm_area_struct *vma);
  64.113 -extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
  64.114 -				  size_t count);
  64.115 -extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
  64.116 -				   size_t count);
  64.117 -extern int pci_mmap_legacy_mem(struct kobject *kobj,
  64.118 -			       struct bin_attribute *attr,
  64.119 -			       struct vm_area_struct *vma);
  64.120 -
  64.121 -#define pci_get_legacy_mem platform_pci_get_legacy_mem
  64.122 -#define pci_legacy_read platform_pci_legacy_read
  64.123 -#define pci_legacy_write platform_pci_legacy_write
  64.124 -
  64.125 -struct pci_window {
  64.126 -	struct resource resource;
  64.127 -	u64 offset;
  64.128 -};
  64.129 -
  64.130 -struct pci_controller {
  64.131 -	void *acpi_handle;
  64.132 -	void *iommu;
  64.133 -	int segment;
  64.134 -	int node;		/* nearest node with memory or -1 for global allocation */
  64.135 -
  64.136 -	unsigned int windows;
  64.137 -	struct pci_window *window;
  64.138 -
  64.139 -	void *platform_data;
  64.140 -};
  64.141 -
  64.142 -#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
  64.143 -#define pci_domain_nr(busdev)    (PCI_CONTROLLER(busdev)->segment)
  64.144 -
  64.145 -extern struct pci_ops pci_root_ops;
  64.146 -
  64.147 -static inline int pci_proc_domain(struct pci_bus *bus)
  64.148 -{
  64.149 -	return (pci_domain_nr(bus) != 0);
  64.150 -}
  64.151 -
  64.152 -static inline void pcibios_add_platform_entries(struct pci_dev *dev)
  64.153 -{
  64.154 -}
  64.155 -
  64.156 -extern void pcibios_resource_to_bus(struct pci_dev *dev,
  64.157 -		struct pci_bus_region *region, struct resource *res);
  64.158 -
  64.159 -extern void pcibios_bus_to_resource(struct pci_dev *dev,
  64.160 -		struct resource *res, struct pci_bus_region *region);
  64.161 -
  64.162 -#define pcibios_scan_all_fns(a, b)	0
  64.163 -
  64.164 -#endif /* _ASM_IA64_PCI_H */
    65.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    65.2 +++ b/xen/include/asm-ia64/linux/asm/sn/README.origin	Wed Dec 20 14:55:02 2006 -0700
    65.3 @@ -0,0 +1,24 @@
    65.4 +# Source files in this directory are identical copies of linux-2.6.19 files:
    65.5 +# 
    65.6 +# NOTE: DO NOT commit changes to these files!   If a file
    65.7 +# needs to be changed, move it to ../linux-xen and follow
    65.8 +# the instructions in the README there.
    65.9 +
   65.10 +geo.h			-> linux/include/asm-ia64/sn/geo.h
   65.11 +klconfig.h		-> linux/include/asm-ia64/sn/klconfig.h
   65.12 +l1.h			-> linux/include/asm-ia64/sn/l1.h
   65.13 +leds.h			-> linux/include/asm-ia64/sn/leds.h
   65.14 +module.h		-> linux/include/asm-ia64/sn/module.h
   65.15 +pcibus_provider_defs.h	-> linux/include/asm-ia64/sn/pcibus_provider_defs.h
   65.16 +pcidev.h		-> linux/include/asm-ia64/sn/pcidev.h
   65.17 +pda.h			-> linux/include/asm-ia64/sn/pda.h
   65.18 +pic.h			-> linux/include/asm-ia64/sn/pic.h
   65.19 +shub_mmr.h		-> linux/include/asm-ia64/sn/shub_mmr.h
   65.20 +shubio.h		-> linux/include/asm-ia64/sn/shubio.h
   65.21 +simulator.h		-> linux/include/asm-ia64/sn/simulator.h
   65.22 +sn_cpuid.h		-> linux/include/asm-ia64/sn/sn_cpuid.h
   65.23 +sn_feature_sets.h	-> linux/include/asm-ia64/sn/sn_feature_sets.h
   65.24 +sn_sal.h		-> linux/include/asm-ia64/sn/sn_sal.h
   65.25 +tiocp.h			-> linux/include/asm-ia64/sn/tiocp.h
   65.26 +xbow.h			-> linux/arch/ia64/sn/include/xtalk/xbow.h
   65.27 +xwidgetdev.h		-> linux/arch/ia64/sn/include/xtalk/xwidgetdev.h
    66.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    66.2 +++ b/xen/include/asm-ia64/linux/asm/sn/geo.h	Wed Dec 20 14:55:02 2006 -0700
    66.3 @@ -0,0 +1,132 @@
    66.4 +/*
    66.5 + * This file is subject to the terms and conditions of the GNU General Public
    66.6 + * License.  See the file "COPYING" in the main directory of this archive
    66.7 + * for more details.
    66.8 + *
    66.9 + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
   66.10 + */
   66.11 +
   66.12 +#ifndef _ASM_IA64_SN_GEO_H
   66.13 +#define _ASM_IA64_SN_GEO_H
   66.14 +
   66.15 +/* The geoid_t implementation below is based loosely on the pcfg_t
   66.16 +   implementation in sys/SN/promcfg.h. */
   66.17 +
   66.18 +/* Type declaractions */
   66.19 +
   66.20 +/* Size of a geoid_t structure (must be before decl. of geoid_u) */
   66.21 +#define GEOID_SIZE	8	/* Would 16 be better?  The size can
   66.22 +				   be different on different platforms. */
   66.23 +
   66.24 +#define MAX_SLOTS	0xf	/* slots per module */
   66.25 +#define MAX_SLABS	0xf	/* slabs per slot */
   66.26 +
   66.27 +typedef unsigned char	geo_type_t;
   66.28 +
   66.29 +/* Fields common to all substructures */
   66.30 +typedef struct geo_common_s {
   66.31 +    moduleid_t	module;		/* The module (box) this h/w lives in */
   66.32 +    geo_type_t	type;		/* What type of h/w is named by this geoid_t */
   66.33 +    slabid_t	slab:4;		/* slab (ASIC), 0 .. 15 within slot */
   66.34 +    slotid_t	slot:4;		/* slot (Blade), 0 .. 15 within module */
   66.35 +} geo_common_t;
   66.36 +
   66.37 +/* Additional fields for particular types of hardware */
   66.38 +typedef struct geo_node_s {
   66.39 +    geo_common_t	common;		/* No additional fields needed */
   66.40 +} geo_node_t;
   66.41 +
   66.42 +typedef struct geo_rtr_s {
   66.43 +    geo_common_t	common;		/* No additional fields needed */
   66.44 +} geo_rtr_t;
   66.45 +
   66.46 +typedef struct geo_iocntl_s {
   66.47 +    geo_common_t	common;		/* No additional fields needed */
   66.48 +} geo_iocntl_t;
   66.49 +
   66.50 +typedef struct geo_pcicard_s {
   66.51 +    geo_iocntl_t	common;
   66.52 +    char		bus;	/* Bus/widget number */
   66.53 +    char		slot;	/* PCI slot number */
   66.54 +} geo_pcicard_t;
   66.55 +
   66.56 +/* Subcomponents of a node */
   66.57 +typedef struct geo_cpu_s {
   66.58 +    geo_node_t	node;
   66.59 +    char	slice;		/* Which CPU on the node */
   66.60 +} geo_cpu_t;
   66.61 +
   66.62 +typedef struct geo_mem_s {
   66.63 +    geo_node_t	node;
   66.64 +    char	membus;		/* The memory bus on the node */
   66.65 +    char	memslot;	/* The memory slot on the bus */
   66.66 +} geo_mem_t;
   66.67 +
   66.68 +
   66.69 +typedef union geoid_u {
   66.70 +    geo_common_t	common;
   66.71 +    geo_node_t		node;
   66.72 +    geo_iocntl_t	iocntl;
   66.73 +    geo_pcicard_t	pcicard;
   66.74 +    geo_rtr_t		rtr;
   66.75 +    geo_cpu_t		cpu;
   66.76 +    geo_mem_t		mem;
   66.77 +    char		padsize[GEOID_SIZE];
   66.78 +} geoid_t;
   66.79 +
   66.80 +
   66.81 +/* Preprocessor macros */
   66.82 +
   66.83 +#define GEO_MAX_LEN	48	/* max. formatted length, plus some pad:
   66.84 +				   module/001c07/slab/5/node/memory/2/slot/4 */
   66.85 +
   66.86 +/* Values for geo_type_t */
   66.87 +#define GEO_TYPE_INVALID	0
   66.88 +#define GEO_TYPE_MODULE		1
   66.89 +#define GEO_TYPE_NODE		2
   66.90 +#define GEO_TYPE_RTR		3
   66.91 +#define GEO_TYPE_IOCNTL		4
   66.92 +#define GEO_TYPE_IOCARD		5
   66.93 +#define GEO_TYPE_CPU		6
   66.94 +#define GEO_TYPE_MEM		7
   66.95 +#define GEO_TYPE_MAX		(GEO_TYPE_MEM+1)
   66.96 +
   66.97 +/* Parameter for hwcfg_format_geoid_compt() */
   66.98 +#define GEO_COMPT_MODULE	1
   66.99 +#define GEO_COMPT_SLAB		2
  66.100 +#define GEO_COMPT_IOBUS		3
  66.101 +#define GEO_COMPT_IOSLOT	4
  66.102 +#define GEO_COMPT_CPU		5
  66.103 +#define GEO_COMPT_MEMBUS	6
  66.104 +#define GEO_COMPT_MEMSLOT	7
  66.105 +
  66.106 +#define GEO_INVALID_STR		"<invalid>"
  66.107 +
  66.108 +#define INVALID_NASID           ((nasid_t)-1)
  66.109 +#define INVALID_CNODEID         ((cnodeid_t)-1)
  66.110 +#define INVALID_PNODEID         ((pnodeid_t)-1)
  66.111 +#define INVALID_SLAB            (slabid_t)-1
  66.112 +#define INVALID_SLOT            (slotid_t)-1
  66.113 +#define INVALID_MODULE          ((moduleid_t)-1)
  66.114 +
  66.115 +static inline slabid_t geo_slab(geoid_t g)
  66.116 +{
  66.117 +	return (g.common.type == GEO_TYPE_INVALID) ?
  66.118 +		INVALID_SLAB : g.common.slab;
  66.119 +}
  66.120 +
  66.121 +static inline slotid_t geo_slot(geoid_t g)
  66.122 +{
  66.123 +	return (g.common.type == GEO_TYPE_INVALID) ?
  66.124 +		INVALID_SLOT : g.common.slot;
  66.125 +}
  66.126 +
  66.127 +static inline moduleid_t geo_module(geoid_t g)
  66.128 +{
  66.129 +	return (g.common.type == GEO_TYPE_INVALID) ?
  66.130 +		INVALID_MODULE : g.common.module;
  66.131 +}
  66.132 +
  66.133 +extern geoid_t cnodeid_get_geoid(cnodeid_t cnode);
  66.134 +
  66.135 +#endif /* _ASM_IA64_SN_GEO_H */
    67.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    67.2 +++ b/xen/include/asm-ia64/linux/asm/sn/klconfig.h	Wed Dec 20 14:55:02 2006 -0700
    67.3 @@ -0,0 +1,246 @@
    67.4 +/*
    67.5 + * This file is subject to the terms and conditions of the GNU General Public
    67.6 + * License.  See the file "COPYING" in the main directory of this archive
    67.7 + * for more details.
    67.8 + *
    67.9 + * Derived from IRIX <sys/SN/klconfig.h>.
   67.10 + *
   67.11 + * Copyright (C) 1992-1997,1999,2001-2004 Silicon Graphics, Inc.  All Rights Reserved.
   67.12 + * Copyright (C) 1999 by Ralf Baechle
   67.13 + */
   67.14 +#ifndef _ASM_IA64_SN_KLCONFIG_H
   67.15 +#define _ASM_IA64_SN_KLCONFIG_H
   67.16 +
   67.17 +/*
   67.18 + * The KLCONFIG structures store info about the various BOARDs found
   67.19 + * during Hardware Discovery. In addition, it stores info about the
   67.20 + * components found on the BOARDs.
   67.21 + */
   67.22 +
   67.23 +typedef s32 klconf_off_t;
   67.24 +
   67.25 +
   67.26 +/* Functions/macros needed to use this structure */
   67.27 +
   67.28 +typedef struct kl_config_hdr {
   67.29 +	char		pad[20];
   67.30 +	klconf_off_t	ch_board_info;	/* the link list of boards */
   67.31 +	char		pad0[88];
   67.32 +} kl_config_hdr_t;
   67.33 +
   67.34 +
   67.35 +#define NODE_OFFSET_TO_LBOARD(nasid,off)        (lboard_t*)(GLOBAL_CAC_ADDR((nasid), (off)))
   67.36 +
   67.37 +/*
   67.38 + * The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
   67.39 + * can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to 
   67.40 + * the LOCAL/current NODE. REMOTE means it is attached to a different
   67.41 + * node.(TBD - Need a way to treat ROUTER boards.)
   67.42 + *
   67.43 + * There are 2 different structures to represent these boards -
   67.44 + * lboard - Local board, rboard - remote board. These 2 structures
   67.45 + * can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer
   67.46 + * Figure below). The first byte of the rboard or lboard structure
   67.47 + * is used to find out its type - no unions are used.
   67.48 + * If it is a lboard, then the config info of this board will be found
   67.49 + * on the local node. (LOCAL NODE BASE + offset value gives pointer to 
   67.50 + * the structure.
   67.51 + * If it is a rboard, the local structure contains the node number
   67.52 + * and the offset of the beginning of the LINKED LIST on the remote node.
   67.53 + * The details of the hardware on a remote node can be built locally,
   67.54 + * if required, by reading the LINKED LIST on the remote node and 
   67.55 + * ignoring all the rboards on that node.
   67.56 + *
   67.57 + * The local node uses the REMOTE NODE NUMBER + OFFSET to point to the 
   67.58 + * First board info on the remote node. The remote node list is 
   67.59 + * traversed as the local list, using the REMOTE BASE ADDRESS and not
   67.60 + * the local base address and ignoring all rboard values.
   67.61 + *
   67.62 + * 
   67.63 + KLCONFIG
   67.64 +
   67.65 + +------------+      +------------+      +------------+      +------------+
   67.66 + |  lboard    |  +-->|   lboard   |  +-->|   rboard   |  +-->|   lboard   |
   67.67 + +------------+  |   +------------+  |   +------------+  |   +------------+
   67.68 + | board info |  |   | board info |  |   |errinfo,bptr|  |   | board info |
   67.69 + +------------+  |   +------------+  |   +------------+  |   +------------+
   67.70 + | offset     |--+   |  offset    |--+   |  offset    |--+   |offset=NULL |
   67.71 + +------------+      +------------+      +------------+      +------------+
   67.72 +
   67.73 +
   67.74 + +------------+
   67.75 + | board info |
   67.76 + +------------+       +--------------------------------+
   67.77 + | compt 1    |------>| type, rev, diaginfo, size ...  |  (CPU)
   67.78 + +------------+       +--------------------------------+
   67.79 + | compt 2    |--+
   67.80 + +------------+  |    +--------------------------------+
   67.81 + |  ...       |  +--->| type, rev, diaginfo, size ...  |  (MEM_BANK)
   67.82 + +------------+       +--------------------------------+
   67.83 + | errinfo    |--+
   67.84 + +------------+  |    +--------------------------------+
   67.85 +                 +--->|r/l brd errinfo,compt err flags |
   67.86 +                      +--------------------------------+
   67.87 +
   67.88 + *
   67.89 + * Each BOARD consists of COMPONENTs and the BOARD structure has 
   67.90 + * pointers (offsets) to its COMPONENT structure.
   67.91 + * The COMPONENT structure has version info, size and speed info, revision,
   67.92 + * error info and the NIC info. This structure can accommodate any
   67.93 + * BOARD with arbitrary COMPONENT composition.
   67.94 + *
   67.95 + * The ERRORINFO part of each BOARD has error information
   67.96 + * that describes errors about the BOARD itself. It also has flags to
   67.97 + * indicate the COMPONENT(s) on the board that have errors. The error 
   67.98 + * information specific to the COMPONENT is present in the respective 
   67.99 + * COMPONENT structure.
  67.100 + *
  67.101 + * The ERRORINFO structure is also treated like a COMPONENT, ie. the 
  67.102 + * BOARD has pointers(offset) to the ERRORINFO structure. The rboard
  67.103 + * structure also has a pointer to the ERRORINFO structure. This is 
  67.104 + * the place to store ERRORINFO about a REMOTE NODE, if the HUB on
  67.105 + * that NODE is not working or if the REMOTE MEMORY is BAD. In cases where 
  67.106 + * only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can
  67.107 + * be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info 
  67.108 + * which is present on the REMOTE NODE.(TBD)
  67.109 + * REMOTE ERRINFO can be stored on any of the nearest nodes 
  67.110 + * or on all the nearest nodes.(TBD)
  67.111 + * Like BOARD structures, REMOTE ERRINFO structures can be built locally
  67.112 + * using the rboard errinfo pointer.
  67.113 + *
  67.114 + * In order to get useful information from this Data organization, a set of
  67.115 + * interface routines are provided (TBD). The important thing to remember while
  67.116 + * manipulating the structures, is that, the NODE number information should
  67.117 + * be used. If the NODE is non-zero (remote) then each offset should
  67.118 + * be added to the REMOTE BASE ADDR else it should be added to the LOCAL BASE ADDR. 
  67.119 + * This includes offsets for BOARDS, COMPONENTS and ERRORINFO.
  67.120 + * 
  67.121 + * Note that these structures do not provide much info about connectivity.
  67.122 + * That info will be part of HWGRAPH, which is an extension of the cfg_t
  67.123 + * data structure. (ref IP27prom/cfg.h) It has to be extended to include
  67.124 + * the IO part of the Network(TBD).
  67.125 + *
  67.126 + * The data structures below define the above concepts.
  67.127 + */
  67.128 +
  67.129 +
  67.130 +/*
  67.131 + * BOARD classes
  67.132 + */
  67.133 +
  67.134 +#define KLCLASS_MASK	0xf0   
  67.135 +#define KLCLASS_NONE	0x00
  67.136 +#define KLCLASS_NODE	0x10             /* CPU, Memory and HUB board */
  67.137 +#define KLCLASS_CPU	KLCLASS_NODE	
  67.138 +#define KLCLASS_IO	0x20             /* BaseIO, 4 ch SCSI, ethernet, FDDI 
  67.139 +					    and the non-graphics widget boards */
  67.140 +#define KLCLASS_ROUTER	0x30             /* Router board */
  67.141 +#define KLCLASS_MIDPLANE 0x40            /* We need to treat this as a board
  67.142 +                                            so that we can record error info */
  67.143 +#define KLCLASS_IOBRICK	0x70		/* IP35 iobrick */
  67.144 +#define KLCLASS_MAX	8		/* Bump this if a new CLASS is added */
  67.145 +
  67.146 +#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
  67.147 +
  67.148 +
  67.149 +/*
  67.150 + * board types
  67.151 + */
  67.152 +
  67.153 +#define KLTYPE_MASK	0x0f
  67.154 +#define KLTYPE(_x)      ((_x) & KLTYPE_MASK)
  67.155 +
  67.156 +#define KLTYPE_SNIA	(KLCLASS_CPU | 0x1)
  67.157 +#define KLTYPE_TIO	(KLCLASS_CPU | 0x2)
  67.158 +
  67.159 +#define KLTYPE_ROUTER     (KLCLASS_ROUTER | 0x1)
  67.160 +#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
  67.161 +#define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4)
  67.162 +
  67.163 +#define KLTYPE_IOBRICK_XBOW	(KLCLASS_MIDPLANE | 0x2)
  67.164 +
  67.165 +#define KLTYPE_IOBRICK		(KLCLASS_IOBRICK | 0x0)
  67.166 +#define KLTYPE_NBRICK		(KLCLASS_IOBRICK | 0x4)
  67.167 +#define KLTYPE_PXBRICK		(KLCLASS_IOBRICK | 0x6)
  67.168 +#define KLTYPE_IXBRICK		(KLCLASS_IOBRICK | 0x7)
  67.169 +#define KLTYPE_CGBRICK		(KLCLASS_IOBRICK | 0x8)
  67.170 +#define KLTYPE_OPUSBRICK	(KLCLASS_IOBRICK | 0x9)
  67.171 +#define KLTYPE_SABRICK          (KLCLASS_IOBRICK | 0xa)
  67.172 +#define KLTYPE_IABRICK		(KLCLASS_IOBRICK | 0xb)
  67.173 +#define KLTYPE_PABRICK          (KLCLASS_IOBRICK | 0xc)
  67.174 +#define KLTYPE_GABRICK		(KLCLASS_IOBRICK | 0xd)
  67.175 +
  67.176 +
  67.177 +/* 
  67.178 + * board structures
  67.179 + */
  67.180 +
  67.181 +#define MAX_COMPTS_PER_BRD 24
  67.182 +
  67.183 +typedef struct lboard_s {
  67.184 +	klconf_off_t 	brd_next_any;     /* Next BOARD */
  67.185 +	unsigned char 	struct_type;      /* type of structure, local or remote */
  67.186 +	unsigned char 	brd_type;         /* type+class */
  67.187 +	unsigned char 	brd_sversion;     /* version of this structure */
  67.188 +        unsigned char 	brd_brevision;    /* board revision */
  67.189 +        unsigned char 	brd_promver;      /* board prom version, if any */
  67.190 + 	unsigned char 	brd_flags;        /* Enabled, Disabled etc */
  67.191 +	unsigned char 	brd_slot;         /* slot number */
  67.192 +	unsigned short	brd_debugsw;      /* Debug switches */
  67.193 +	geoid_t		brd_geoid;	  /* geo id */
  67.194 +	partid_t 	brd_partition;    /* Partition number */
  67.195 +        unsigned short 	brd_diagval;      /* diagnostic value */
  67.196 +        unsigned short 	brd_diagparm;     /* diagnostic parameter */
  67.197 +        unsigned char 	brd_inventory;    /* inventory history */
  67.198 +        unsigned char 	brd_numcompts;    /* Number of components */
  67.199 +        nic_t         	brd_nic;          /* Number in CAN */
  67.200 +	nasid_t		brd_nasid;        /* passed parameter */
  67.201 +	klconf_off_t 	brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
  67.202 +	klconf_off_t 	brd_errinfo;      /* Board's error information */
  67.203 +	struct lboard_s *brd_parent;	  /* Logical parent for this brd */
  67.204 +	char            pad0[4];
  67.205 +	unsigned char	brd_confidence;	  /* confidence that the board is bad */
  67.206 +	nasid_t		brd_owner;        /* who owns this board */
  67.207 +	unsigned char 	brd_nic_flags;    /* To handle 8 more NICs */
  67.208 +	char		pad1[24];	  /* future expansion */
  67.209 +	char		brd_name[32];
  67.210 +	nasid_t		brd_next_same_host; /* host of next brd w/same nasid */
  67.211 +	klconf_off_t	brd_next_same;    /* Next BOARD with same nasid */
  67.212 +} lboard_t;
  67.213 +
  67.214 +/*
  67.215 + * Generic info structure. This stores common info about a 
  67.216 + * component.
  67.217 + */
  67.218 + 
  67.219 +typedef struct klinfo_s {                  /* Generic info */
  67.220 +        unsigned char   struct_type;       /* type of this structure */
  67.221 +        unsigned char   struct_version;    /* version of this structure */
  67.222 +        unsigned char   flags;            /* Enabled, disabled etc */
  67.223 +        unsigned char   revision;         /* component revision */
  67.224 +        unsigned short  diagval;          /* result of diagnostics */
  67.225 +        unsigned short  diagparm;         /* diagnostic parameter */
  67.226 +        unsigned char   inventory;        /* previous inventory status */
  67.227 +        unsigned short  partid;		   /* widget part number */
  67.228 +	nic_t 		nic;              /* MUst be aligned properly */
  67.229 +        unsigned char   physid;           /* physical id of component */
  67.230 +        unsigned int    virtid;           /* virtual id as seen by system */
  67.231 +	unsigned char	widid;	          /* Widget id - if applicable */
  67.232 +	nasid_t		nasid;            /* node number - from parent */
  67.233 +	char		pad1;		  /* pad out structure. */
  67.234 +	char		pad2;		  /* pad out structure. */
  67.235 +	void		*data;
  67.236 +        klconf_off_t	errinfo;          /* component specific errors */
  67.237 +        unsigned short  pad3;             /* pci fields have moved over to */
  67.238 +        unsigned short  pad4;             /* klbri_t */
  67.239 +} klinfo_t ;
  67.240 +
  67.241 +
  67.242 +static inline lboard_t *find_lboard_next(lboard_t * brd)
  67.243 +{
  67.244 +	if (brd && brd->brd_next_any)
  67.245 +		return NODE_OFFSET_TO_LBOARD(NASID_GET(brd), brd->brd_next_any);
  67.246 +        return NULL;
  67.247 +}
  67.248 +
  67.249 +#endif /* _ASM_IA64_SN_KLCONFIG_H */
    68.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    68.2 +++ b/xen/include/asm-ia64/linux/asm/sn/l1.h	Wed Dec 20 14:55:02 2006 -0700
    68.3 @@ -0,0 +1,51 @@
    68.4 +/*
    68.5 + * This file is subject to the terms and conditions of the GNU General Public
    68.6 + * License.  See the file "COPYING" in the main directory of this archive
    68.7 + * for more details.
    68.8 + *
    68.9 + * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
   68.10 + */
   68.11 +
   68.12 +#ifndef _ASM_IA64_SN_L1_H
   68.13 +#define _ASM_IA64_SN_L1_H
   68.14 +
   68.15 +/* brick type response codes */
   68.16 +#define L1_BRICKTYPE_PX         0x23            /* # */
   68.17 +#define L1_BRICKTYPE_PE         0x25            /* % */
   68.18 +#define L1_BRICKTYPE_N_p0       0x26            /* & */
   68.19 +#define L1_BRICKTYPE_IP45       0x34            /* 4 */
   68.20 +#define L1_BRICKTYPE_IP41       0x35            /* 5 */
   68.21 +#define L1_BRICKTYPE_TWISTER    0x36            /* 6 */ /* IP53 & ROUTER */
   68.22 +#define L1_BRICKTYPE_IX         0x3d            /* = */
   68.23 +#define L1_BRICKTYPE_IP34       0x61            /* a */
   68.24 +#define L1_BRICKTYPE_GA		0x62            /* b */
   68.25 +#define L1_BRICKTYPE_C          0x63            /* c */
   68.26 +#define L1_BRICKTYPE_OPUS_TIO	0x66		/* f */
   68.27 +#define L1_BRICKTYPE_I          0x69            /* i */
   68.28 +#define L1_BRICKTYPE_N          0x6e            /* n */
   68.29 +#define L1_BRICKTYPE_OPUS       0x6f		/* o */
   68.30 +#define L1_BRICKTYPE_P          0x70            /* p */
   68.31 +#define L1_BRICKTYPE_R          0x72            /* r */
   68.32 +#define L1_BRICKTYPE_CHI_CG     0x76            /* v */
   68.33 +#define L1_BRICKTYPE_X          0x78            /* x */
   68.34 +#define L1_BRICKTYPE_X2         0x79            /* y */
   68.35 +#define L1_BRICKTYPE_SA		0x5e            /* ^ */
   68.36 +#define L1_BRICKTYPE_PA		0x6a            /* j */
   68.37 +#define L1_BRICKTYPE_IA		0x6b            /* k */
   68.38 +#define L1_BRICKTYPE_ATHENA	0x2b            /* + */
   68.39 +#define L1_BRICKTYPE_DAYTONA	0x7a            /* z */
   68.40 +#define L1_BRICKTYPE_1932	0x2c		/* , */
   68.41 +#define L1_BRICKTYPE_191010	0x2e		/* . */
   68.42 +
   68.43 +/* board type response codes */
   68.44 +#define L1_BOARDTYPE_IP69       0x0100          /* CA */
   68.45 +#define L1_BOARDTYPE_IP63       0x0200          /* CB */
   68.46 +#define L1_BOARDTYPE_BASEIO     0x0300          /* IB */
   68.47 +#define L1_BOARDTYPE_PCIE2SLOT  0x0400          /* IC */
   68.48 +#define L1_BOARDTYPE_PCIX3SLOT  0x0500          /* ID */
   68.49 +#define L1_BOARDTYPE_PCIXPCIE4SLOT 0x0600       /* IE */
   68.50 +#define L1_BOARDTYPE_ABACUS     0x0700          /* AB */
   68.51 +#define L1_BOARDTYPE_DAYTONA    0x0800          /* AD */
   68.52 +#define L1_BOARDTYPE_INVAL      (-1)            /* invalid board type */
   68.53 +
   68.54 +#endif /* _ASM_IA64_SN_L1_H */
    69.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    69.2 +++ b/xen/include/asm-ia64/linux/asm/sn/leds.h	Wed Dec 20 14:55:02 2006 -0700
    69.3 @@ -0,0 +1,33 @@
    69.4 +/*
    69.5 + * This file is subject to the terms and conditions of the GNU General Public
    69.6 + * License.  See the file "COPYING" in the main directory of this archive
    69.7 + * for more details.
    69.8 + * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
    69.9 + */
   69.10 +#ifndef _ASM_IA64_SN_LEDS_H
   69.11 +#define _ASM_IA64_SN_LEDS_H
   69.12 +
   69.13 +#include <asm/sn/addrs.h>
   69.14 +#include <asm/sn/pda.h>
   69.15 +#include <asm/sn/shub_mmr.h>
   69.16 +
   69.17 +#define LED0		(LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0))
   69.18 +#define LED_CPU_SHIFT	16
   69.19 +
   69.20 +#define LED_CPU_HEARTBEAT	0x01
   69.21 +#define LED_CPU_ACTIVITY	0x02
   69.22 +#define LED_ALWAYS_SET		0x00
   69.23 +
   69.24 +/*
   69.25 + * Basic macros for flashing the LEDS on an SGI SN.
   69.26 + */
   69.27 +
   69.28 +static __inline__ void
   69.29 +set_led_bits(u8 value, u8 mask)
   69.30 +{
   69.31 +	pda->led_state = (pda->led_state & ~mask) | (value & mask);
   69.32 +	*pda->led_address = (short) pda->led_state;
   69.33 +}
   69.34 +
   69.35 +#endif /* _ASM_IA64_SN_LEDS_H */
   69.36 +
    70.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    70.2 +++ b/xen/include/asm-ia64/linux/asm/sn/module.h	Wed Dec 20 14:55:02 2006 -0700
    70.3 @@ -0,0 +1,127 @@
    70.4 +/*
    70.5 + * This file is subject to the terms and conditions of the GNU General Public
    70.6 + * License.  See the file "COPYING" in the main directory of this archive
    70.7 + * for more details.
    70.8 + *
    70.9 + * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
   70.10 + */
   70.11 +#ifndef _ASM_IA64_SN_MODULE_H
   70.12 +#define _ASM_IA64_SN_MODULE_H
   70.13 +
   70.14 +/* parameter for format_module_id() */
   70.15 +#define MODULE_FORMAT_BRIEF	1
   70.16 +#define MODULE_FORMAT_LONG	2
   70.17 +#define MODULE_FORMAT_LCD	3
   70.18 +
   70.19 +/*
   70.20 + *	Module id format
   70.21 + *
   70.22 + *	31-16	Rack ID (encoded class, group, number - 16-bit unsigned int)
   70.23 + *	 15-8	Brick type (8-bit ascii character)
   70.24 + *	  7-0	Bay (brick position in rack (0-63) - 8-bit unsigned int)
   70.25 + *
   70.26 + */
   70.27 +
   70.28 +/*
   70.29 + * Macros for getting the brick type
   70.30 + */
   70.31 +#define MODULE_BTYPE_MASK	0xff00
   70.32 +#define MODULE_BTYPE_SHFT	8
   70.33 +#define MODULE_GET_BTYPE(_m)	(((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT)
   70.34 +#define MODULE_BT_TO_CHAR(_b)	((char)(_b))
   70.35 +#define MODULE_GET_BTCHAR(_m)	(MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m)))
   70.36 +
   70.37 +/*
   70.38 + * Macros for getting the rack ID.
   70.39 + */
   70.40 +#define MODULE_RACK_MASK	0xffff0000
   70.41 +#define MODULE_RACK_SHFT	16
   70.42 +#define MODULE_GET_RACK(_m)	(((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT)
   70.43 +
   70.44 +/*
   70.45 + * Macros for getting the brick position
   70.46 + */
   70.47 +#define MODULE_BPOS_MASK	0x00ff
   70.48 +#define MODULE_BPOS_SHFT	0
   70.49 +#define MODULE_GET_BPOS(_m)	(((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT)
   70.50 +
   70.51 +/*
   70.52 + * Macros for encoding and decoding rack IDs
   70.53 + * A rack number consists of three parts:
   70.54 + *   class (0==CPU/mixed, 1==I/O), group, number
   70.55 + *
   70.56 + * Rack number is stored just as it is displayed on the screen:
   70.57 + * a 3-decimal-digit number.
   70.58 + */
   70.59 +#define RACK_CLASS_DVDR         100
   70.60 +#define RACK_GROUP_DVDR         10
   70.61 +#define RACK_NUM_DVDR           1
   70.62 +
   70.63 +#define RACK_CREATE_RACKID(_c, _g, _n)  ((_c) * RACK_CLASS_DVDR +       \
   70.64 +        (_g) * RACK_GROUP_DVDR + (_n) * RACK_NUM_DVDR)
   70.65 +
   70.66 +#define RACK_GET_CLASS(_r)              ((_r) / RACK_CLASS_DVDR)
   70.67 +#define RACK_GET_GROUP(_r)              (((_r) - RACK_GET_CLASS(_r) *   \
   70.68 +            RACK_CLASS_DVDR) / RACK_GROUP_DVDR)
   70.69 +#define RACK_GET_NUM(_r)                (((_r) - RACK_GET_CLASS(_r) *   \
   70.70 +            RACK_CLASS_DVDR - RACK_GET_GROUP(_r) *      \
   70.71 +            RACK_GROUP_DVDR) / RACK_NUM_DVDR)
   70.72 +
   70.73 +/*
   70.74 + * Macros for encoding and decoding rack IDs
   70.75 + * A rack number consists of three parts:
   70.76 + *   class      1 bit, 0==CPU/mixed, 1==I/O
   70.77 + *   group      2 bits for CPU/mixed, 3 bits for I/O
   70.78 + *   number     3 bits for CPU/mixed, 2 bits for I/O (1 based)
   70.79 + */
   70.80 +#define RACK_GROUP_BITS(_r)     (RACK_GET_CLASS(_r) ? 3 : 2)
   70.81 +#define RACK_NUM_BITS(_r)       (RACK_GET_CLASS(_r) ? 2 : 3)
   70.82 +
   70.83 +#define RACK_CLASS_MASK(_r)     0x20
   70.84 +#define RACK_CLASS_SHFT(_r)     5
   70.85 +#define RACK_ADD_CLASS(_r, _c)  \
   70.86 +        ((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r))
   70.87 +
   70.88 +#define RACK_GROUP_SHFT(_r)     RACK_NUM_BITS(_r)
   70.89 +#define RACK_GROUP_MASK(_r)     \
   70.90 +        ( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) )
   70.91 +#define RACK_ADD_GROUP(_r, _g)  \
   70.92 +        ((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r))
   70.93 +
   70.94 +#define RACK_NUM_SHFT(_r)       0
   70.95 +#define RACK_NUM_MASK(_r)       \
   70.96 +        ( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) )
   70.97 +#define RACK_ADD_NUM(_r, _n)    \
   70.98 +        ((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r))
   70.99 +
  70.100 +
  70.101 +/*
  70.102 + * Brick type definitions
  70.103 + */
  70.104 +#define MAX_BRICK_TYPES         256 /* brick type is stored as uchar */
  70.105 +
  70.106 +extern char brick_types[];
  70.107 +
  70.108 +#define MODULE_CBRICK           0
  70.109 +#define MODULE_RBRICK           1
  70.110 +#define MODULE_IBRICK           2
  70.111 +#define MODULE_KBRICK           3
  70.112 +#define MODULE_XBRICK           4
  70.113 +#define MODULE_DBRICK           5
  70.114 +#define MODULE_PBRICK           6
  70.115 +#define MODULE_NBRICK           7
  70.116 +#define MODULE_PEBRICK          8
  70.117 +#define MODULE_PXBRICK          9
  70.118 +#define MODULE_IXBRICK          10
  70.119 +#define MODULE_CGBRICK		11
  70.120 +#define MODULE_OPUSBRICK        12
  70.121 +#define MODULE_SABRICK		13	/* TIO BringUp Brick */
  70.122 +#define MODULE_IABRICK		14
  70.123 +#define MODULE_PABRICK		15
  70.124 +#define MODULE_GABRICK		16
  70.125 +#define MODULE_OPUS_TIO		17	/* OPUS TIO Riser */
  70.126 +
  70.127 +extern char brick_types[];
  70.128 +extern void format_module_id(char *, moduleid_t, int);
  70.129 +
  70.130 +#endif /* _ASM_IA64_SN_MODULE_H */
    71.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    71.2 +++ b/xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h	Wed Dec 20 14:55:02 2006 -0700
    71.3 @@ -0,0 +1,68 @@
    71.4 +/*
    71.5 + * This file is subject to the terms and conditions of the GNU General Public
    71.6 + * License.  See the file "COPYING" in the main directory of this archive
    71.7 + * for more details.
    71.8 + *
    71.9 + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
   71.10 + */
   71.11 +#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
   71.12 +#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
   71.13 +
   71.14 +/*
   71.15 + * SN pci asic types.  Do not ever renumber these or reuse values.  The
   71.16 + * values must agree with what prom thinks they are.
   71.17 + */
   71.18 +
   71.19 +#define PCIIO_ASIC_TYPE_UNKNOWN	0
   71.20 +#define PCIIO_ASIC_TYPE_PPB	1
   71.21 +#define PCIIO_ASIC_TYPE_PIC	2
   71.22 +#define PCIIO_ASIC_TYPE_TIOCP	3
   71.23 +#define PCIIO_ASIC_TYPE_TIOCA	4
   71.24 +#define PCIIO_ASIC_TYPE_TIOCE	5
   71.25 +
   71.26 +#define PCIIO_ASIC_MAX_TYPES	6
   71.27 +
   71.28 +/*
   71.29 + * Common pciio bus provider data.  There should be one of these as the
   71.30 + * first field in any pciio based provider soft structure (e.g. pcibr_soft
   71.31 + * tioca_soft, etc).
   71.32 + */
   71.33 +
   71.34 +struct pcibus_bussoft {
   71.35 +	u32		bs_asic_type;	/* chipset type */
   71.36 +	u32		bs_xid;		/* xwidget id */
   71.37 +	u32		bs_persist_busnum; /* Persistent Bus Number */
   71.38 +	u32		bs_persist_segment; /* Segment Number */
   71.39 +	u64		bs_legacy_io;	/* legacy io pio addr */
   71.40 +	u64		bs_legacy_mem;	/* legacy mem pio addr */
   71.41 +	u64		bs_base;	/* widget base */
   71.42 +	struct xwidget_info	*bs_xwidget_info;
   71.43 +};
   71.44 +
   71.45 +struct pci_controller;
   71.46 +/*
   71.47 + * SN pci bus indirection
   71.48 + */
   71.49 +
   71.50 +struct sn_pcibus_provider {
   71.51 +	dma_addr_t	(*dma_map)(struct pci_dev *, unsigned long, size_t, int flags);
   71.52 +	dma_addr_t	(*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags);
   71.53 +	void		(*dma_unmap)(struct pci_dev *, dma_addr_t, int);
   71.54 +	void *		(*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *);
   71.55 + 	void		(*force_interrupt)(struct sn_irq_info *);
   71.56 + 	void		(*target_interrupt)(struct sn_irq_info *);
   71.57 +};
   71.58 +
   71.59 +/*
   71.60 + * Flags used by the map interfaces
   71.61 + * bits 3:0 specifies format of passed in address
   71.62 + * bit  4   specifies that address is to be used for MSI
   71.63 + */
   71.64 +
   71.65 +#define SN_DMA_ADDRTYPE(x)	((x) & 0xf)
   71.66 +#define     SN_DMA_ADDR_PHYS	1	/* address is phys memory */
   71.67 +#define     SN_DMA_ADDR_XIO	2	/* address is an xio address */
   71.68 +#define SN_DMA_MSI		0x10	/* Bus address is to be used for MSI */
   71.69 +
   71.70 +extern struct sn_pcibus_provider *sn_pci_provider[];
   71.71 +#endif				/* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
    72.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    72.2 +++ b/xen/include/asm-ia64/linux/asm/sn/pcidev.h	Wed Dec 20 14:55:02 2006 -0700
    72.3 @@ -0,0 +1,83 @@
    72.4 +/*
    72.5 + * This file is subject to the terms and conditions of the GNU General Public
    72.6 + * License.  See the file "COPYING" in the main directory of this archive
    72.7 + * for more details.
    72.8 + *
    72.9 + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
   72.10 + */
   72.11 +#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
   72.12 +#define _ASM_IA64_SN_PCI_PCIDEV_H
   72.13 +
   72.14 +#include <linux/pci.h>
   72.15 +
   72.16 +/*
   72.17 + * In ia64, pci_dev->sysdata must be a *pci_controller. To provide access to
   72.18 + * the pcidev_info structs for all devices under a controller, we extend the
   72.19 + * definition of pci_controller, via sn_pci_controller, to include a list
   72.20 + * of pcidev_info.
   72.21 + */
   72.22 +struct sn_pci_controller {
   72.23 +	struct pci_controller pci_controller;
   72.24 +	struct list_head pcidev_info;
   72.25 +};
   72.26 +
   72.27 +#define SN_PCI_CONTROLLER(dev) ((struct sn_pci_controller *) dev->sysdata)
   72.28 +
   72.29 +#define SN_PCIDEV_INFO(dev)	sn_pcidev_info_get(dev)
   72.30 +
   72.31 +#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \
   72.32 +	(struct pcibus_info *)((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
   72.33 +/*
   72.34 + * Given a pci_bus, return the sn pcibus_bussoft struct.  Note that
   72.35 + * this only works for root busses, not for busses represented by PPB's.
   72.36 + */
   72.37 +
   72.38 +#define SN_PCIBUS_BUSSOFT(pci_bus) \
   72.39 +        ((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
   72.40 +
   72.41 +#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \
   72.42 +	(struct pcibus_info *)((s