ia64/xen-unstable

changeset 13961:5ad7d9b466ee

[IA64] Import arch/ia64/pci/pci.c into sparse tree

Necessary for paravirtualizing mmap handler for /proc/bus/pci

Signed-off-by: Akio Takebe <takebe_akio@jp.fujitsu.com>
author awilliam@xenbuild2.aw
date Sun Feb 18 15:57:38 2007 -0700 (2007-02-18)
parents 779d21cf58e7
children 5abf33a383cf
files linux-2.6-xen-sparse/arch/ia64/pci/pci.c
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/ia64/pci/pci.c	Sun Feb 18 15:57:38 2007 -0700
     1.3 @@ -0,0 +1,820 @@
     1.4 +/*
     1.5 + * pci.c - Low-Level PCI Access in IA-64
     1.6 + *
     1.7 + * Derived from bios32.c of i386 tree.
     1.8 + *
     1.9 + * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
    1.10 + *	David Mosberger-Tang <davidm@hpl.hp.com>
    1.11 + *	Bjorn Helgaas <bjorn.helgaas@hp.com>
    1.12 + * Copyright (C) 2004 Silicon Graphics, Inc.
    1.13 + *
    1.14 + * Note: Above list of copyright holders is incomplete...
    1.15 + */
    1.16 +
    1.17 +#include <linux/acpi.h>
    1.18 +#include <linux/types.h>
    1.19 +#include <linux/kernel.h>
    1.20 +#include <linux/pci.h>
    1.21 +#include <linux/init.h>
    1.22 +#include <linux/ioport.h>
    1.23 +#include <linux/slab.h>
    1.24 +#include <linux/smp_lock.h>
    1.25 +#include <linux/spinlock.h>
    1.26 +
    1.27 +#include <asm/machvec.h>
    1.28 +#include <asm/page.h>
    1.29 +#include <asm/system.h>
    1.30 +#include <asm/io.h>
    1.31 +#include <asm/sal.h>
    1.32 +#include <asm/smp.h>
    1.33 +#include <asm/irq.h>
    1.34 +#include <asm/hw_irq.h>
    1.35 +
    1.36 +/*
    1.37 + * Low-level SAL-based PCI configuration access functions. Note that SAL
    1.38 + * calls are already serialized (via sal_lock), so we don't need another
    1.39 + * synchronization mechanism here.
    1.40 + */
    1.41 +
/* Legacy SAL config address: 8-bit register offset (config space < 256). */
#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

/* Extended SAL config address: 12-bit register offset (up to 4K). */
#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
    1.49 +
    1.50 +static int
    1.51 +pci_sal_read (unsigned int seg, unsigned int bus, unsigned int devfn,
    1.52 +	      int reg, int len, u32 *value)
    1.53 +{
    1.54 +	u64 addr, data = 0;
    1.55 +	int mode, result;
    1.56 +
    1.57 +	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
    1.58 +		return -EINVAL;
    1.59 +
    1.60 +	if ((seg | reg) <= 255) {
    1.61 +		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
    1.62 +		mode = 0;
    1.63 +	} else {
    1.64 +		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
    1.65 +		mode = 1;
    1.66 +	}
    1.67 +	result = ia64_sal_pci_config_read(addr, mode, len, &data);
    1.68 +	if (result != 0)
    1.69 +		return -EINVAL;
    1.70 +
    1.71 +	*value = (u32) data;
    1.72 +	return 0;
    1.73 +}
    1.74 +
    1.75 +static int
    1.76 +pci_sal_write (unsigned int seg, unsigned int bus, unsigned int devfn,
    1.77 +	       int reg, int len, u32 value)
    1.78 +{
    1.79 +	u64 addr;
    1.80 +	int mode, result;
    1.81 +
    1.82 +	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
    1.83 +		return -EINVAL;
    1.84 +
    1.85 +	if ((seg | reg) <= 255) {
    1.86 +		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
    1.87 +		mode = 0;
    1.88 +	} else {
    1.89 +		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
    1.90 +		mode = 1;
    1.91 +	}
    1.92 +	result = ia64_sal_pci_config_write(addr, mode, len, value);
    1.93 +	if (result != 0)
    1.94 +		return -EINVAL;
    1.95 +	return 0;
    1.96 +}
    1.97 +
/* Raw config accessors; SAL is the only backend on ia64. */
static struct pci_raw_ops pci_sal_ops = {
	.read =		pci_sal_read,
	.write =	pci_sal_write
};

/* Backend used by the pci_read()/pci_write() wrappers below. */
struct pci_raw_ops *raw_pci_ops = &pci_sal_ops;
   1.104 +
   1.105 +static int
   1.106 +pci_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
   1.107 +{
   1.108 +	return raw_pci_ops->read(pci_domain_nr(bus), bus->number,
   1.109 +				 devfn, where, size, value);
   1.110 +}
   1.111 +
   1.112 +static int
   1.113 +pci_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
   1.114 +{
   1.115 +	return raw_pci_ops->write(pci_domain_nr(bus), bus->number,
   1.116 +				  devfn, where, size, value);
   1.117 +}
   1.118 +
/* Config space accessors handed to the PCI core for every root bus. */
struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
   1.123 +
   1.124 +/* Called by ACPI when it finds a new root bus.  */
   1.125 +
   1.126 +static struct pci_controller * __devinit
   1.127 +alloc_pci_controller (int seg)
   1.128 +{
   1.129 +	struct pci_controller *controller;
   1.130 +
   1.131 +	controller = kmalloc(sizeof(*controller), GFP_KERNEL);
   1.132 +	if (!controller)
   1.133 +		return NULL;
   1.134 +
   1.135 +	memset(controller, 0, sizeof(*controller));
   1.136 +	controller->segment = seg;
   1.137 +	controller->node = -1;
   1.138 +	return controller;
   1.139 +}
   1.140 +
/* Context passed to the _CRS walk callbacks while building a root bus. */
struct pci_root_info {
	struct pci_controller *controller;	/* controller being populated */
	char *name;				/* "PCI Bus dddd:bb" label for resources */
};
   1.145 +
/*
 * Find or create an io_space[] slot for the I/O port space whose CPU
 * window starts at @phys_base.  Returns the slot index (0 is the legacy
 * port space), or ~0 when io_space[] is full.
 */
static unsigned int
new_space (u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	/* Length 0: we only need the translated base, not a real mapping. */
	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		printk(KERN_ERR "PCI: Too many IO port spaces "
			"(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

	return i;
}
   1.173 +
/*
 * Register the I/O port window described by @addr: claim (or reuse) an
 * io_space[] slot, insert the corresponding MMIO range into
 * iomem_resource, and return the port-number base for the window.
 * Returns ~0 on failure (allocation failure or io_space[] full).
 * The resource and its name are intentionally not freed on success --
 * they stay inserted in the iomem tree.
 */
static u64 __devinit
add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
{
	struct resource *resource;
	char *name;
	u64 base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space\n",
			info->name);
		goto out;
	}

	len = strlen(info->name) + 32;
	name = kzalloc(len, GFP_KERNEL);
	if (!name) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space name\n",
			info->name);
		goto free_resource;
	}

	min = addr->minimum;
	max = min + addr->address_length - 1;
	if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
		sparse = 1;

	space_nr = new_space(addr->translation_offset, sparse);
	if (space_nr == ~0)
		goto free_name;

	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
		base_port + min, base_port + max);

	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * mark it as sparse.
	 */
	if (space_nr == 0)
		sparse = 1;

	resource->name  = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	insert_resource(&iomem_resource, resource);

	return base_port;

free_name:
	kfree(name);
free_resource:
	kfree(resource);
out:
	return ~0;
}
   1.234 +
   1.235 +static acpi_status __devinit resource_to_window(struct acpi_resource *resource,
   1.236 +	struct acpi_resource_address64 *addr)
   1.237 +{
   1.238 +	acpi_status status;
   1.239 +
   1.240 +	/*
   1.241 +	 * We're only interested in _CRS descriptors that are
   1.242 +	 *	- address space descriptors for memory or I/O space
   1.243 +	 *	- non-zero size
   1.244 +	 *	- producers, i.e., the address space is routed downstream,
   1.245 +	 *	  not consumed by the bridge itself
   1.246 +	 */
   1.247 +	status = acpi_resource_to_address64(resource, addr);
   1.248 +	if (ACPI_SUCCESS(status) &&
   1.249 +	    (addr->resource_type == ACPI_MEMORY_RANGE ||
   1.250 +	     addr->resource_type == ACPI_IO_RANGE) &&
   1.251 +	    addr->address_length &&
   1.252 +	    addr->producer_consumer == ACPI_PRODUCER)
   1.253 +		return AE_OK;
   1.254 +
   1.255 +	return AE_ERROR;
   1.256 +}
   1.257 +
   1.258 +static acpi_status __devinit
   1.259 +count_window (struct acpi_resource *resource, void *data)
   1.260 +{
   1.261 +	unsigned int *windows = (unsigned int *) data;
   1.262 +	struct acpi_resource_address64 addr;
   1.263 +	acpi_status status;
   1.264 +
   1.265 +	status = resource_to_window(resource, &addr);
   1.266 +	if (ACPI_SUCCESS(status))
   1.267 +		(*windows)++;
   1.268 +
   1.269 +	return AE_OK;
   1.270 +}
   1.271 +
/*
 * acpi_walk_resources() callback: append one pci_window to the
 * controller for each usable _CRS window, inserting its resource into
 * the iomem or ioport tree.  Always returns AE_OK so the walk visits
 * every descriptor; failures are logged but do not stop the scan.
 */
static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
{
	struct pci_root_info *info = data;
	struct pci_window *window;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags, offset = 0;
	struct resource *root;

	/* Return AE_OK for non-window resources to keep scanning for more */
	status = resource_to_window(res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		root = &iomem_resource;
		offset = addr.translation_offset;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
		root = &ioport_resource;
		/* add_io_space() returns the port base, or ~0 on failure */
		offset = add_io_space(info, &addr);
		if (offset == ~0)
			return AE_OK;
	} else
		return AE_OK;

	/* controller->window was sized by a prior count_window() pass */
	window = &info->controller->window[info->controller->windows++];
	window->resource.name = info->name;
	window->resource.flags = flags;
	window->resource.start = addr.minimum + offset;
	window->resource.end = window->resource.start + addr.address_length - 1;
	window->resource.child = NULL;
	window->offset = offset;

	if (insert_resource(root, &window->resource)) {
		printk(KERN_ERR "alloc 0x%lx-0x%lx from %s for %s failed\n",
			window->resource.start, window->resource.end,
			root->name, info->name);
	}

	return AE_OK;
}
   1.315 +
   1.316 +static void __devinit
   1.317 +pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
   1.318 +{
   1.319 +	int i, j;
   1.320 +
   1.321 +	j = 0;
   1.322 +	for (i = 0; i < ctrl->windows; i++) {
   1.323 +		struct resource *res = &ctrl->window[i].resource;
   1.324 +		/* HP's firmware has a hack to work around a Windows bug.
   1.325 +		 * Ignore these tiny memory ranges */
   1.326 +		if ((res->flags & IORESOURCE_MEM) &&
   1.327 +		    (res->end - res->start < 16))
   1.328 +			continue;
   1.329 +		if (j >= PCI_BUS_NUM_RESOURCES) {
   1.330 +			printk("Ignoring range [%lx-%lx] (%lx)\n", res->start,
   1.331 +					res->end, res->flags);
   1.332 +			continue;
   1.333 +		}
   1.334 +		bus->resource[j++] = res;
   1.335 +	}
   1.336 +}
   1.337 +
   1.338 +struct pci_bus * __devinit
   1.339 +pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
   1.340 +{
   1.341 +	struct pci_root_info info;
   1.342 +	struct pci_controller *controller;
   1.343 +	unsigned int windows = 0;
   1.344 +	struct pci_bus *pbus;
   1.345 +	char *name;
   1.346 +	int pxm;
   1.347 +
   1.348 +	controller = alloc_pci_controller(domain);
   1.349 +	if (!controller)
   1.350 +		goto out1;
   1.351 +
   1.352 +	controller->acpi_handle = device->handle;
   1.353 +
   1.354 +	pxm = acpi_get_pxm(controller->acpi_handle);
   1.355 +#ifdef CONFIG_NUMA
   1.356 +	if (pxm >= 0)
   1.357 +		controller->node = pxm_to_node(pxm);
   1.358 +#endif
   1.359 +
   1.360 +	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
   1.361 +			&windows);
   1.362 +	controller->window = kmalloc_node(sizeof(*controller->window) * windows,
   1.363 +			GFP_KERNEL, controller->node);
   1.364 +	if (!controller->window)
   1.365 +		goto out2;
   1.366 +
   1.367 +	name = kmalloc(16, GFP_KERNEL);
   1.368 +	if (!name)
   1.369 +		goto out3;
   1.370 +
   1.371 +	sprintf(name, "PCI Bus %04x:%02x", domain, bus);
   1.372 +	info.controller = controller;
   1.373 +	info.name = name;
   1.374 +	acpi_walk_resources(device->handle, METHOD_NAME__CRS, add_window,
   1.375 +			&info);
   1.376 +
   1.377 +	pbus = pci_scan_bus_parented(NULL, bus, &pci_root_ops, controller);
   1.378 +	if (pbus)
   1.379 +		pcibios_setup_root_windows(pbus, controller);
   1.380 +
   1.381 +	return pbus;
   1.382 +
   1.383 +out3:
   1.384 +	kfree(controller->window);
   1.385 +out2:
   1.386 +	kfree(controller);
   1.387 +out1:
   1.388 +	return NULL;
   1.389 +}
   1.390 +
   1.391 +void pcibios_resource_to_bus(struct pci_dev *dev,
   1.392 +		struct pci_bus_region *region, struct resource *res)
   1.393 +{
   1.394 +	struct pci_controller *controller = PCI_CONTROLLER(dev);
   1.395 +	unsigned long offset = 0;
   1.396 +	int i;
   1.397 +
   1.398 +	for (i = 0; i < controller->windows; i++) {
   1.399 +		struct pci_window *window = &controller->window[i];
   1.400 +		if (!(window->resource.flags & res->flags))
   1.401 +			continue;
   1.402 +		if (window->resource.start > res->start)
   1.403 +			continue;
   1.404 +		if (window->resource.end < res->end)
   1.405 +			continue;
   1.406 +		offset = window->offset;
   1.407 +		break;
   1.408 +	}
   1.409 +
   1.410 +	region->start = res->start - offset;
   1.411 +	region->end = res->end - offset;
   1.412 +}
   1.413 +EXPORT_SYMBOL(pcibios_resource_to_bus);
   1.414 +
   1.415 +void pcibios_bus_to_resource(struct pci_dev *dev,
   1.416 +		struct resource *res, struct pci_bus_region *region)
   1.417 +{
   1.418 +	struct pci_controller *controller = PCI_CONTROLLER(dev);
   1.419 +	unsigned long offset = 0;
   1.420 +	int i;
   1.421 +
   1.422 +	for (i = 0; i < controller->windows; i++) {
   1.423 +		struct pci_window *window = &controller->window[i];
   1.424 +		if (!(window->resource.flags & res->flags))
   1.425 +			continue;
   1.426 +		if (window->resource.start - window->offset > region->start)
   1.427 +			continue;
   1.428 +		if (window->resource.end - window->offset < region->end)
   1.429 +			continue;
   1.430 +		offset = window->offset;
   1.431 +		break;
   1.432 +	}
   1.433 +
   1.434 +	res->start = region->start + offset;
   1.435 +	res->end = region->end + offset;
   1.436 +}
   1.437 +EXPORT_SYMBOL(pcibios_bus_to_resource);
   1.438 +
   1.439 +static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
   1.440 +{
   1.441 +	unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
   1.442 +	struct resource *devr = &dev->resource[idx];
   1.443 +
   1.444 +	if (!dev->bus)
   1.445 +		return 0;
   1.446 +	for (i=0; i<PCI_BUS_NUM_RESOURCES; i++) {
   1.447 +		struct resource *busr = dev->bus->resource[i];
   1.448 +
   1.449 +		if (!busr || ((busr->flags ^ devr->flags) & type_mask))
   1.450 +			continue;
   1.451 +		if ((devr->start) && (devr->start >= busr->start) &&
   1.452 +				(devr->end <= busr->end))
   1.453 +			return 1;
   1.454 +	}
   1.455 +	return 0;
   1.456 +}
   1.457 +
   1.458 +static void __devinit
   1.459 +pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
   1.460 +{
   1.461 +	struct pci_bus_region region;
   1.462 +	int i;
   1.463 +
   1.464 +	for (i = start; i < limit; i++) {
   1.465 +		if (!dev->resource[i].flags)
   1.466 +			continue;
   1.467 +		region.start = dev->resource[i].start;
   1.468 +		region.end = dev->resource[i].end;
   1.469 +		pcibios_bus_to_resource(dev, &dev->resource[i], &region);
   1.470 +		if ((is_valid_resource(dev, i)))
   1.471 +			pci_claim_resource(dev, i);
   1.472 +	}
   1.473 +}
   1.474 +
/* Fix up a device's BAR resources (everything before the bridge windows). */
static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
}
   1.479 +
/* Fix up a bridge's forwarding-window resources. */
static void __devinit pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
}
   1.484 +
   1.485 +/*
   1.486 + *  Called after each bus is probed, but before its children are examined.
   1.487 + */
   1.488 +void __devinit
   1.489 +pcibios_fixup_bus (struct pci_bus *b)
   1.490 +{
   1.491 +	struct pci_dev *dev;
   1.492 +
   1.493 +	if (b->self) {
   1.494 +		pci_read_bridge_bases(b);
   1.495 +		pcibios_fixup_bridge_resources(b->self);
   1.496 +	}
   1.497 +	list_for_each_entry(dev, &b->devices, bus_list)
   1.498 +		pcibios_fixup_device_resources(dev);
   1.499 +
   1.500 +	return;
   1.501 +}
   1.502 +
/* Record the routed IRQ in the device's PCI_INTERRUPT_LINE register. */
void __devinit
pcibios_update_irq (struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);

	/* ??? FIXME -- record old value for shutdown.  */
}
   1.510 +
/*
 * Enable I/O and/or memory decoding in the command register for the
 * resources selected by @mask.  Fails with -EINVAL if any selected
 * resource is unassigned (start == 0 with non-zero end), i.e. it
 * collided during allocation.
 */
static inline int
pcibios_enable_resources (struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;

	if (!dev)
		return -EINVAL;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx=0; idx<PCI_NUM_RESOURCES; idx++) {
		/* Only set up the desired resources.  */
		if (!(mask & (1 << idx)))
			continue;

		r = &dev->resource[idx];
		if (!(r->flags & type_mask))
			continue;
		/* Expansion ROMs are only enabled when explicitly requested. */
		if ((idx == PCI_ROM_RESOURCE) &&
				(!(r->flags & IORESOURCE_ROM_ENABLE)))
			continue;
		if (!r->start && r->end) {
			printk(KERN_ERR
			       "PCI: Device %s not available because of resource collisions\n",
			       pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	/* Only touch the hardware if a decode bit actually changed. */
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}
   1.552 +
   1.553 +int
   1.554 +pcibios_enable_device (struct pci_dev *dev, int mask)
   1.555 +{
   1.556 +	int ret;
   1.557 +
   1.558 +	ret = pcibios_enable_resources(dev, mask);
   1.559 +	if (ret < 0)
   1.560 +		return ret;
   1.561 +
   1.562 +	return acpi_pci_irq_enable(dev);
   1.563 +}
   1.564 +
/* Release the device's ACPI-routed interrupt. */
void
pcibios_disable_device (struct pci_dev *dev)
{
	acpi_pci_irq_disable(dev);
}
   1.570 +
/* No extra alignment constraints on ia64; intentionally a no-op. */
void
pcibios_align_resource (void *data, struct resource *res,
		        resource_size_t size, resource_size_t align)
{
}
   1.576 +
/*
 * PCI BIOS setup, always defaults to SAL interface
 */
/* No "pci=" options are recognized; return the string unconsumed. */
char * __init
pcibios_setup (char *str)
{
	return str;
}
   1.585 +
/*
 * Map PCI memory space into userspace.  I/O space mappings are
 * rejected; WC is honoured only if EFI says the range supports it,
 * otherwise the mapping is uncached.  Returns 0, -EINVAL, or -EAGAIN.
 */
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)
{
	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped.  But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */
		return -EINVAL;

	/*
	 * Leave vm_pgoff as-is, the PCI space address is the physical
	 * address on this platform.
	 */
	if (write_combine && efi_range_is_wc(vma->vm_start,
					     vma->vm_end - vma->vm_start))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
   1.619 +
/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
	/* Generic version: legacy space starts at the uncached identity base. */
	return (char *)__IA64_UNCACHED_OFFSET;
}
   1.636 +
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.  Returns 0 on success, -EINVAL if the
 * range cannot be mapped uncached without aliasing, or -EAGAIN if the
 * final remap fails.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	/* Only an uncached mapping is safe here; reject anything else. */
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);
	if (pgprot_val(prot) != pgprot_val(pgprot_noncached(vma->vm_page_prot)))
		return -EINVAL;

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* Offset the user's pgoff into the legacy region. */
	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
   1.676 +
/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 * Returns @size on success, -EINVAL for an unsupported @size.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		*val = inb(port);
		break;
	case 2:
		*val = inw(port);
		break;
	case 4:
		*val = inl(port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
   1.711 +
/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 * Returns @size on success, -EINVAL for an unsupported @size.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		outb(val, port);
		break;
	case 2:
		outw(val, port);
		break;
	case 4:
		outl(val, port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
   1.742 +
/**
 * pci_cacheline_size - determine cacheline size for PCI devices
 * @dev: void
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 *
 * RETURNS: An appropriate -ERRNO error value on eror, or zero for success.
 */
static unsigned long
pci_cacheline_size (void)
{
	u64 levels, unique_caches;
	s64 status;
	pal_cache_config_info_t cci;
	/* cached result of the PAL queries; 0 means "not yet computed" */
	static u8 cacheline_size;

	if (cacheline_size)
		return cacheline_size;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		/* fall back to a safe default on PAL failure */
		return SMP_CACHE_BYTES;
	}

	/* query the outermost cache level (levels - 1) */
	status = ia64_pal_cache_config_info(levels - 1, /* cache_type (data_or_unified)= */ 2,
					    &cci);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
		       __FUNCTION__, status);
		return SMP_CACHE_BYTES;
	}
	/* pcci_line_size is log2 of the line size in bytes */
	cacheline_size = 1 << cci.pcci_line_size;
	return cacheline_size;
}
   1.782 +
   1.783 +/**
   1.784 + * pcibios_prep_mwi - helper function for drivers/pci/pci.c:pci_set_mwi()
   1.785 + * @dev: the PCI device for which MWI is enabled
   1.786 + *
   1.787 + * For ia64, we can get the cacheline sizes from PAL.
   1.788 + *
   1.789 + * RETURNS: An appropriate -ERRNO error value on eror, or zero for success.
   1.790 + */
   1.791 +int
   1.792 +pcibios_prep_mwi (struct pci_dev *dev)
   1.793 +{
   1.794 +	unsigned long desired_linesize, current_linesize;
   1.795 +	int rc = 0;
   1.796 +	u8 pci_linesize;
   1.797 +
   1.798 +	desired_linesize = pci_cacheline_size();
   1.799 +
   1.800 +	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &pci_linesize);
   1.801 +	current_linesize = 4 * pci_linesize;
   1.802 +	if (desired_linesize != current_linesize) {
   1.803 +		printk(KERN_WARNING "PCI: slot %s has incorrect PCI cache line size of %lu bytes,",
   1.804 +		       pci_name(dev), current_linesize);
   1.805 +		if (current_linesize > desired_linesize) {
   1.806 +			printk(" expected %lu bytes instead\n", desired_linesize);
   1.807 +			rc = -EINVAL;
   1.808 +		} else {
   1.809 +			printk(" correcting to %lu\n", desired_linesize);
   1.810 +			pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, desired_linesize / 4);
   1.811 +		}
   1.812 +	}
   1.813 +	return rc;
   1.814 +}
   1.815 +
   1.816 +int pci_vector_resources(int last, int nr_released)
   1.817 +{
   1.818 +	int count = nr_released;
   1.819 +
   1.820 +	count += (IA64_LAST_DEVICE_VECTOR - last);
   1.821 +
   1.822 +	return count;
   1.823 +}