direct-io.hg

view xen/drivers/pci/pci.c @ 875:ad4db8b417c1

bitkeeper revision 1.547 (3fa3dd2aH8eamu3ONvYovJgq8wBNbQ)

Many files:
Fixes to the DOM0 interface and domain building code. Ready for new save/restore dom0_ops.
author kaf24@scramble.cl.cam.ac.uk
date Sat Nov 01 16:19:54 2003 +0000 (2003-11-01)
parents b2897c7b8bbf
children c67c82ddb44a
line source
1 /*
2 * $Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
3 *
4 * PCI Bus Services, see include/linux/pci.h for further explanation.
5 *
6 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
7 * David Mosberger-Tang
8 *
9 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
10 */
12 #include <linux/config.h>
13 #include <linux/sched.h>
14 #include <linux/module.h>
15 #include <linux/types.h>
16 /*#include <linux/kernel.h>*/
17 #include <linux/pci.h>
18 /*#include <linux/string.h>*/
19 #include <linux/init.h>
20 #include <linux/slab.h>
21 #include <linux/ioport.h>
22 #include <linux/spinlock.h>
23 /*#include <linux/pm.h>*/
24 /*#include <linux/kmod.h>*/ /* for hotplug_path */
25 /*#include <linux/bitops.h>*/
26 #include <linux/delay.h>
27 #include <linux/cache.h>
29 #include <asm/page.h>
30 /*#include <asm/dma.h>*/ /* isa_dma_bridge_buggy */
32 #undef DEBUG
34 #ifdef DEBUG
35 #define DBG(x...) printk(x)
36 #else
37 #define DBG(x...)
38 #endif
40 LIST_HEAD(pci_root_buses);
41 LIST_HEAD(pci_devices);
43 /**
44 * pci_find_slot - locate PCI device from a given PCI slot
45 * @bus: number of PCI bus on which desired PCI device resides
46 * @devfn: encodes the number of the PCI slot in which the desired PCI
47 * device resides and the logical device number within that slot
48 * in case of multi-function devices.
49 *
50 * Given a PCI bus and slot/function number, the desired PCI device
51 * is located in system global list of PCI devices. If the device
52 * is found, a pointer to its data structure is returned. If no
53 * device is found, %NULL is returned.
54 */
55 struct pci_dev *
56 pci_find_slot(unsigned int bus, unsigned int devfn)
57 {
58 struct pci_dev *dev;
60 pci_for_each_dev(dev) {
61 if (dev->bus->number == bus && dev->devfn == devfn)
62 return dev;
63 }
64 return NULL;
65 }
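/*
 * A minimal usage sketch (not part of this file; bus 0, slot 3,
 * function 0 are made-up values for illustration):
 *
 *	struct pci_dev *dev = pci_find_slot(0, PCI_DEVFN(3, 0));
 *	if (dev)
 *		printk("PCI: found device %s\n", dev->slot_name);
 */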
67 /**
68 * pci_find_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
69 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
70 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
71 * @ss_vendor: PCI subsystem vendor id to match, or %PCI_ANY_ID to match all vendor ids
72 * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids
73 * @from: Previous PCI device found in search, or %NULL for new search.
74 *
75 * Iterates through the list of known PCI devices. If a PCI device is
76 * found with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its
77 * device structure is returned. Otherwise, %NULL is returned.
78 * A new search is initiated by passing %NULL to the @from argument.
79 * Otherwise, if @from is not %NULL, the search continues from the next device on the global list.
80 */
81 struct pci_dev *
82 pci_find_subsys(unsigned int vendor, unsigned int device,
83 unsigned int ss_vendor, unsigned int ss_device,
84 const struct pci_dev *from)
85 {
86 struct list_head *n = from ? from->global_list.next : pci_devices.next;
88 while (n != &pci_devices) {
89 struct pci_dev *dev = pci_dev_g(n);
90 if ((vendor == PCI_ANY_ID || dev->vendor == vendor) &&
91 (device == PCI_ANY_ID || dev->device == device) &&
92 (ss_vendor == PCI_ANY_ID || dev->subsystem_vendor == ss_vendor) &&
93 (ss_device == PCI_ANY_ID || dev->subsystem_device == ss_device))
94 return dev;
95 n = n->next;
96 }
97 return NULL;
98 }
101 /**
102 * pci_find_device - begin or continue searching for a PCI device by vendor/device id
103 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
104 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
105 * @from: Previous PCI device found in search, or %NULL for new search.
106 *
107 * Iterates through the list of known PCI devices. If a PCI device is
108 * found with a matching @vendor and @device, a pointer to its device structure is
109 * returned. Otherwise, %NULL is returned.
110 * A new search is initiated by passing %NULL to the @from argument.
111 * Otherwise, if @from is not %NULL, the search continues from the next device on the global list.
112 */
113 struct pci_dev *
114 pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *from)
115 {
116 return pci_find_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
117 }
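/*
 * A minimal sketch of the continuation idiom: feed each hit back in
 * as @from to enumerate every matching device (0x8086 is just an
 * example vendor id):
 *
 *	struct pci_dev *dev = NULL;
 *	while ((dev = pci_find_device(0x8086, PCI_ANY_ID, dev)) != NULL)
 *		printk("PCI: matched %s\n", dev->slot_name);
 */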
120 /**
121 * pci_find_class - begin or continue searching for a PCI device by class
122 * @class: search for a PCI device with this class designation
123 * @from: Previous PCI device found in search, or %NULL for new search.
124 *
125 * Iterates through the list of known PCI devices. If a PCI device is
126 * found with a matching @class, a pointer to its device structure is
127 * returned. Otherwise, %NULL is returned.
128 * A new search is initiated by passing %NULL to the @from argument.
129 * Otherwise, if @from is not %NULL, the search continues from the next
130 * device on the global list.
131 */
132 struct pci_dev *
133 pci_find_class(unsigned int class, const struct pci_dev *from)
134 {
135 struct list_head *n = from ? from->global_list.next : pci_devices.next;
137 while (n != &pci_devices) {
138 struct pci_dev *dev = pci_dev_g(n);
139 if (dev->class == class)
140 return dev;
141 n = n->next;
142 }
143 return NULL;
144 }
146 /**
147 * pci_find_capability - query for a device's capabilities
148 * @dev: PCI device to query
149 * @cap: capability code
150 *
151 * Tell if a device supports a given PCI capability.
152 * Returns the address of the requested capability structure within the
153 * device's PCI configuration space or 0 in case the device does not
154 * support it. Possible values for @cap:
155 *
156 * %PCI_CAP_ID_PM Power Management
157 *
158 * %PCI_CAP_ID_AGP Accelerated Graphics Port
159 *
160 * %PCI_CAP_ID_VPD Vital Product Data
161 *
162 * %PCI_CAP_ID_SLOTID Slot Identification
163 *
164 * %PCI_CAP_ID_MSI Message Signalled Interrupts
165 *
166 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
167 *
168 * %PCI_CAP_ID_PCIX PCI-X
169 */
170 int
171 pci_find_capability(struct pci_dev *dev, int cap)
172 {
173 u16 status;
174 u8 pos, id;
175 int ttl = 48;
177 pci_read_config_word(dev, PCI_STATUS, &status);
178 if (!(status & PCI_STATUS_CAP_LIST))
179 return 0;
180 switch (dev->hdr_type) {
181 case PCI_HEADER_TYPE_NORMAL:
182 case PCI_HEADER_TYPE_BRIDGE:
183 pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &pos);
184 break;
185 case PCI_HEADER_TYPE_CARDBUS:
186 pci_read_config_byte(dev, PCI_CB_CAPABILITY_LIST, &pos);
187 break;
188 default:
189 return 0;
190 }
191 while (ttl-- && pos >= 0x40) {
192 pos &= ~3;
193 pci_read_config_byte(dev, pos + PCI_CAP_LIST_ID, &id);
194 if (id == 0xff)
195 break;
196 if (id == cap)
197 return pos;
198 pci_read_config_byte(dev, pos + PCI_CAP_LIST_NEXT, &pos);
199 }
200 return 0;
201 }
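/*
 * A minimal sketch: the returned value is an offset into config
 * space, so capability registers are read relative to it (here the
 * 16-bit PMC register of the Power Management capability):
 *
 *	int pos = pci_find_capability(dev, PCI_CAP_ID_PM);
 *	if (pos) {
 *		u16 pmc;
 *		pci_read_config_word(dev, pos + PCI_PM_PMC, &pmc);
 *	}
 */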
204 /**
205 * pci_find_parent_resource - return resource region of parent bus of given region
206 * @dev: PCI device structure whose resources are to be searched
207 * @res: child resource record for which parent is sought
208 *
209 * For a given resource region of a given device, return the resource
210 * region of the parent bus that the given region is contained in, or
211 * where it should be allocated from.
212 */
213 struct resource *
214 pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
215 {
216 const struct pci_bus *bus = dev->bus;
217 int i;
218 struct resource *best = NULL;
220 for(i=0; i<4; i++) {
221 struct resource *r = bus->resource[i];
222 if (!r)
223 continue;
224 if (res->start && !(res->start >= r->start && res->end <= r->end))
225 continue; /* Not contained */
226 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
227 continue; /* Wrong type */
228 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
229 return r; /* Exact match */
230 if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
231 best = r; /* Approximating prefetchable by non-prefetchable */
232 }
233 return best;
234 }
236 /**
237 * pci_set_power_state - Set the power state of a PCI device
238 * @dev: PCI device to be suspended
239 * @state: Power state we're entering
240 *
241 * Transition a device to a new power state, using the Power Management
242 * Capabilities in the device's config space.
243 *
244 * RETURN VALUE:
245 * -EINVAL if trying to enter a shallower sleep state than the one we're already in.
246 * 0 if we're already in the requested state.
247 * -EIO if device does not support PCI PM.
248 * 0 if we can successfully change the power state.
249 */
251 int
252 pci_set_power_state(struct pci_dev *dev, int state)
253 {
254 int pm;
255 u16 pmcsr;
257 /* bound the state we're entering */
258 if (state > 3) state = 3;
260 /* Validate current state:
261 * We can enter D0 from any state, but we can only go deeper
262 * into sleep if we're already in a low power state.
263 */
264 if (state > 0 && dev->current_state > state)
265 return -EINVAL;
266 else if (dev->current_state == state)
267 return 0; /* we're already there */
269 /* find PCI PM capability in list */
270 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
272 /* abort if the device doesn't support PM capabilities */
273 if (!pm) return -EIO;
275 /* check if this device supports the desired state */
276 if (state == 1 || state == 2) {
277 u16 pmc;
278 pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
279 if (state == 1 && !(pmc & PCI_PM_CAP_D1)) return -EIO;
280 else if (state == 2 && !(pmc & PCI_PM_CAP_D2)) return -EIO;
281 }
283 /* If we're in D3, force entire word to 0.
284 * This doesn't affect PME_Status, disables PME_En, and
285 * sets PowerState to 0.
286 */
287 if (dev->current_state >= 3)
288 pmcsr = 0;
289 else {
290 pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
291 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
292 pmcsr |= state;
293 }
295 /* enter specified state */
296 pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);
298 /* Mandatory power management transition delays */
299 /* see PCI PM 1.1 5.6.1 table 18 */
300 if(state == 3 || dev->current_state == 3)
301 {
302 set_current_state(TASK_UNINTERRUPTIBLE);
303 schedule_timeout(HZ/100);
304 }
305 else if(state == 2 || dev->current_state == 2)
306 udelay(200);
307 dev->current_state = state;
309 return 0;
310 }
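/*
 * A minimal sketch: put a device into D3hot and bring it back to D0.
 * The function itself performs the mandatory transition delays.
 *
 *	pci_set_power_state(dev, 3);	(enter D3hot)
 *	...
 *	pci_set_power_state(dev, 0);	(back to full power)
 */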
312 /**
313 * pci_save_state - save the PCI configuration space of a device before suspending
314 * @dev: - PCI device that we're dealing with
315 * @buffer: - buffer to hold config space context
316 *
317 * @buffer must be large enough to hold the entire PCI 2.2 config space
318 * (>= 64 bytes).
319 */
320 int
321 pci_save_state(struct pci_dev *dev, u32 *buffer)
322 {
323 int i;
324 if (buffer) {
325 /* XXX: 100% dword access ok here? */
326 for (i = 0; i < 16; i++)
327 pci_read_config_dword(dev, i * 4,&buffer[i]);
328 }
329 return 0;
330 }
332 /**
333 * pci_restore_state - Restore the saved state of a PCI device
334 * @dev: - PCI device that we're dealing with
335 * @buffer: - saved PCI config space
336 *
337 */
338 int
339 pci_restore_state(struct pci_dev *dev, u32 *buffer)
340 {
341 int i;
343 if (buffer) {
344 for (i = 0; i < 16; i++)
345 pci_write_config_dword(dev,i * 4, buffer[i]);
346 }
347 /*
348 * otherwise, write the context information we know from bootup.
349 * This works around a problem where warm-booting from Windows
350 * combined with a D3(hot)->D0 transition causes PCI config
351 * header data to be forgotten.
352 */
353 else {
354 for (i = 0; i < 6; i ++)
355 pci_write_config_dword(dev,
356 PCI_BASE_ADDRESS_0 + (i * 4),
357 dev->resource[i].start);
358 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
359 }
360 return 0;
361 }
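/*
 * A minimal sketch pairing the two calls around a power transition;
 * the 16-dword buffer holds the 64-byte PCI 2.2 header that
 * pci_save_state() reads:
 *
 *	u32 cfg[16];
 *	pci_save_state(dev, cfg);
 *	pci_set_power_state(dev, 3);
 *	...
 *	pci_set_power_state(dev, 0);
 *	pci_restore_state(dev, cfg);
 */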
363 /**
364 * pci_enable_device_bars - Initialize some of a device for use
365 * @dev: PCI device to be initialized
366 * @bars: bitmask of BARs that must be configured
367 *
368 * Initialize device before it's used by a driver. Ask low-level code
369 * to enable selected I/O and memory resources. Wake up the device if it
370 * was suspended. Beware, this function can fail.
371 */
373 int
374 pci_enable_device_bars(struct pci_dev *dev, int bars)
375 {
376 int err;
378 pci_set_power_state(dev, 0);
379 if ((err = pcibios_enable_device(dev, bars)) < 0)
380 return err;
381 return 0;
382 }
384 /**
385 * pci_enable_device - Initialize device before it's used by a driver.
386 * @dev: PCI device to be initialized
387 *
388 * Initialize device before it's used by a driver. Ask low-level code
389 * to enable I/O and memory. Wake up the device if it was suspended.
390 * Beware, this function can fail.
391 */
392 int
393 pci_enable_device(struct pci_dev *dev)
394 {
395 return pci_enable_device_bars(dev, 0x3F);
396 }
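/*
 * A minimal probe-time sketch (my_probe is a hypothetical driver
 * probe routine): enable the device before touching any of its
 * resources and propagate failure.
 *
 *	static int my_probe(struct pci_dev *dev, const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(dev);
 *		if (err)
 *			return err;
 *		...
 *		return 0;
 *	}
 */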
398 /**
399 * pci_disable_device - Disable PCI device after use
400 * @dev: PCI device to be disabled
401 *
402 * Signal to the system that the PCI device is not in use by the system
403 * anymore. This only involves disabling PCI bus-mastering, if active.
404 */
405 void
406 pci_disable_device(struct pci_dev *dev)
407 {
408 u16 pci_command;
410 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
411 if (pci_command & PCI_COMMAND_MASTER) {
412 pci_command &= ~PCI_COMMAND_MASTER;
413 pci_write_config_word(dev, PCI_COMMAND, pci_command);
414 }
415 }
417 /**
418 * pci_enable_wake - enable device to generate PME# when suspended
419 * @dev: - PCI device to operate on
420 * @state: - Current state of device.
421 * @enable: - Flag to enable or disable generation
422 *
423 * Set the bits in the device's PM Capabilities to generate PME# when
424 * the system is suspended.
425 *
426 * -EIO is returned if device doesn't have PM Capabilities.
427 * -EINVAL is returned if device supports it, but can't generate wake events.
428 * 0 if operation is successful.
429 *
430 */
431 int pci_enable_wake(struct pci_dev *dev, u32 state, int enable)
432 {
433 int pm;
434 u16 value;
436 /* find PCI PM capability in list */
437 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
439 /* If device doesn't support PM Capabilities, but request is to disable
440 * wake events, it's a nop; otherwise fail */
441 if (!pm)
442 return enable ? -EIO : 0;
444 /* Check device's ability to generate PME# */
445 pci_read_config_word(dev,pm+PCI_PM_PMC,&value);
447 value &= PCI_PM_CAP_PME_MASK;
448 value >>= ffs(value); /* First bit of mask */
450 /* Check if it can generate PME# from requested state. */
451 if (!value || !(value & (1 << state)))
452 return enable ? -EINVAL : 0;
454 pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);
456 /* Clear PME_Status by writing 1 to it and enable PME# */
457 value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
459 if (!enable)
460 value &= ~PCI_PM_CTRL_PME_ENABLE;
462 pci_write_config_word(dev, pm + PCI_PM_CTRL, value);
464 return 0;
465 }
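/*
 * A minimal sketch: arm PME# generation before suspending. -EINVAL
 * (device cannot wake from the target state) is usually harmless,
 * while -EIO means there is no PM capability at all.
 *
 *	int err = pci_enable_wake(dev, 3, 1);
 *	if (err && err != -EINVAL)
 *		return err;
 *	pci_set_power_state(dev, 3);
 */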
467 int
468 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
469 {
470 u8 pin;
472 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
473 if (!pin)
474 return -1;
475 pin--;
476 while (dev->bus->self) {
477 pin = (pin + PCI_SLOT(dev->devfn)) % 4;
478 dev = dev->bus->self;
479 }
480 *bridge = dev;
481 return pin;
482 }
484 /**
485 * pci_release_region - Release a PCI BAR
486 * @pdev: PCI device whose resources were previously reserved by pci_request_region
487 * @bar: BAR to release
488 *
489 * Releases the PCI I/O and memory resources previously reserved by a
490 * successful call to pci_request_region. Call this function only
491 * after all use of the PCI regions has ceased.
492 */
493 void pci_release_region(struct pci_dev *pdev, int bar)
494 {
495 if (pci_resource_len(pdev, bar) == 0)
496 return;
497 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
498 release_region(pci_resource_start(pdev, bar),
499 pci_resource_len(pdev, bar));
500 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
501 release_mem_region(pci_resource_start(pdev, bar),
502 pci_resource_len(pdev, bar));
503 }
505 /**
506 * pci_request_region - Reserve a PCI I/O or memory resource
507 * @pdev: PCI device whose resources are to be reserved
508 * @bar: BAR to be reserved
509 * @res_name: Name to be associated with resource.
510 *
511 * Mark the PCI region associated with PCI device @pdev BAR @bar as
512 * being reserved by owner @res_name. Do not access any
513 * address inside the PCI regions unless this call returns
514 * successfully.
515 *
516 * Returns 0 on success, or -EBUSY on error. A warning
517 * message is also printed on failure.
518 */
519 int pci_request_region(struct pci_dev *pdev, int bar, char *res_name)
520 {
521 if (pci_resource_len(pdev, bar) == 0)
522 return 0;
524 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
525 if (!request_region(pci_resource_start(pdev, bar),
526 pci_resource_len(pdev, bar), res_name))
527 goto err_out;
528 }
529 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
530 if (!request_mem_region(pci_resource_start(pdev, bar),
531 pci_resource_len(pdev, bar), res_name))
532 goto err_out;
533 }
535 return 0;
537 err_out:
538 printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
539 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
540 bar + 1, /* PCI BAR # */
541 pci_resource_len(pdev, bar), pci_resource_start(pdev, bar),
542 pdev->slot_name);
543 return -EBUSY;
544 }
547 /**
548 * pci_release_regions - Release reserved PCI I/O and memory resources
549 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
550 *
551 * Releases all PCI I/O and memory resources previously reserved by a
552 * successful call to pci_request_regions. Call this function only
553 * after all use of the PCI regions has ceased.
554 */
556 void pci_release_regions(struct pci_dev *pdev)
557 {
558 int i;
560 for (i = 0; i < 6; i++)
561 pci_release_region(pdev, i);
562 }
564 /**
565 * pci_request_regions - Reserve PCI I/O and memory resources
566 * @pdev: PCI device whose resources are to be reserved
567 * @res_name: Name to be associated with resource.
568 *
569 * Mark all PCI regions associated with PCI device @pdev as
570 * being reserved by owner @res_name. Do not access any
571 * address inside the PCI regions unless this call returns
572 * successfully.
573 *
574 * Returns 0 on success, or -EBUSY on error. A warning
575 * message is also printed on failure.
576 */
577 int pci_request_regions(struct pci_dev *pdev, char *res_name)
578 {
579 int i;
581 for (i = 0; i < 6; i++)
582 if(pci_request_region(pdev, i, res_name))
583 goto err_out;
584 return 0;
586 err_out:
587 printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
588 pci_resource_flags(pdev, i) & IORESOURCE_IO ? "I/O" : "mem",
589 i + 1, /* PCI BAR # */
590 pci_resource_len(pdev, i), pci_resource_start(pdev, i),
591 pdev->slot_name);
592 while(--i >= 0)
593 pci_release_region(pdev, i);
595 return -EBUSY;
596 }
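/*
 * A minimal sketch ("mydrv" is a placeholder owner name): claim all
 * of a device's regions before use and release them on teardown.
 *
 *	if (pci_request_regions(dev, "mydrv"))
 *		return -EBUSY;
 *	ioaddr = pci_resource_start(dev, 0);
 *	...
 *	pci_release_regions(dev);
 */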
599 /*
600 * Registration of PCI drivers and handling of hot-pluggable devices.
601 */
603 static LIST_HEAD(pci_drivers);
605 /**
606 * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
607 * @ids: array of PCI device id structures to search in
608 * @dev: the PCI device structure to match against
609 *
610 * Used by a driver to check whether a PCI device present in the
611 * system is in its list of supported devices. Returns the matching
612 * pci_device_id structure or %NULL if there is no match.
613 */
614 const struct pci_device_id *
615 pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev)
616 {
617 while (ids->vendor || ids->subvendor || ids->class_mask) {
618 if ((ids->vendor == PCI_ANY_ID || ids->vendor == dev->vendor) &&
619 (ids->device == PCI_ANY_ID || ids->device == dev->device) &&
620 (ids->subvendor == PCI_ANY_ID || ids->subvendor == dev->subsystem_vendor) &&
621 (ids->subdevice == PCI_ANY_ID || ids->subdevice == dev->subsystem_device) &&
622 !((ids->class ^ dev->class) & ids->class_mask))
623 return ids;
624 ids++;
625 }
626 return NULL;
627 }
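/*
 * A minimal id-table sketch (the vendor/device values are
 * placeholders): the all-zero sentinel entry is what terminates the
 * while loop above.
 *
 *	static struct pci_device_id my_ids[] = {
 *		{ 0x1234, 0x5678, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 *		{ 0, }
 *	};
 */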
629 static int
630 pci_announce_device(struct pci_driver *drv, struct pci_dev *dev)
631 {
632 const struct pci_device_id *id;
633 int ret = 0;
635 if (drv->id_table) {
636 id = pci_match_device(drv->id_table, dev);
637 if (!id) {
638 ret = 0;
639 goto out;
640 }
641 } else
642 id = NULL;
644 dev_probe_lock();
645 if (drv->probe(dev, id) >= 0) {
646 dev->driver = drv;
647 ret = 1;
648 }
649 dev_probe_unlock();
650 out:
651 return ret;
652 }
654 /**
655 * pci_register_driver - register a new pci driver
656 * @drv: the driver structure to register
657 *
658 * Adds the driver structure to the list of registered drivers.
659 * Returns the number of pci devices which were claimed by the driver
660 * during registration. The driver remains registered even if the
661 * return value is zero.
662 */
663 int
664 pci_register_driver(struct pci_driver *drv)
665 {
666 struct pci_dev *dev;
667 int count = 0;
669 list_add_tail(&drv->node, &pci_drivers);
670 pci_for_each_dev(dev) {
671 if (!pci_dev_driver(dev))
672 count += pci_announce_device(drv, dev);
673 }
674 return count;
675 }
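/*
 * A minimal registration sketch (my_ids, my_probe and my_remove are
 * hypothetical; see the id table example above). The initializer
 * uses the same GNU-style labels as pci_compat_driver below.
 *
 *	static struct pci_driver my_driver = {
 *		name:		"mydrv",
 *		id_table:	my_ids,
 *		probe:		my_probe,
 *		remove:		my_remove,
 *	};
 *
 * and then, typically from a module init function:
 *
 *	pci_register_driver(&my_driver);
 */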
677 /**
678 * pci_unregister_driver - unregister a pci driver
679 * @drv: the driver structure to unregister
680 *
681 * Deletes the driver structure from the list of registered PCI drivers,
682 * gives it a chance to clean up by calling its remove() function for
683 * each device it was responsible for, and marks those devices as
684 * driverless.
685 */
687 void
688 pci_unregister_driver(struct pci_driver *drv)
689 {
690 struct pci_dev *dev;
692 list_del(&drv->node);
693 pci_for_each_dev(dev) {
694 if (dev->driver == drv) {
695 if (drv->remove)
696 drv->remove(dev);
697 dev->driver = NULL;
698 }
699 }
700 }
702 #ifdef CONFIG_HOTPLUG
704 #ifndef FALSE
705 #define FALSE (0)
706 #define TRUE (!FALSE)
707 #endif
709 static void
710 run_sbin_hotplug(struct pci_dev *pdev, int insert)
711 {
712 int i;
713 char *argv[3], *envp[8];
714 char id[20], sub_id[24], bus_id[24], class_id[20];
716 if (!hotplug_path[0])
717 return;
719 sprintf(class_id, "PCI_CLASS=%04X", pdev->class);
720 sprintf(id, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device);
721 sprintf(sub_id, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor, pdev->subsystem_device);
722 sprintf(bus_id, "PCI_SLOT_NAME=%s", pdev->slot_name);
724 i = 0;
725 argv[i++] = hotplug_path;
726 argv[i++] = "pci";
727 argv[i] = 0;
729 i = 0;
730 /* minimal command environment */
731 envp[i++] = "HOME=/";
732 envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
734 /* other stuff we want to pass to /sbin/hotplug */
735 envp[i++] = class_id;
736 envp[i++] = id;
737 envp[i++] = sub_id;
738 envp[i++] = bus_id;
739 if (insert)
740 envp[i++] = "ACTION=add";
741 else
742 envp[i++] = "ACTION=remove";
743 envp[i] = 0;
745 call_usermodehelper (argv [0], argv, envp);
746 }
748 /**
749 * pci_announce_device_to_drivers - tell the drivers a new device has appeared
750 * @dev: the device that has shown up
751 *
752 * Notifies the drivers that a new device has appeared, and also notifies
753 * userspace through /sbin/hotplug.
754 */
755 void
756 pci_announce_device_to_drivers(struct pci_dev *dev)
757 {
758 struct list_head *ln;
760 for(ln=pci_drivers.next; ln != &pci_drivers; ln=ln->next) {
761 struct pci_driver *drv = list_entry(ln, struct pci_driver, node);
762 if (drv->remove && pci_announce_device(drv, dev))
763 break;
764 }
766 /* notify userspace of new hotplug device */
767 run_sbin_hotplug(dev, TRUE);
768 }
770 /**
771 * pci_insert_device - insert a hotplug device
772 * @dev: the device to insert
773 * @bus: where to insert it
774 *
775 * Add a new device to the device lists and notify userspace (/sbin/hotplug).
776 */
777 void
778 pci_insert_device(struct pci_dev *dev, struct pci_bus *bus)
779 {
780 list_add_tail(&dev->bus_list, &bus->devices);
781 list_add_tail(&dev->global_list, &pci_devices);
782 #ifdef CONFIG_PROC_FS
783 pci_proc_attach_device(dev);
784 #endif
785 pci_announce_device_to_drivers(dev);
786 }
788 static void
789 pci_free_resources(struct pci_dev *dev)
790 {
791 int i;
793 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
794 struct resource *res = dev->resource + i;
795 if (res->parent)
796 release_resource(res);
797 }
798 }
800 /**
801 * pci_remove_device - remove a hotplug device
802 * @dev: the device to remove
803 *
804 * Delete the device structure from the device lists and
805 * notify userspace (/sbin/hotplug).
806 */
807 void
808 pci_remove_device(struct pci_dev *dev)
809 {
810 if (dev->driver) {
811 if (dev->driver->remove)
812 dev->driver->remove(dev);
813 dev->driver = NULL;
814 }
815 list_del(&dev->bus_list);
816 list_del(&dev->global_list);
817 pci_free_resources(dev);
818 #ifdef CONFIG_PROC_FS
819 pci_proc_detach_device(dev);
820 #endif
822 /* notify userspace of hotplug device removal */
823 run_sbin_hotplug(dev, FALSE);
824 }
826 #endif
828 static struct pci_driver pci_compat_driver = {
829 name: "compat"
830 };
832 /**
833 * pci_dev_driver - get the pci_driver of a device
834 * @dev: the device to query
835 *
836 * Returns the appropriate pci_driver structure or %NULL if there is no
837 * registered driver for the device.
838 */
839 struct pci_driver *
840 pci_dev_driver(const struct pci_dev *dev)
841 {
842 if (dev->driver)
843 return dev->driver;
844 else {
845 int i;
846 for(i=0; i<=PCI_ROM_RESOURCE; i++)
847 if (dev->resource[i].flags & IORESOURCE_BUSY)
848 return &pci_compat_driver;
849 }
850 return NULL;
851 }
854 /*
855 * This interrupt-safe spinlock protects all accesses to PCI
856 * configuration space.
857 */
859 static spinlock_t pci_lock = SPIN_LOCK_UNLOCKED;
861 /*
862 * Wrappers for all PCI configuration access functions. They just check
863 * alignment, do locking and call the low-level functions pointed to
864 * by pci_dev->ops.
865 */
867 #define PCI_byte_BAD 0
868 #define PCI_word_BAD (pos & 1)
869 #define PCI_dword_BAD (pos & 3)
871 #define PCI_OP(rw,size,type) \
872 int pci_##rw##_config_##size (struct pci_dev *dev, int pos, type value) \
873 { \
874 int res; \
875 unsigned long flags; \
876 if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
877 spin_lock_irqsave(&pci_lock, flags); \
878 res = dev->bus->ops->rw##_##size(dev, pos, value); \
879 spin_unlock_irqrestore(&pci_lock, flags); \
880 return res; \
881 }
883 PCI_OP(read, byte, u8 *)
884 PCI_OP(read, word, u16 *)
885 PCI_OP(read, dword, u32 *)
886 PCI_OP(write, byte, u8)
887 PCI_OP(write, word, u16)
888 PCI_OP(write, dword, u32)
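/*
 * For reference, PCI_OP(read, word, u16 *) expands (modulo
 * whitespace) to:
 *
 *	int pci_read_config_word (struct pci_dev *dev, int pos, u16 *value)
 *	{
 *		int res;
 *		unsigned long flags;
 *		if (pos & 1) return PCIBIOS_BAD_REGISTER_NUMBER;
 *		spin_lock_irqsave(&pci_lock, flags);
 *		res = dev->bus->ops->read_word(dev, pos, value);
 *		spin_unlock_irqrestore(&pci_lock, flags);
 *		return res;
 *	}
 */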
890 /**
891 * pci_set_master - enables bus-mastering for device dev
892 * @dev: the PCI device to enable
893 *
894 * Enables bus-mastering on the device and calls pcibios_set_master()
895 * to do the needed arch specific settings.
896 */
897 void
898 pci_set_master(struct pci_dev *dev)
899 {
900 u16 cmd;
902 pci_read_config_word(dev, PCI_COMMAND, &cmd);
903 if (! (cmd & PCI_COMMAND_MASTER)) {
904 DBG("PCI: Enabling bus mastering for device %s\n", dev->slot_name);
905 cmd |= PCI_COMMAND_MASTER;
906 pci_write_config_word(dev, PCI_COMMAND, cmd);
907 }
908 pcibios_set_master(dev);
909 }
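/*
 * A minimal sketch of the usual probe-time ordering: enable the
 * device first, then turn on bus mastering for DMA-capable hardware.
 *
 *	if (pci_enable_device(dev))
 *		return -ENODEV;
 *	pci_set_master(dev);
 */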
911 /**
912 * pdev_set_mwi - arch helper function for pcibios_set_mwi
913 * @dev: the PCI device for which MWI is enabled
914 *
915 * Helper function for implementing the arch-specific pcibios_set_mwi
916 * function. Originally copied from drivers/net/acenic.c.
917 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
918 *
919 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
920 */
921 int
922 pdev_set_mwi(struct pci_dev *dev)
923 {
924 int rc = 0;
925 u8 cache_size;
927 /*
928 * Looks like this is necessary to deal with on all architectures,
929 * even this %$#%$# N440BX Intel based thing doesn't get it right.
930 * Ie. having two NICs in the machine, one will have the cache
931 * line set at boot time, the other will not.
932 */
933 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cache_size);
934 cache_size <<= 2;
935 if (cache_size != SMP_CACHE_BYTES) {
936 printk(KERN_WARNING "PCI: %s PCI cache line size set incorrectly (%i bytes) by BIOS/FW.\n",
937 dev->slot_name, cache_size);
938 if (cache_size > SMP_CACHE_BYTES) {
939 printk("PCI: %s cache line size too large - expecting %i.\n", dev->slot_name, SMP_CACHE_BYTES);
940 rc = -EINVAL;
941 } else {
942 printk("PCI: %s PCI cache line size corrected to %i.\n", dev->slot_name, SMP_CACHE_BYTES);
943 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
944 SMP_CACHE_BYTES >> 2);
945 }
946 }
948 return rc;
949 }
951 /**
952 * pci_set_mwi - enables memory-write-invalidate PCI transaction
953 * @dev: the PCI device for which MWI is enabled
954 *
955 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
956 * and then calls pcibios_set_mwi() to do the needed arch-specific
957 * operations, or falls back to the generic pdev_set_mwi() helper.
958 *
959 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
960 */
961 int
962 pci_set_mwi(struct pci_dev *dev)
963 {
964 int rc;
965 u16 cmd;
967 #ifdef HAVE_ARCH_PCI_MWI
968 rc = pcibios_set_mwi(dev);
969 #else
970 rc = pdev_set_mwi(dev);
971 #endif
973 if (rc)
974 return rc;
976 pci_read_config_word(dev, PCI_COMMAND, &cmd);
977 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
978 DBG("PCI: Enabling Mem-Wr-Inval for device %s\n", dev->slot_name);
979 cmd |= PCI_COMMAND_INVALIDATE;
980 pci_write_config_word(dev, PCI_COMMAND, cmd);
981 }
983 return 0;
984 }
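/*
 * A minimal sketch: MWI is a performance hint, so a driver can treat
 * failure as non-fatal and carry on without it.
 *
 *	if (pci_set_mwi(dev))
 *		printk(KERN_INFO "%s: MWI unavailable, continuing\n",
 *		       dev->slot_name);
 */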
986 /**
987 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
988 * @dev: the PCI device to disable
989 *
990 * Disables the PCI Memory-Write-Invalidate transaction on the device.
991 */
992 void
993 pci_clear_mwi(struct pci_dev *dev)
994 {
995 u16 cmd;
997 pci_read_config_word(dev, PCI_COMMAND, &cmd);
998 if (cmd & PCI_COMMAND_INVALIDATE) {
999 cmd &= ~PCI_COMMAND_INVALIDATE;
1000 pci_write_config_word(dev, PCI_COMMAND, cmd);
1001 }
1002 }
1004 int
1005 pci_set_dma_mask(struct pci_dev *dev, u64 mask)
1006 {
1007 if (!pci_dma_supported(dev, mask))
1008 return -EIO;
1010 dev->dma_mask = mask;
1012 return 0;
1013 }
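/*
 * A minimal sketch (the 30-bit mask is an arbitrary example): a
 * device with limited DMA addressing sets its mask before any
 * mappings are created.
 *
 *	if (pci_set_dma_mask(dev, 0x3fffffff))
 *		return -ENODEV;
 */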
1015 int
1016 pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask)
1017 {
1018 if (!pci_dac_dma_supported(dev, mask))
1019 return -EIO;
1021 dev->dma_mask = mask;
1023 return 0;
1024 }
1026 /*
1027 * Translate the low bits of the PCI base
1028 * to the resource type
1029 */
1030 static inline unsigned int pci_calc_resource_flags(unsigned int flags)
1031 {
1032 if (flags & PCI_BASE_ADDRESS_SPACE_IO)
1033 return IORESOURCE_IO;
1035 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
1036 return IORESOURCE_MEM | IORESOURCE_PREFETCH;
1038 return IORESOURCE_MEM;
1039 }
1041 /*
1042 * Find the extent of a PCI decode, do sanity checks.
1043 */
1044 static u32 pci_size(u32 base, u32 maxbase, unsigned long mask)
1045 {
1046 u32 size = mask & maxbase; /* Find the significant bits */
1047 if (!size)
1048 return 0;
1049 size = size & ~(size-1); /* Get the lowest of them to find the decode size */
1050 size -= 1; /* extent = size - 1 */
1051 if (base == maxbase && ((base | size) & mask) != mask)
1052 return 0; /* base == maxbase can be valid only
1053 if the BAR has been already
1054 programmed with all 1s */
1055 return size;
1056 }
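/*
 * Worked example: a 32-bit memory BAR decoding 64KB. After the probe
 * writes ~0, the device reads back maxbase = 0xffff0000 (the low
 * bits are hard-wired to zero). With mask = PCI_BASE_ADDRESS_MEM_MASK:
 *
 *	size = mask & maxbase        -> 0xffff0000
 *	size = size & ~(size-1)      -> 0x00010000  (lowest set bit)
 *	size -= 1                    -> 0x0000ffff  (the extent)
 *
 * so the region spans res->start .. res->start + 0xffff.
 */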
1058 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
1059 {
1060 unsigned int pos, reg, next;
1061 u32 l, sz;
1062 struct resource *res;
1064 for(pos=0; pos<howmany; pos = next) {
1065 next = pos+1;
1066 res = &dev->resource[pos];
1067 res->name = dev->name;
1068 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
1069 pci_read_config_dword(dev, reg, &l);
1070 pci_write_config_dword(dev, reg, ~0);
1071 pci_read_config_dword(dev, reg, &sz);
1072 pci_write_config_dword(dev, reg, l);
1073 if (!sz || sz == 0xffffffff)
1074 continue;
1075 if (l == 0xffffffff)
1076 l = 0;
1077 if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) {
1078 sz = pci_size(l, sz, PCI_BASE_ADDRESS_MEM_MASK);
1079 if (!sz)
1080 continue;
1081 res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
1082 res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
1083 } else {
1084 sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
1085 if (!sz)
1086 continue;
1087 res->start = l & PCI_BASE_ADDRESS_IO_MASK;
1088 res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
1089 }
1090 res->end = res->start + (unsigned long) sz;
1091 res->flags |= pci_calc_resource_flags(l);
1092 if ((l & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK))
1093 == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64)) {
1094 pci_read_config_dword(dev, reg+4, &l);
1095 next++;
1096 #if BITS_PER_LONG == 64
1097 res->start |= ((unsigned long) l) << 32;
1098 res->end = res->start + sz;
1099 pci_write_config_dword(dev, reg+4, ~0);
1100 pci_read_config_dword(dev, reg+4, &sz);
1101 pci_write_config_dword(dev, reg+4, l);
1102 if (~sz)
1103 res->end = res->start + 0xffffffff +
1104 (((unsigned long) ~sz) << 32);
1105 #else
1106 if (l) {
1107 printk(KERN_ERR "PCI: Unable to handle 64-bit address for device %s\n", dev->slot_name);
1108 res->start = 0;
1109 res->flags = 0;
1110 continue;
1111 }
1112 #endif
1113 }
1114 }
1115 if (rom) {
1116 dev->rom_base_reg = rom;
1117 res = &dev->resource[PCI_ROM_RESOURCE];
1118 res->name = dev->name;
1119 pci_read_config_dword(dev, rom, &l);
1120 pci_write_config_dword(dev, rom, ~PCI_ROM_ADDRESS_ENABLE);
1121 pci_read_config_dword(dev, rom, &sz);
1122 pci_write_config_dword(dev, rom, l);
1123 if (l == 0xffffffff)
1124 l = 0;
1125 if (sz && sz != 0xffffffff) {
1126 sz = pci_size(l, sz, PCI_ROM_ADDRESS_MASK);
1127 if (!sz)
1128 return;
1129 res->flags = (l & PCI_ROM_ADDRESS_ENABLE) |
1130 IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
1131 res->start = l & PCI_ROM_ADDRESS_MASK;
1132 res->end = res->start + (unsigned long) sz;
1133 }
1134 }
1135 }
1137 void __devinit pci_read_bridge_bases(struct pci_bus *child)
1138 {
1139 struct pci_dev *dev = child->self;
1140 u8 io_base_lo, io_limit_lo;
1141 u16 mem_base_lo, mem_limit_lo;
1142 unsigned long base, limit;
1143 struct resource *res;
1144 int i;
1146 if (!dev) /* It's a host bus, nothing to read */
1147 return;
1149 if (dev->transparent) {
1150 printk("Transparent bridge - %s\n", dev->name);
1151 for(i = 0; i < 4; i++)
1152 child->resource[i] = child->parent->resource[i];
1153 return;
1154 }
1156 for(i=0; i<3; i++)
1157 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
1159 res = child->resource[0];
1160 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
1161 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
1162 base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
1163 limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;
1165 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
1166 u16 io_base_hi, io_limit_hi;
1167 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
1168 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
1169 base |= (io_base_hi << 16);
1170 limit |= (io_limit_hi << 16);
1171 }
1173 if (base && base <= limit) {
1174 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
1175 res->start = base;
1176 res->end = limit + 0xfff;
1177 }
1179 res = child->resource[1];
1180 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
1181 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
1182 base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
1183 limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
1184 if (base && base <= limit) {
1185 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
1186 res->start = base;
1187 res->end = limit + 0xfffff;
1188 }
1190 res = child->resource[2];
1191 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
1192 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
1193 base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
1194 limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
1196 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
1197 u32 mem_base_hi, mem_limit_hi;
1198 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
1199 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
1200 #if BITS_PER_LONG == 64
1201 base |= ((long) mem_base_hi) << 32;
1202 limit |= ((long) mem_limit_hi) << 32;
1203 #else
1204 if (mem_base_hi || mem_limit_hi) {
1205 printk(KERN_ERR "PCI: Unable to handle 64-bit address space for %s\n", child->name);
1206 return;
1207 }
1208 #endif
1209 }
1210 if (base && base <= limit) {
1211 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
1212 res->start = base;
1213 res->end = limit + 0xfffff;
1214 }
1215 }
1217 static struct pci_bus * __devinit pci_alloc_bus(void)
1218 {
1219 struct pci_bus *b;
1221 b = kmalloc(sizeof(*b), GFP_KERNEL);
1222 if (b) {
1223 memset(b, 0, sizeof(*b));
1224 INIT_LIST_HEAD(&b->children);
1225 INIT_LIST_HEAD(&b->devices);
1226 }
1227 return b;
1228 }
1230 struct pci_bus * __devinit pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
1231 {
1232 struct pci_bus *child;
1233 int i;
1235 /*
1236 * Allocate a new bus, and inherit stuff from the parent..
1237 */
1238 child = pci_alloc_bus();
1240 list_add_tail(&child->node, &parent->children);
1241 child->self = dev;
1242 dev->subordinate = child;
1243 child->parent = parent;
1244 child->ops = parent->ops;
1245 child->sysdata = parent->sysdata;
1247 /*
1248 * Set up the primary, secondary and subordinate
1249 * bus numbers.
1250 */
1251 child->number = child->secondary = busnr;
1252 child->primary = parent->secondary;
1253 child->subordinate = 0xff;
1255 /* Set up default resource pointers and names.. */
1256 for (i = 0; i < 4; i++) {
1257 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
1258 child->resource[i]->name = child->name;
1259 }
1261 return child;
1262 }
1264 unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus);
1266 /*
1267 * If it's a bridge, configure it and scan the bus behind it.
1268 * For CardBus bridges, we don't scan behind as the devices will
1269 * be handled by the bridge driver itself.
1271 * We need to process bridges in two passes -- first we scan those
1272 * already configured by the BIOS and after we are done with all of
1273 * them, we proceed to assigning numbers to the remaining buses in
1274 * order to avoid overlaps between old and new bus numbers.
1275 */
1276 static int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass)
1277 {
1278 unsigned int buses;
1279 unsigned short cr;
1280 struct pci_bus *child;
1281 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
1283 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
1284 DBG("Scanning behind PCI bridge %s, config %06x, pass %d\n", dev->slot_name, buses & 0xffffff, pass);
1285 if ((buses & 0xffff00) && !pcibios_assign_all_busses()) {
1286 /*
1287 * Bus already configured by firmware, process it in the first
1288 * pass and just note the configuration.
1289 */
1290 if (pass)
1291 return max;
1292 child = pci_add_new_bus(bus, dev, 0);
1293 child->primary = buses & 0xFF;
1294 child->secondary = (buses >> 8) & 0xFF;
1295 child->subordinate = (buses >> 16) & 0xFF;
1296 child->number = child->secondary;
1297 if (!is_cardbus) {
1298 unsigned int cmax = pci_do_scan_bus(child);
1299 if (cmax > max) max = cmax;
1300 } else {
1301 unsigned int cmax = child->subordinate;
1302 if (cmax > max) max = cmax;
1303 }
1304 } else {
1305 /*
1306 * We need to assign a number to this bus which we always
1307 * do in the second pass. We also keep all address decoders
1308 * on the bridge disabled during scanning. FIXME: Why?
1309 */
1310 if (!pass)
1311 return max;
1312 pci_read_config_word(dev, PCI_COMMAND, &cr);
1313 pci_write_config_word(dev, PCI_COMMAND, 0x0000);
1314 pci_write_config_word(dev, PCI_STATUS, 0xffff);
1316 child = pci_add_new_bus(bus, dev, ++max);
1317 buses = (buses & 0xff000000)
1318 | ((unsigned int)(child->primary) << 0)
1319 | ((unsigned int)(child->secondary) << 8)
1320 | ((unsigned int)(child->subordinate) << 16);
1321 /*
1322 * We need to blast all three values with a single write.
1323 */
1324 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
1325 if (!is_cardbus) {
1326 /* Now we can scan all subordinate buses... */
1327 max = pci_do_scan_bus(child);
1328 } else {
1329 /*
1330 * For CardBus bridges, we leave 4 bus numbers
1331 * as cards with a PCI-to-PCI bridge can be
1332 * inserted later.
1333 */
1334 max += 3;
1335 }
1336 /*
1337 * Set the subordinate bus number to its real value.
1338 */
1339 child->subordinate = max;
1340 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
1341 pci_write_config_word(dev, PCI_COMMAND, cr);
1342 }
1343 sprintf(child->name, (is_cardbus ? "PCI CardBus #%02x" : "PCI Bus #%02x"), child->number);
1344 return max;
1345 }
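/*
 * Layout of the PCI_PRIMARY_BUS dword manipulated above, low byte
 * first: primary bus, secondary bus, subordinate bus, secondary
 * latency timer. For example, buses == 0x00030100 describes a bridge
 * on bus 0 whose child bus is 1 and whose deepest subordinate bus is 3.
 */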
1347 /*
1348 * Read interrupt line and base address registers.
1349 * The architecture-dependent code can tweak these, of course.
1350 */
1351 static void pci_read_irq(struct pci_dev *dev)
1352 {
1353 unsigned char irq;
1355 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
1356 if (irq)
1357 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1358 dev->irq = irq;
1359 }
1361 /**
1362 * pci_setup_device - fill in class and map information of a device
1363 * @dev: the device structure to fill
1365 * Initialize the device structure with information about the device's
1366 * vendor, class, memory and I/O-space addresses, IRQ lines etc.
1367 * Called at initialisation of the PCI subsystem and by CardBus services.
1368 * Returns 0 on success and -1 if the device is of an unknown type (not
1369 * normal, bridge or CardBus).
1370 */
1371 int pci_setup_device(struct pci_dev * dev)
1372 {
1373 u32 class;
1375 sprintf(dev->slot_name, "%02x:%02x.%d", dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
1376 sprintf(dev->name, "PCI device %04x:%04x", dev->vendor, dev->device);
1378 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1379 class >>= 8; /* upper 3 bytes */
1380 dev->class = class;
1381 class >>= 8;
1383 DBG("Found %02x:%02x [%04x/%04x] %06x %02x\n", dev->bus->number, dev->devfn, dev->vendor, dev->device, class, dev->hdr_type);
1385 /* "Unknown power state" */
1386 dev->current_state = 4;
1388 switch (dev->hdr_type) { /* header type */
1389 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1390 if (class == PCI_CLASS_BRIDGE_PCI)
1391 goto bad;
1392 pci_read_irq(dev);
1393 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1394 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1395 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1396 break;
1398 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1399 if (class != PCI_CLASS_BRIDGE_PCI)
1400 goto bad;
1401 /* The PCI-to-PCI bridge spec requires that a subtractive-decode
1402 (i.e. transparent) bridge have a programming interface
1403 code of 0x01. */
1404 dev->transparent = ((dev->class & 0xff) == 1);
1405 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1406 break;
1408 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1409 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1410 goto bad;
1411 pci_read_irq(dev);
1412 pci_read_bases(dev, 1, 0);
1413 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1414 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1415 break;
1417 default: /* unknown header */
1418 printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
1419 dev->slot_name, dev->hdr_type);
1420 return -1;
1422 bad:
1423 printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
1424 dev->slot_name, class, dev->hdr_type);
1425 dev->class = PCI_CLASS_NOT_DEFINED;
1426 }
1428 /* We found a fine healthy device, go go go... */
1429 return 0;
1430 }
1432 /*
1433 * Read the config data for a PCI device, sanity-check it
1434 * and fill in the dev structure...
1435 */
1436 struct pci_dev * __devinit pci_scan_device(struct pci_dev *temp)
1437 {
1438 struct pci_dev *dev;
1439 u32 l;
1441 if (pci_read_config_dword(temp, PCI_VENDOR_ID, &l))
1442 return NULL;
1444 /* some broken boards return 0 or ~0 if a slot is empty: */
1445 if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000)
1446 return NULL;
1448 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
1449 if (!dev)
1450 return NULL;
1452 memcpy(dev, temp, sizeof(*dev));
1453 dev->vendor = l & 0xffff;
1454 dev->device = (l >> 16) & 0xffff;
1456 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1457 set this higher, assuming the system even supports it. */
1458 dev->dma_mask = 0xffffffff;
1459 if (pci_setup_device(dev) < 0) {
1460 kfree(dev);
1461 dev = NULL;
1462 }
1463 return dev;
1464 }
1466 struct pci_dev * __devinit pci_scan_slot(struct pci_dev *temp)
1467 {
1468 struct pci_bus *bus = temp->bus;
1469 struct pci_dev *dev;
1470 struct pci_dev *first_dev = NULL;
1471 int func = 0;
1472 int is_multi = 0;
1473 u8 hdr_type;
1475 for (func = 0; func < 8; func++, temp->devfn++) {
1476 if (func && !is_multi) /* not a multi-function device */
1477 continue;
1478 if (pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type))
1479 continue;
1480 temp->hdr_type = hdr_type & 0x7f;
1482 dev = pci_scan_device(temp);
1483 if (!dev)
1484 continue;
1485 pci_name_device(dev);
1486 if (!func) {
1487 is_multi = hdr_type & 0x80;
1488 first_dev = dev;
1489 }
1491 /*
1492 * Link the device to both the global PCI device chain and
1493 * the per-bus list of devices.
1494 */
1495 list_add_tail(&dev->global_list, &pci_devices);
1496 list_add_tail(&dev->bus_list, &bus->devices);
1498 /* Fix up broken headers */
1499 pci_fixup_device(PCI_FIXUP_HEADER, dev);
1500 }
1501 return first_dev;
1502 }
1504 unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
1505 {
1506 unsigned int devfn, max, pass;
1507 struct list_head *ln;
1508 /* XEN MODIFICATION: Allocate 'dev0' on heap to avoid stack overflow. */
1509 struct pci_dev *dev, *dev0;
1511 DBG("Scanning bus %02x\n", bus->number);
1512 max = bus->secondary;
1514 /* Create a device template */
1515 dev0 = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
1516 if(!dev0) {
1517 panic("Out of memory scanning PCI bus!\n");
1518 }
1519 memset(dev0, 0, sizeof(struct pci_dev));
1520 dev0->bus = bus;
1521 dev0->sysdata = bus->sysdata;
1523 /* Go find them, Rover! */
1524 for (devfn = 0; devfn < 0x100; devfn += 8) {
1525 dev0->devfn = devfn;
1526 pci_scan_slot(dev0);
1527 }
1528 kfree(dev0);
1530 /*
1531 * After performing arch-dependent fixup of the bus, look behind
1532 * all PCI-to-PCI bridges on this bus.
1533 */
1534 DBG("Fixups for bus %02x\n", bus->number);
1535 pcibios_fixup_bus(bus);
1536 for (pass=0; pass < 2; pass++)
1537 for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) {
1538 dev = pci_dev_b(ln);
1539 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1540 max = pci_scan_bridge(bus, dev, max, pass);
1541 }
1543 /*
1544 * We've scanned the bus and so we know all about what's on
1545 * the other side of any bridges that may be on this bus plus
1546 * any devices.
1548 * Return how far we've got finding sub-buses.
1549 */
1550 DBG("Bus scan for %02x returning with max=%02x\n", bus->number, max);
1551 return max;
1552 }
1554 int __devinit pci_bus_exists(const struct list_head *list, int nr)
1555 {
1556 const struct list_head *l;
1558 for(l=list->next; l != list; l = l->next) {
1559 const struct pci_bus *b = pci_bus_b(l);
1560 if (b->number == nr || pci_bus_exists(&b->children, nr))
1561 return 1;
1562 }
1563 return 0;
1564 }
1566 struct pci_bus * __devinit pci_alloc_primary_bus(int bus)
1567 {
1568 struct pci_bus *b;
1570 if (pci_bus_exists(&pci_root_buses, bus)) {
1571 /* If we already got to this bus through a different bridge, ignore it */
1572 DBG("PCI: Bus %02x already known\n", bus);
1573 return NULL;
1574 }
1576 b = pci_alloc_bus();
1577 list_add_tail(&b->node, &pci_root_buses);
1579 b->number = b->secondary = bus;
1580 b->resource[0] = &ioport_resource;
1581 b->resource[1] = &iomem_resource;
1582 return b;
1583 }
1585 struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata)
1586 {
1587 struct pci_bus *b = pci_alloc_primary_bus(bus);
1588 if (b) {
1589 b->sysdata = sysdata;
1590 b->ops = ops;
1591 b->subordinate = pci_do_scan_bus(b);
1592 }
1593 return b;
1594 }
1596 #ifdef CONFIG_PM
1598 /*
1599 * PCI Power management..
1601 * This needs to be done centralized, so that we power manage PCI
1602 * devices in the right order: we should not shut down PCI bridges
1603 * before we've shut down the devices behind them, and we should
1604 * not wake up devices before we've woken up the bridge to the
1605 * device.. Eh?
1607 * We do not touch devices that don't have a driver that exports
1608 * a suspend/resume function. That is just too dangerous. If the default
1609 * PCI suspend/resume functions work for a device, the driver can
1610 * easily implement them (ie just have a suspend function that calls
1611 * the pci_set_power_state() function).
1612 */
1614 static int pci_pm_save_state_device(struct pci_dev *dev, u32 state)
1615 {
1616 int error = 0;
1617 if (dev) {
1618 struct pci_driver *driver = dev->driver;
1619 if (driver && driver->save_state)
1620 error = driver->save_state(dev,state);
1621 }
1622 return error;
1623 }
1625 static int pci_pm_suspend_device(struct pci_dev *dev, u32 state)
1626 {
1627 int error = 0;
1628 if (dev) {
1629 struct pci_driver *driver = dev->driver;
1630 if (driver && driver->suspend)
1631 error = driver->suspend(dev,state);
1632 }
1633 return error;
1634 }
1636 static int pci_pm_resume_device(struct pci_dev *dev)
1637 {
1638 int error = 0;
1639 if (dev) {
1640 struct pci_driver *driver = dev->driver;
1641 if (driver && driver->resume)
1642 error = driver->resume(dev);
1643 }
1644 return error;
1645 }
1647 static int pci_pm_save_state_bus(struct pci_bus *bus, u32 state)
1648 {
1649 struct list_head *list;
1650 int error = 0;
1652 list_for_each(list, &bus->children) {
1653 error = pci_pm_save_state_bus(pci_bus_b(list),state);
1654 if (error) return error;
1655 }
1656 list_for_each(list, &bus->devices) {
1657 error = pci_pm_save_state_device(pci_dev_b(list),state);
1658 if (error) return error;
1659 }
1660 return 0;
1661 }
1663 static int pci_pm_suspend_bus(struct pci_bus *bus, u32 state)
1664 {
1665 struct list_head *list;
1667 /* Walk the bus children list */
1668 list_for_each(list, &bus->children)
1669 pci_pm_suspend_bus(pci_bus_b(list),state);
1671 /* Walk the device children list */
1672 list_for_each(list, &bus->devices)
1673 pci_pm_suspend_device(pci_dev_b(list),state);
1674 return 0;
1675 }
1677 static int pci_pm_resume_bus(struct pci_bus *bus)
1678 {
1679 struct list_head *list;
1681 /* Walk the device children list */
1682 list_for_each(list, &bus->devices)
1683 pci_pm_resume_device(pci_dev_b(list));
1685 /* And then walk the bus children */
1686 list_for_each(list, &bus->children)
1687 pci_pm_resume_bus(pci_bus_b(list));
1688 return 0;
1689 }
1691 static int pci_pm_save_state(u32 state)
1692 {
1693 struct list_head *list;
1694 struct pci_bus *bus;
1695 int error = 0;
1697 list_for_each(list, &pci_root_buses) {
1698 bus = pci_bus_b(list);
1699 error = pci_pm_save_state_bus(bus,state);
1700 if (!error)
1701 error = pci_pm_save_state_device(bus->self,state);
1702 }
1703 return error;
1704 }
1706 static int pci_pm_suspend(u32 state)
1707 {
1708 struct list_head *list;
1709 struct pci_bus *bus;
1711 list_for_each(list, &pci_root_buses) {
1712 bus = pci_bus_b(list);
1713 pci_pm_suspend_bus(bus,state);
1714 pci_pm_suspend_device(bus->self,state);
1715 }
1716 return 0;
1717 }
1719 int pci_pm_resume(void)
1720 {
1721 struct list_head *list;
1722 struct pci_bus *bus;
1724 list_for_each(list, &pci_root_buses) {
1725 bus = pci_bus_b(list);
1726 pci_pm_resume_device(bus->self);
1727 pci_pm_resume_bus(bus);
1728 }
1729 return 0;
1730 }
1732 static int
1733 pci_pm_callback(struct pm_dev *pm_device, pm_request_t rqst, void *data)
1734 {
1735 int error = 0;
1737 switch (rqst) {
1738 case PM_SAVE_STATE:
1739 error = pci_pm_save_state((unsigned long)data);
1740 break;
1741 case PM_SUSPEND:
1742 error = pci_pm_suspend((unsigned long)data);
1743 break;
1744 case PM_RESUME:
1745 error = pci_pm_resume();
1746 break;
1747 default: break;
1748 }
1749 return error;
1750 }
1752 #endif
1755 #if 0 /* XXX KAF: Only USB uses this stuff -- I think we'll just bin it. */
1757 /*
1758 * Pool allocator ... wraps the pci_alloc_consistent page allocator, so
1759 * small blocks are easily used by drivers for bus mastering controllers.
1760 * This should probably be sharing the guts of the slab allocator.
1761 */
1763 struct pci_pool { /* the pool */
1764 struct list_head page_list;
1765 spinlock_t lock;
1766 size_t blocks_per_page;
1767 size_t size;
1768 int flags;
1769 struct pci_dev *dev;
1770 size_t allocation;
1771 char name [32];
1772 wait_queue_head_t waitq;
1773 };
1775 struct pci_page { /* cacheable header for 'allocation' bytes */
1776 struct list_head page_list;
1777 void *vaddr;
1778 dma_addr_t dma;
1779 unsigned long bitmap [0];
1780 };
1782 #define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
1783 #define POOL_POISON_BYTE 0xa7
1785 // #define CONFIG_PCIPOOL_DEBUG
1788 /**
1789 * pci_pool_create - Creates a pool of pci consistent memory blocks, for dma.
1790 * @name: name of pool, for diagnostics
1791 * @pdev: pci device that will be doing the DMA
1792 * @size: size of the blocks in this pool.
1793 * @align: alignment requirement for blocks; must be a power of two
1794 * @allocation: returned blocks won't cross this boundary (or zero)
1795 * @flags: SLAB_* flags (not all are supported).
1797 * Returns a pci allocation pool with the requested characteristics, or
1798 * null if one can't be created. Given one of these pools, pci_pool_alloc()
1799 * may be used to allocate memory. Such memory will all have "consistent"
1800 * DMA mappings, accessible by the device and its driver without using
1801 * cache flushing primitives. The actual size of blocks allocated may be
1802 * larger than requested because of alignment.
1804 * If allocation is nonzero, objects returned from pci_pool_alloc() won't
1805 * cross that size boundary. This is useful for devices which have
1806 * addressing restrictions on individual DMA transfers, such as not crossing
1807 * boundaries of 4KBytes.
1808 */
1809 struct pci_pool *
1810 pci_pool_create (const char *name, struct pci_dev *pdev,
1811 size_t size, size_t align, size_t allocation, int flags)
1812 {
1813 struct pci_pool *retval;
1815 if (align == 0)
1816 align = 1;
1817 if (size == 0)
1818 return 0;
1819 else if (size < align)
1820 size = align;
1821 else if ((size % align) != 0) {
1822 size += align + 1;
1823 size &= ~(align - 1);
1824 }
1826 if (allocation == 0) {
1827 if (PAGE_SIZE < size)
1828 allocation = size;
1829 else
1830 allocation = PAGE_SIZE;
1831 // FIXME: round up for less fragmentation
1832 } else if (allocation < size)
1833 return 0;
1835 if (!(retval = kmalloc (sizeof *retval, flags)))
1836 return retval;
1838 #ifdef CONFIG_PCIPOOL_DEBUG
1839 flags |= SLAB_POISON;
1840 #endif
1842 strncpy (retval->name, name, sizeof retval->name);
1843 retval->name [sizeof retval->name - 1] = 0;
1845 retval->dev = pdev;
1846 INIT_LIST_HEAD (&retval->page_list);
1847 spin_lock_init (&retval->lock);
1848 retval->size = size;
1849 retval->flags = flags;
1850 retval->allocation = allocation;
1851 retval->blocks_per_page = allocation / size;
1852 init_waitqueue_head (&retval->waitq);
1854 #ifdef CONFIG_PCIPOOL_DEBUG
1855 printk (KERN_DEBUG "pcipool create %s/%s size %d, %d/page (%d alloc)\n",
1856 pdev ? pdev->slot_name : NULL, retval->name, size,
1857 retval->blocks_per_page, allocation);
1858 #endif
1860 return retval;
1861 }
1864 static struct pci_page *
1865 pool_alloc_page (struct pci_pool *pool, int mem_flags)
1866 {
1867 struct pci_page *page;
1868 int mapsize;
1870 mapsize = pool->blocks_per_page;
1871 mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
1872 mapsize *= sizeof (long);
1874 page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
1875 if (!page)
1876 return 0;
1877 page->vaddr = pci_alloc_consistent (pool->dev,
1878 pool->allocation,
1879 &page->dma);
1880 if (page->vaddr) {
1881 memset (page->bitmap, 0xff, mapsize); // bit set == free
1882 if (pool->flags & SLAB_POISON)
1883 memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
1884 list_add (&page->page_list, &pool->page_list);
1885 } else {
1886 kfree (page);
1887 page = 0;
1888 }
1889 return page;
1890 }
1893 static inline int
1894 is_page_busy (int blocks, unsigned long *bitmap)
1895 {
1896 while (blocks > 0) {
1897 if (*bitmap++ != ~0UL)
1898 return 1;
1899 blocks -= BITS_PER_LONG;
1900 }
1901 return 0;
1902 }
1904 static void
1905 pool_free_page (struct pci_pool *pool, struct pci_page *page)
1906 {
1907 dma_addr_t dma = page->dma;
1909 if (pool->flags & SLAB_POISON)
1910 memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
1911 pci_free_consistent (pool->dev, pool->allocation, page->vaddr, dma);
1912 list_del (&page->page_list);
1913 kfree (page);
1914 }
1917 /**
1918 * pci_pool_destroy - destroys a pool of pci memory blocks.
1919 * @pool: pci pool that will be destroyed
1921 * Caller guarantees that no more memory from the pool is in use,
1922 * and that nothing will try to use the pool after this call.
1923 */
1924 void
1925 pci_pool_destroy (struct pci_pool *pool)
1926 {
1927 unsigned long flags;
1929 #ifdef CONFIG_PCIPOOL_DEBUG
1930 printk (KERN_DEBUG "pcipool destroy %s/%s\n",
1931 pool->dev ? pool->dev->slot_name : NULL,
1932 pool->name);
1933 #endif
1935 spin_lock_irqsave (&pool->lock, flags);
1936 while (!list_empty (&pool->page_list)) {
1937 struct pci_page *page;
1938 page = list_entry (pool->page_list.next,
1939 struct pci_page, page_list);
1940 if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
1941 printk (KERN_ERR "pci_pool_destroy %s/%s, %p busy\n",
1942 pool->dev ? pool->dev->slot_name : NULL,
1943 pool->name, page->vaddr);
1944 /* leak the still-in-use consistent memory */
1945 list_del (&page->page_list);
1946 kfree (page);
1947 } else
1948 pool_free_page (pool, page);
1949 }
1950 spin_unlock_irqrestore (&pool->lock, flags);
1951 kfree (pool);
1952 }
1955 /**
1956 * pci_pool_alloc - get a block of consistent memory
1957 * @pool: pci pool that will produce the block
1958 * @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
1959 * @handle: pointer to dma address of block
1961 * This returns the kernel virtual address of a currently unused block,
1962 * and reports its dma address through the handle.
1963 * If such a memory block can't be allocated, null is returned.
1964 */
1965 void *
1966 pci_pool_alloc (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
1967 {
1968 unsigned long flags;
1969 struct list_head *entry;
1970 struct pci_page *page;
1971 int map, block;
1972 size_t offset;
1973 void *retval;
1975 restart:
1976 spin_lock_irqsave (&pool->lock, flags);
1977 list_for_each (entry, &pool->page_list) {
1978 int i;
1979 page = list_entry (entry, struct pci_page, page_list);
1980 /* only cachable accesses here ... */
1981 for (map = 0, i = 0;
1982 i < pool->blocks_per_page;
1983 i += BITS_PER_LONG, map++) {
1984 if (page->bitmap [map] == 0)
1985 continue;
1986 block = ffz (~ page->bitmap [map]);
1987 if ((i + block) < pool->blocks_per_page) {
1988 clear_bit (block, &page->bitmap [map]);
1989 offset = (BITS_PER_LONG * map) + block;
1990 offset *= pool->size;
1991 goto ready;
1992 }
1993 }
1994 }
1995 if (!(page = pool_alloc_page (pool, mem_flags))) {
1996 if (mem_flags == SLAB_KERNEL) {
1997 DECLARE_WAITQUEUE (wait, current);
1999 current->state = TASK_INTERRUPTIBLE;
2000 add_wait_queue (&pool->waitq, &wait);
2001 spin_unlock_irqrestore (&pool->lock, flags);
2003 schedule_timeout (POOL_TIMEOUT_JIFFIES);
2005 current->state = TASK_RUNNING;
2006 remove_wait_queue (&pool->waitq, &wait);
2007 goto restart;
2008 }
2009 retval = 0;
2010 goto done;
2011 }
2013 clear_bit (0, &page->bitmap [0]);
2014 offset = 0;
2015 ready:
2016 retval = offset + page->vaddr;
2017 *handle = offset + page->dma;
2018 done:
2019 spin_unlock_irqrestore (&pool->lock, flags);
2020 return retval;
2021 }
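/*
 * Editor's note: a brief sketch of the two calling modes documented above.
 * With SLAB_KERNEL the call may sleep on the pool waitqueue and retry until
 * a page is available; with SLAB_ATOMIC it fails fast and returns null:
 *
 *	dma_addr_t dma;
 *	void *buf = pci_pool_alloc (pool, SLAB_ATOMIC, &dma);
 *	if (!buf)
 *		return -ENOMEM;	// no free block, and we may not sleep
 */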
2024 static struct pci_page *
2025 pool_find_page (struct pci_pool *pool, dma_addr_t dma)
2026 {
2027 unsigned long flags;
2028 struct list_head *entry;
2029 struct pci_page *page;
2031 spin_lock_irqsave (&pool->lock, flags);
2032 list_for_each (entry, &pool->page_list) {
2033 page = list_entry (entry, struct pci_page, page_list);
2034 if (dma < page->dma)
2035 continue;
2036 if (dma < (page->dma + pool->allocation))
2037 goto done;
2038 }
2039 page = 0;
2040 done:
2041 spin_unlock_irqrestore (&pool->lock, flags);
2042 return page;
2043 }
2046 /**
2047 * pci_pool_free - put block back into pci pool
2048 * @pool: the pci pool holding the block
2049 * @vaddr: virtual address of block
2050 * @dma: dma address of block
2052 * Caller promises neither device nor driver will again touch this block
2053 * unless it is first re-allocated.
2054 */
2055 void
2056 pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
2057 {
2058 struct pci_page *page;
2059 unsigned long flags;
2060 int map, block;
2062 if ((page = pool_find_page (pool, dma)) == 0) {
2063 printk (KERN_ERR "pci_pool_free %s/%s, %p/%x (bad dma)\n",
2064 pool->dev ? pool->dev->slot_name : NULL,
2065 pool->name, vaddr, (int) (dma & 0xffffffff));
2066 return;
2067 }
2068 #ifdef CONFIG_PCIPOOL_DEBUG
2069 if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
2070 printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%x\n",
2071 pool->dev ? pool->dev->slot_name : NULL,
2072 pool->name, vaddr, (int) (dma & 0xffffffff));
2073 return;
2074 }
2075 #endif
2077 block = dma - page->dma;
2078 block /= pool->size;
2079 map = block / BITS_PER_LONG;
2080 block %= BITS_PER_LONG;
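/*
 * Editor's note: a worked example of the index math above, assuming
 * pool->size == 64 and BITS_PER_LONG == 32. For a block at byte offset
 * 4160 into the page: block = 4160/64 = 65, so map = 65/32 = 2 (the third
 * bitmap word) and bit 65%32 = 1 within that word.
 */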
2082 #ifdef CONFIG_PCIPOOL_DEBUG
2083 if (page->bitmap [map] & (1UL << block)) {
2084 printk (KERN_ERR "pci_pool_free %s/%s, dma %x already free\n",
2085 pool->dev ? pool->dev->slot_name : NULL,
2086 pool->name, (int) (dma & 0xffffffff));
2087 return;
2088 }
2089 #endif
2090 if (pool->flags & SLAB_POISON)
2091 memset (vaddr, POOL_POISON_BYTE, pool->size);
2093 spin_lock_irqsave (&pool->lock, flags);
2094 set_bit (block, &page->bitmap [map]);
2095 if (waitqueue_active (&pool->waitq))
2096 wake_up (&pool->waitq);
2097 /*
2098 * Resist a temptation to do
2099 * if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
2100 * it is not interrupt safe. Better have empty pages hang around.
2101 */
2102 spin_unlock_irqrestore (&pool->lock, flags);
2103 }
2105 #endif /* XXX End of PCI pool allocator stuff. */
2108 void __devinit pci_init(void)
2109 {
2110 struct pci_dev *dev;
2112 pcibios_init();
2114 pci_for_each_dev(dev) {
2115 pci_fixup_device(PCI_FIXUP_FINAL, dev);
2116 }
2118 #ifdef CONFIG_PM
2119 pm_register(PM_PCI_DEV, 0, pci_pm_callback);
2120 #endif
2121 }
2123 static int __devinit pci_setup(char *str)
2124 {
2125 while (str) {
2126 char *k = strchr(str, ',');
2127 if (k)
2128 *k++ = 0;
2129 if (*str && (str = pcibios_setup(str)) && *str) {
2130 /* PCI layer options should be handled here */
2131 printk(KERN_ERR "PCI: Unknown option `%s'\n", str);
2132 }
2133 str = k;
2134 }
2135 return 1;
2136 }
2138 __setup("pci=", pci_setup);
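/*
 * Editor's note: pci_setup() splits the "pci=" boot argument at commas and
 * hands each token to the architecture's pcibios_setup(). A command line
 * such as the following (option names are illustrative and arch-specific):
 *
 *	pci=biosirq,nosort
 *
 * is processed as pcibios_setup("biosirq"), then pcibios_setup("nosort");
 * any token pcibios_setup() does not consume is reported as unknown above.
 */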
2140 EXPORT_SYMBOL(pci_read_config_byte);
2141 EXPORT_SYMBOL(pci_read_config_word);
2142 EXPORT_SYMBOL(pci_read_config_dword);
2143 EXPORT_SYMBOL(pci_write_config_byte);
2144 EXPORT_SYMBOL(pci_write_config_word);
2145 EXPORT_SYMBOL(pci_write_config_dword);
2146 EXPORT_SYMBOL(pci_devices);
2147 EXPORT_SYMBOL(pci_root_buses);
2148 EXPORT_SYMBOL(pci_enable_device_bars);
2149 EXPORT_SYMBOL(pci_enable_device);
2150 EXPORT_SYMBOL(pci_disable_device);
2151 EXPORT_SYMBOL(pci_find_capability);
2152 EXPORT_SYMBOL(pci_release_regions);
2153 EXPORT_SYMBOL(pci_request_regions);
2154 EXPORT_SYMBOL(pci_release_region);
2155 EXPORT_SYMBOL(pci_request_region);
2156 EXPORT_SYMBOL(pci_find_class);
2157 EXPORT_SYMBOL(pci_find_device);
2158 EXPORT_SYMBOL(pci_find_slot);
2159 EXPORT_SYMBOL(pci_find_subsys);
2160 EXPORT_SYMBOL(pci_set_master);
2161 EXPORT_SYMBOL(pci_set_mwi);
2162 EXPORT_SYMBOL(pci_clear_mwi);
2163 EXPORT_SYMBOL(pdev_set_mwi);
2164 EXPORT_SYMBOL(pci_set_dma_mask);
2165 EXPORT_SYMBOL(pci_dac_set_dma_mask);
2166 EXPORT_SYMBOL(pci_assign_resource);
2167 EXPORT_SYMBOL(pci_register_driver);
2168 EXPORT_SYMBOL(pci_unregister_driver);
2169 EXPORT_SYMBOL(pci_dev_driver);
2170 EXPORT_SYMBOL(pci_match_device);
2171 EXPORT_SYMBOL(pci_find_parent_resource);
2173 #ifdef CONFIG_HOTPLUG
2174 EXPORT_SYMBOL(pci_setup_device);
2175 EXPORT_SYMBOL(pci_insert_device);
2176 EXPORT_SYMBOL(pci_remove_device);
2177 EXPORT_SYMBOL(pci_announce_device_to_drivers);
2178 EXPORT_SYMBOL(pci_add_new_bus);
2179 EXPORT_SYMBOL(pci_do_scan_bus);
2180 EXPORT_SYMBOL(pci_scan_slot);
2181 EXPORT_SYMBOL(pci_scan_bus);
2182 EXPORT_SYMBOL(pci_scan_device);
2183 EXPORT_SYMBOL(pci_read_bridge_bases);
2184 #ifdef CONFIG_PROC_FS
2185 EXPORT_SYMBOL(pci_proc_attach_device);
2186 EXPORT_SYMBOL(pci_proc_detach_device);
2187 EXPORT_SYMBOL(pci_proc_attach_bus);
2188 EXPORT_SYMBOL(pci_proc_detach_bus);
2189 EXPORT_SYMBOL(proc_bus_pci_dir);
2190 #endif
2191 #endif
2193 EXPORT_SYMBOL(pci_set_power_state);
2194 EXPORT_SYMBOL(pci_save_state);
2195 EXPORT_SYMBOL(pci_restore_state);
2196 EXPORT_SYMBOL(pci_enable_wake);
2198 /* Obsolete functions */
2200 EXPORT_SYMBOL(pcibios_present);
2201 EXPORT_SYMBOL(pcibios_read_config_byte);
2202 EXPORT_SYMBOL(pcibios_read_config_word);
2203 EXPORT_SYMBOL(pcibios_read_config_dword);
2204 EXPORT_SYMBOL(pcibios_write_config_byte);
2205 EXPORT_SYMBOL(pcibios_write_config_word);
2206 EXPORT_SYMBOL(pcibios_write_config_dword);
2207 EXPORT_SYMBOL(pcibios_find_class);
2208 EXPORT_SYMBOL(pcibios_find_device);
2210 /* Quirk info */
2212 EXPORT_SYMBOL(isa_dma_bridge_buggy);
2213 EXPORT_SYMBOL(pci_pci_problems);
2215 #if 0
2216 /* Pool allocator */
2218 EXPORT_SYMBOL (pci_pool_create);
2219 EXPORT_SYMBOL (pci_pool_destroy);
2220 EXPORT_SYMBOL (pci_pool_alloc);
2221 EXPORT_SYMBOL (pci_pool_free);
2223 #endif