direct-io.hg
changeset 1504:cd6f5625b3d7
bitkeeper revision 1.981 (40d1c7ddnMbgqT6f27EHAuSpvudKxw)
Merge scramble.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xeno
author   | kaf24@scramble.cl.cam.ac.uk
date     | Thu Jun 17 16:33:33 2004 +0000 (2004-06-17)
parents  | 7916e2d4ce11 7542831805f9
children | ead91151a0e6
files    | xen/arch/x86/pci-irq.c xen/drivers/pci/pci.c
line diff
--- a/xen/arch/x86/pci-irq.c	Thu Jun 17 15:58:19 2004 +0000
+++ b/xen/arch/x86/pci-irq.c	Thu Jun 17 16:33:33 2004 +0000
@@ -843,10 +843,6 @@ static struct irq_info *pirq_get_info(st
     return NULL;
 }
 
-static void pcibios_test_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
-{
-}
-
 static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
 {
     u8 pin;
--- a/xen/drivers/pci/pci.c	Thu Jun 17 15:58:19 2004 +0000
+++ b/xen/drivers/pci/pci.c	Thu Jun 17 16:33:33 2004 +0000
@@ -290,10 +290,7 @@ pci_set_power_state(struct pci_dev *dev,
     /* Mandatory power management transition delays */
     /* see PCI PM 1.1 5.6.1 table 18 */
     if(state == 3 || dev->current_state == 3)
-    {
-        set_current_state(TASK_UNINTERRUPTIBLE);
-        schedule_timeout(HZ/100);
-    }
+        mdelay(10);
     else if(state == 2 || dev->current_state == 2)
         udelay(200);
     dev->current_state = state;
@@ -1673,359 +1670,7 @@ pci_pm_callback(struct pm_dev *pm_device
 
 #endif
 
-
-#if 0 /* XXX KAF: Only USB uses this stuff -- I think we'll just bin it. */
-
-/*
- * Pool allocator ... wraps the pci_alloc_consistent page allocator, so
- * small blocks are easily used by drivers for bus mastering controllers.
- * This should probably be sharing the guts of the slab allocator.
- */
-
-struct pci_pool {       /* the pool */
-    struct list_head    page_list;
-    spinlock_t          lock;
-    size_t              blocks_per_page;
-    size_t              size;
-    int                 flags;
-    struct pci_dev      *dev;
-    size_t              allocation;
-    char                name [32];
-    wait_queue_head_t   waitq;
-};
-
-struct pci_page {       /* cacheable header for 'allocation' bytes */
-    struct list_head    page_list;
-    void                *vaddr;
-    dma_addr_t          dma;
-    unsigned long       bitmap [0];
-};
-
-#define POOL_TIMEOUT_JIFFIES    ((100 /* msec */ * HZ) / 1000)
-#define POOL_POISON_BYTE        0xa7
-
-// #define CONFIG_PCIPOOL_DEBUG
-
-
-/**
- * pci_pool_create - Creates a pool of pci consistent memory blocks, for dma.
- * @name: name of pool, for diagnostics
- * @pdev: pci device that will be doing the DMA
- * @size: size of the blocks in this pool.
- * @align: alignment requirement for blocks; must be a power of two
- * @allocation: returned blocks won't cross this boundary (or zero)
- * @flags: SLAB_* flags (not all are supported).
- *
- * Returns a pci allocation pool with the requested characteristics, or
- * null if one can't be created.  Given one of these pools, pci_pool_alloc()
- * may be used to allocate memory.  Such memory will all have "consistent"
- * DMA mappings, accessible by the device and its driver without using
- * cache flushing primitives.  The actual size of blocks allocated may be
- * larger than requested because of alignment.
- *
- * If allocation is nonzero, objects returned from pci_pool_alloc() won't
- * cross that size boundary.  This is useful for devices which have
- * addressing restrictions on individual DMA transfers, such as not crossing
- * boundaries of 4KBytes.
- */
-struct pci_pool *
-pci_pool_create (const char *name, struct pci_dev *pdev,
-    size_t size, size_t align, size_t allocation, int flags)
-{
-    struct pci_pool *retval;
-
-    if (align == 0)
-        align = 1;
-    if (size == 0)
-        return 0;
-    else if (size < align)
-        size = align;
-    else if ((size % align) != 0) {
-        size += align + 1;
-        size &= ~(align - 1);
-    }
-
-    if (allocation == 0) {
-        if (PAGE_SIZE < size)
-            allocation = size;
-        else
-            allocation = PAGE_SIZE;
-        // FIXME: round up for less fragmentation
-    } else if (allocation < size)
-        return 0;
-
-    if (!(retval = kmalloc (sizeof *retval, flags)))
-        return retval;
-
-#ifdef CONFIG_PCIPOOL_DEBUG
-    flags |= SLAB_POISON;
-#endif
-
-    strncpy (retval->name, name, sizeof retval->name);
-    retval->name [sizeof retval->name - 1] = 0;
-
-    retval->dev = pdev;
-    INIT_LIST_HEAD (&retval->page_list);
-    spin_lock_init (&retval->lock);
-    retval->size = size;
-    retval->flags = flags;
-    retval->allocation = allocation;
-    retval->blocks_per_page = allocation / size;
-    init_waitqueue_head (&retval->waitq);
-
-#ifdef CONFIG_PCIPOOL_DEBUG
-    printk (KERN_DEBUG "pcipool create %s/%s size %d, %d/page (%d alloc)\n",
-        pdev ? pdev->slot_name : NULL, retval->name, size,
-        retval->blocks_per_page, allocation);
-#endif
-
-    return retval;
-}
-
-
-static struct pci_page *
-pool_alloc_page (struct pci_pool *pool, int mem_flags)
-{
-    struct pci_page *page;
-    int             mapsize;
-
-    mapsize = pool->blocks_per_page;
-    mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
-    mapsize *= sizeof (long);
-
-    page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
-    if (!page)
-        return 0;
-    page->vaddr = pci_alloc_consistent (pool->dev,
-                                        pool->allocation,
-                                        &page->dma);
-    if (page->vaddr) {
-        memset (page->bitmap, 0xff, mapsize);   // bit set == free
-        if (pool->flags & SLAB_POISON)
-            memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
-        list_add (&page->page_list, &pool->page_list);
-    } else {
-        kfree (page);
-        page = 0;
-    }
-    return page;
-}
-
-
-static inline int
-is_page_busy (int blocks, unsigned long *bitmap)
-{
-    while (blocks > 0) {
-        if (*bitmap++ != ~0UL)
-            return 1;
-        blocks -= BITS_PER_LONG;
-    }
-    return 0;
-}
-
-static void
-pool_free_page (struct pci_pool *pool, struct pci_page *page)
-{
-    dma_addr_t dma = page->dma;
-
-    if (pool->flags & SLAB_POISON)
-        memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
-    pci_free_consistent (pool->dev, pool->allocation, page->vaddr, dma);
-    list_del (&page->page_list);
-    kfree (page);
-}
-
-
-/**
- * pci_pool_destroy - destroys a pool of pci memory blocks.
- * @pool: pci pool that will be destroyed
- *
- * Caller guarantees that no more memory from the pool is in use,
- * and that nothing will try to use the pool after this call.
- */
-void
-pci_pool_destroy (struct pci_pool *pool)
-{
-    unsigned long flags;
-
-#ifdef CONFIG_PCIPOOL_DEBUG
-    printk (KERN_DEBUG "pcipool destroy %s/%s\n",
-        pool->dev ? pool->dev->slot_name : NULL,
-        pool->name);
-#endif
-
-    spin_lock_irqsave (&pool->lock, flags);
-    while (!list_empty (&pool->page_list)) {
-        struct pci_page *page;
-        page = list_entry (pool->page_list.next,
-                struct pci_page, page_list);
-        if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
-            printk (KERN_ERR "pci_pool_destroy %s/%s, %p busy\n",
-                pool->dev ? pool->dev->slot_name : NULL,
-                pool->name, page->vaddr);
-            /* leak the still-in-use consistent memory */
-            list_del (&page->page_list);
-            kfree (page);
-        } else
-            pool_free_page (pool, page);
-    }
-    spin_unlock_irqrestore (&pool->lock, flags);
-    kfree (pool);
-}
-
-
-/**
- * pci_pool_alloc - get a block of consistent memory
- * @pool: pci pool that will produce the block
- * @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
- * @handle: pointer to dma address of block
- *
- * This returns the kernel virtual address of a currently unused block,
- * and reports its dma address through the handle.
- * If such a memory block can't be allocated, null is returned.
- */
-void *
-pci_pool_alloc (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
-{
-    unsigned long       flags;
-    struct list_head    *entry;
-    struct pci_page     *page;
-    int                 map, block;
-    size_t              offset;
-    void                *retval;
-
-restart:
-    spin_lock_irqsave (&pool->lock, flags);
-    list_for_each (entry, &pool->page_list) {
-        int i;
-        page = list_entry (entry, struct pci_page, page_list);
-        /* only cachable accesses here ... */
-        for (map = 0, i = 0;
-                i < pool->blocks_per_page;
-                i += BITS_PER_LONG, map++) {
-            if (page->bitmap [map] == 0)
-                continue;
-            block = ffz (~ page->bitmap [map]);
-            if ((i + block) < pool->blocks_per_page) {
-                clear_bit (block, &page->bitmap [map]);
-                offset = (BITS_PER_LONG * map) + block;
-                offset *= pool->size;
-                goto ready;
-            }
-        }
-    }
-    if (!(page = pool_alloc_page (pool, mem_flags))) {
-        if (mem_flags == SLAB_KERNEL) {
-            DECLARE_WAITQUEUE (wait, current);
-
-            current->state = TASK_INTERRUPTIBLE;
-            add_wait_queue (&pool->waitq, &wait);
-            spin_unlock_irqrestore (&pool->lock, flags);
-
-            schedule_timeout (POOL_TIMEOUT_JIFFIES);
-
-            current->state = TASK_RUNNING;
-            remove_wait_queue (&pool->waitq, &wait);
-            goto restart;
-        }
-        retval = 0;
-        goto done;
-    }
-
-    clear_bit (0, &page->bitmap [0]);
-    offset = 0;
-ready:
-    retval = offset + page->vaddr;
-    *handle = offset + page->dma;
-done:
-    spin_unlock_irqrestore (&pool->lock, flags);
-    return retval;
-}
-
-
-static struct pci_page *
-pool_find_page (struct pci_pool *pool, dma_addr_t dma)
-{
-    unsigned long       flags;
-    struct list_head    *entry;
-    struct pci_page     *page;
-
-    spin_lock_irqsave (&pool->lock, flags);
-    list_for_each (entry, &pool->page_list) {
-        page = list_entry (entry, struct pci_page, page_list);
-        if (dma < page->dma)
-            continue;
-        if (dma < (page->dma + pool->allocation))
-            goto done;
-    }
-    page = 0;
-done:
-    spin_unlock_irqrestore (&pool->lock, flags);
-    return page;
-}
-
-
-/**
- * pci_pool_free - put block back into pci pool
- * @pool: the pci pool holding the block
- * @vaddr: virtual address of block
- * @dma: dma address of block
- *
- * Caller promises neither device nor driver will again touch this block
- * unless it is first re-allocated.
- */
-void
-pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
-{
-    struct pci_page *page;
-    unsigned long   flags;
-    int             map, block;
-
-    if ((page = pool_find_page (pool, dma)) == 0) {
-        printk (KERN_ERR "pci_pool_free %s/%s, %p/%x (bad dma)\n",
-            pool->dev ? pool->dev->slot_name : NULL,
-            pool->name, vaddr, (int) (dma & 0xffffffff));
-        return;
-    }
-#ifdef CONFIG_PCIPOOL_DEBUG
-    if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
-        printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%x\n",
-            pool->dev ? pool->dev->slot_name : NULL,
-            pool->name, vaddr, (int) (dma & 0xffffffff));
-        return;
-    }
-#endif
-
-    block = dma - page->dma;
-    block /= pool->size;
-    map = block / BITS_PER_LONG;
-    block %= BITS_PER_LONG;
-
-#ifdef CONFIG_PCIPOOL_DEBUG
-    if (page->bitmap [map] & (1UL << block)) {
-        printk (KERN_ERR "pci_pool_free %s/%s, dma %x already free\n",
-            pool->dev ? pool->dev->slot_name : NULL,
-            pool->name, dma);
-        return;
-    }
-#endif
-    if (pool->flags & SLAB_POISON)
-        memset (vaddr, POOL_POISON_BYTE, pool->size);
-
-    spin_lock_irqsave (&pool->lock, flags);
-    set_bit (block, &page->bitmap [map]);
-    if (waitqueue_active (&pool->waitq))
-        wake_up (&pool->waitq);
-    /*
-     * Resist a temptation to do
-     *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
-     * it is not interrupt safe.  Better have empty pages hang around.
-     */
-    spin_unlock_irqrestore (&pool->lock, flags);
-}
-
-#endif /* XXX End of PCI pool allocator stuff. */
-
+/* NB. Xen doesn't include the pool allocator. */
 
 void __devinit pci_init(void)
 {
@@ -2132,13 +1777,3 @@ EXPORT_SYMBOL(pcibios_find_device);
 
 EXPORT_SYMBOL(isa_dma_bridge_buggy);
 EXPORT_SYMBOL(pci_pci_problems);
-
-#if 0
-/* Pool allocator */
-
-EXPORT_SYMBOL (pci_pool_create);
-EXPORT_SYMBOL (pci_pool_destroy);
-EXPORT_SYMBOL (pci_pool_alloc);
-EXPORT_SYMBOL (pci_pool_free);
-
-#endif
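Two notes on this diff. In the first hunk, HZ/100 jiffies is 10 ms, so mdelay(10) keeps the mandatory PCI PM 1.1 D3 transition delay (section 5.6.1, table 18) but busy-waits instead of sleeping, presumably because Linux-style schedule_timeout() semantics are not available in this context of the Xen port. The large deleted block is the 2.4-era PCI pool allocator, a small-block wrapper over pci_alloc_consistent() that, per the XXX comment, only the USB drivers used. For reference, a minimal sketch of how a driver would have called that API, reconstructed from the deleted signatures; the function name, pool name, and block sizes here are hypothetical:

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>

/* Hypothetical 2.4-era usage of the removed pci_pool API. The function
 * and pool names are illustrative; the signatures match the deleted
 * code above. */
static int example_pool_user(struct pci_dev *pdev)
{
    struct pci_pool *pool;
    dma_addr_t dma;
    void *buf;

    /* 64-byte blocks, 8-byte aligned, no boundary restriction (0). */
    pool = pci_pool_create("example", pdev, 64, 8, 0, SLAB_KERNEL);
    if (!pool)
        return -ENOMEM;

    /* SLAB_KERNEL may sleep; 'buf' is the CPU-visible address of the
     * block, 'dma' the bus address the device is programmed with. */
    buf = pci_pool_alloc(pool, SLAB_KERNEL, &dma);
    if (buf)
        pci_pool_free(pool, buf, dma);  /* return block to the pool */

    pci_pool_destroy(pool);  /* caller guarantees no blocks in use */
    return 0;
}

Each block comes back with both a CPU virtual address and a device-visible bus address, which is what bus-mastering controllers such as USB host adapters needed for their small descriptors; with no such drivers in Xen, the whole allocator can safely be binned.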