direct-io.hg
changeset 4089:962d8ce831c9
bitkeeper revision 1.1159.258.43 (4230e614QJDirekggk8u5NMTqkptrQ)
Update to Linux 2.6.11.
Signed-off-by: Christian Limpach <chris@xensource.com>
author | cl349@firebug.cl.cam.ac.uk
date | Fri Mar 11 00:28:04 2005 +0000 (2005-03-11)
parents | cff0d3baf599
children | 4d3267854182
files | .rootkeys linux-2.6.10-xen-sparse/include/linux/highmem.h linux-2.6.10-xen-sparse/mm/highmem.c linux-2.6.11-xen-sparse/include/linux/highmem.h linux-2.6.11-xen-sparse/mm/highmem.c
line diff
1.1 --- a/.rootkeys Fri Mar 11 00:19:45 2005 +0000
1.2 +++ b/.rootkeys Fri Mar 11 00:28:04 2005 +0000
1.3 @@ -128,8 +128,6 @@ 409ba2e7akOFqQUg6Qyg2s28xcXiMg linux-2.4
1.4 3e5a4e683HKVU-sxtagrDasRB8eBVw linux-2.4.29-xen-sparse/mm/swapfile.c
1.5 41180721bNns9Na7w1nJ0ZVt8bhUNA linux-2.4.29-xen-sparse/mm/vmalloc.c
1.6 41505c57WAd5l1rlfCLNSCpx9J13vA linux-2.4.29-xen-sparse/net/core/skbuff.c
1.7 -42305f545Vc5SLCUewZ2-n-P9JJhEQ linux-2.6.10-xen-sparse/include/linux/highmem.h
1.8 -42305f54Q6xJ1bXcQJlCQq1m-e2C8g linux-2.6.10-xen-sparse/mm/highmem.c
1.9 40f562372u3A7_kfbYYixPHJJxYUxA linux-2.6.11-xen-sparse/arch/xen/Kconfig
1.10 40f56237utH41NPukqHksuNf29IC9A linux-2.6.11-xen-sparse/arch/xen/Kconfig.drivers
1.11 40f56237penAAlWVBVDpeQZNFIg8CA linux-2.6.11-xen-sparse/arch/xen/Makefile
1.12 @@ -250,9 +248,11 @@ 40f5623cndVUFlkxpf7Lfx7xu8madQ linux-2.6
1.13 4122466356eIBnC9ot44WSVVIFyhQA linux-2.6.11-xen-sparse/include/asm-xen/queues.h
1.14 3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.11-xen-sparse/include/asm-xen/xen_proc.h
1.15 419b4e93z2S0gR17XTy8wg09JEwAhg linux-2.6.11-xen-sparse/include/linux/gfp.h
1.16 +42305f545Vc5SLCUewZ2-n-P9JJhEQ linux-2.6.11-xen-sparse/include/linux/highmem.h
1.17 419dfc609zbti8rqL60tL2dHXQ_rvQ linux-2.6.11-xen-sparse/include/linux/irq.h
1.18 419dfc6awx7w88wk6cG9P3mPidX6LQ linux-2.6.11-xen-sparse/kernel/irq/manage.c
1.19 40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.11-xen-sparse/mkbuildtree
1.20 +42305f54Q6xJ1bXcQJlCQq1m-e2C8g linux-2.6.11-xen-sparse/mm/highmem.c
1.21 412f46c0LJuKAgSPGoC0Z1DEkLfuLA linux-2.6.11-xen-sparse/mm/memory.c
1.22 410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.11-xen-sparse/mm/page_alloc.c
1.23 413cb1e4zst25MDYjg63Y-NGC5_pLg netbsd-2.0-xen-sparse/Makefile
2.1 --- a/linux-2.6.10-xen-sparse/include/linux/highmem.h Fri Mar 11 00:19:45 2005 +0000 2.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 2.3 @@ -1,95 +0,0 @@ 2.4 -#ifndef _LINUX_HIGHMEM_H 2.5 -#define _LINUX_HIGHMEM_H 2.6 - 2.7 -#include <linux/config.h> 2.8 -#include <linux/fs.h> 2.9 -#include <linux/mm.h> 2.10 - 2.11 -#include <asm/cacheflush.h> 2.12 - 2.13 -#ifdef CONFIG_HIGHMEM 2.14 - 2.15 -extern struct page *highmem_start_page; 2.16 - 2.17 -#include <asm/highmem.h> 2.18 - 2.19 -/* declarations for linux/mm/highmem.c */ 2.20 -unsigned int nr_free_highpages(void); 2.21 -void kmap_flush_unused(void); 2.22 - 2.23 -#else /* CONFIG_HIGHMEM */ 2.24 - 2.25 -static inline unsigned int nr_free_highpages(void) { return 0; } 2.26 -static inline void kmap_flush_unused(void) { } 2.27 - 2.28 -static inline void *kmap(struct page *page) 2.29 -{ 2.30 - might_sleep(); 2.31 - return page_address(page); 2.32 -} 2.33 - 2.34 -#define kunmap(page) do { (void) (page); } while (0) 2.35 - 2.36 -#define kmap_atomic(page, idx) page_address(page) 2.37 -#define kunmap_atomic(addr, idx) do { } while (0) 2.38 -#define kmap_atomic_to_page(ptr) virt_to_page(ptr) 2.39 - 2.40 -#endif /* CONFIG_HIGHMEM */ 2.41 - 2.42 -/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ 2.43 -static inline void clear_user_highpage(struct page *page, unsigned long vaddr) 2.44 -{ 2.45 - void *addr = kmap_atomic(page, KM_USER0); 2.46 - clear_user_page(addr, vaddr, page); 2.47 - kunmap_atomic(addr, KM_USER0); 2.48 - /* Make sure this page is cleared on other CPU's too before using it */ 2.49 - smp_wmb(); 2.50 -} 2.51 - 2.52 -static inline void clear_highpage(struct page *page) 2.53 -{ 2.54 - void *kaddr = kmap_atomic(page, KM_USER0); 2.55 - clear_page(kaddr); 2.56 - kunmap_atomic(kaddr, KM_USER0); 2.57 -} 2.58 - 2.59 -/* 2.60 - * Same but also flushes aliased cache contents to RAM. 2.61 - */ 2.62 -static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size) 2.63 -{ 2.64 - void *kaddr; 2.65 - 2.66 - BUG_ON(offset + size > PAGE_SIZE); 2.67 - 2.68 - kaddr = kmap_atomic(page, KM_USER0); 2.69 - memset((char *)kaddr + offset, 0, size); 2.70 - flush_dcache_page(page); 2.71 - kunmap_atomic(kaddr, KM_USER0); 2.72 -} 2.73 - 2.74 -static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr) 2.75 -{ 2.76 - char *vfrom, *vto; 2.77 - 2.78 - vfrom = kmap_atomic(from, KM_USER0); 2.79 - vto = kmap_atomic(to, KM_USER1); 2.80 - copy_user_page(vto, vfrom, vaddr, to); 2.81 - kunmap_atomic(vfrom, KM_USER0); 2.82 - kunmap_atomic(vto, KM_USER1); 2.83 - /* Make sure this page is cleared on other CPU's too before using it */ 2.84 - smp_wmb(); 2.85 -} 2.86 - 2.87 -static inline void copy_highpage(struct page *to, struct page *from) 2.88 -{ 2.89 - char *vfrom, *vto; 2.90 - 2.91 - vfrom = kmap_atomic(from, KM_USER0); 2.92 - vto = kmap_atomic(to, KM_USER1); 2.93 - copy_page(vto, vfrom); 2.94 - kunmap_atomic(vfrom, KM_USER0); 2.95 - kunmap_atomic(vto, KM_USER1); 2.96 -} 2.97 - 2.98 -#endif /* _LINUX_HIGHMEM_H */
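The header removed here reappears, with small updates, under linux-2.6.11-xen-sparse further down. It wraps two mapping flavours: kmap(), which may sleep and hands back a persistent mapping, and kmap_atomic(), which never sleeps and must be released before the caller can reschedule. A minimal usage sketch of both for a 2.6-era kernel; the fill_page_* functions are illustrative and not part of the changeset:

```c
#include <linux/highmem.h>	/* kmap, kunmap, kmap_atomic, KM_USER0 */
#include <linux/string.h>	/* memset */

/* May block: on highmem systems kmap() can wait for a free pkmap slot. */
static void fill_page_sleeping(struct page *page, int c)
{
	char *p = kmap(page);

	memset(p, c, PAGE_SIZE);
	kunmap(page);
}

/* Never blocks: the KM_USER0 slot is per-CPU, so the caller must not
 * sleep between kmap_atomic() and kunmap_atomic(). */
static void fill_page_atomic(struct page *page, int c)
{
	char *p = kmap_atomic(page, KM_USER0);

	memset(p, c, PAGE_SIZE);
	kunmap_atomic(p, KM_USER0);
}
```

With !CONFIG_HIGHMEM both variants collapse to page_address() plus the memset, which is exactly what the stubs at the top of the header arrange.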
3.1 --- a/linux-2.6.10-xen-sparse/mm/highmem.c Fri Mar 11 00:19:45 2005 +0000 3.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 3.3 @@ -1,614 +0,0 @@ 3.4 -/* 3.5 - * High memory handling common code and variables. 3.6 - * 3.7 - * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de 3.8 - * Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de 3.9 - * 3.10 - * 3.11 - * Redesigned the x86 32-bit VM architecture to deal with 3.12 - * 64-bit physical space. With current x86 CPUs this 3.13 - * means up to 64 Gigabytes physical RAM. 3.14 - * 3.15 - * Rewrote high memory support to move the page cache into 3.16 - * high memory. Implemented permanent (schedulable) kmaps 3.17 - * based on Linus' idea. 3.18 - * 3.19 - * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> 3.20 - */ 3.21 - 3.22 -#include <linux/mm.h> 3.23 -#include <linux/module.h> 3.24 -#include <linux/swap.h> 3.25 -#include <linux/bio.h> 3.26 -#include <linux/pagemap.h> 3.27 -#include <linux/mempool.h> 3.28 -#include <linux/blkdev.h> 3.29 -#include <linux/init.h> 3.30 -#include <linux/hash.h> 3.31 -#include <linux/highmem.h> 3.32 -#include <asm/tlbflush.h> 3.33 - 3.34 -static mempool_t *page_pool, *isa_page_pool; 3.35 - 3.36 -static void *page_pool_alloc(int gfp_mask, void *data) 3.37 -{ 3.38 - int gfp = gfp_mask | (int) (long) data; 3.39 - 3.40 - return alloc_page(gfp); 3.41 -} 3.42 - 3.43 -static void page_pool_free(void *page, void *data) 3.44 -{ 3.45 - __free_page(page); 3.46 -} 3.47 - 3.48 -/* 3.49 - * Virtual_count is not a pure "count". 3.50 - * 0 means that it is not mapped, and has not been mapped 3.51 - * since a TLB flush - it is usable. 3.52 - * 1 means that there are no users, but it has been mapped 3.53 - * since the last TLB flush - so we can't use it. 3.54 - * n means that there are (n-1) current users of it. 3.55 - */ 3.56 -#ifdef CONFIG_HIGHMEM 3.57 -static int pkmap_count[LAST_PKMAP]; 3.58 -static unsigned int last_pkmap_nr; 3.59 -static spinlock_t kmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; 3.60 - 3.61 -pte_t * pkmap_page_table; 3.62 - 3.63 -static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait); 3.64 - 3.65 -static void flush_all_zero_pkmaps(void) 3.66 -{ 3.67 - int i; 3.68 - 3.69 - flush_cache_kmaps(); 3.70 - 3.71 - for (i = 0; i < LAST_PKMAP; i++) { 3.72 - struct page *page; 3.73 - 3.74 - /* 3.75 - * zero means we don't have anything to do, 3.76 - * >1 means that it is still in use. Only 3.77 - * a count of 1 means that it is free but 3.78 - * needs to be unmapped 3.79 - */ 3.80 - if (pkmap_count[i] != 1) 3.81 - continue; 3.82 - pkmap_count[i] = 0; 3.83 - 3.84 - /* sanity check */ 3.85 - if (pte_none(pkmap_page_table[i])) 3.86 - BUG(); 3.87 - 3.88 - /* 3.89 - * Don't need an atomic fetch-and-clear op here; 3.90 - * no-one has the page mapped, and cannot get at 3.91 - * its virtual address (and hence PTE) without first 3.92 - * getting the kmap_lock (which is held here). 3.93 - * So no dangers, even with speculative execution. 
3.94 - */ 3.95 - page = pte_page(pkmap_page_table[i]); 3.96 - pte_clear(&pkmap_page_table[i]); 3.97 - 3.98 - set_page_address(page, NULL); 3.99 - } 3.100 - flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP)); 3.101 -} 3.102 - 3.103 -static inline unsigned long map_new_virtual(struct page *page) 3.104 -{ 3.105 - unsigned long vaddr; 3.106 - int count; 3.107 - 3.108 -start: 3.109 - count = LAST_PKMAP; 3.110 - /* Find an empty entry */ 3.111 - for (;;) { 3.112 - last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK; 3.113 - if (!last_pkmap_nr) { 3.114 - flush_all_zero_pkmaps(); 3.115 - count = LAST_PKMAP; 3.116 - } 3.117 - if (!pkmap_count[last_pkmap_nr]) 3.118 - break; /* Found a usable entry */ 3.119 - if (--count) 3.120 - continue; 3.121 - 3.122 - /* 3.123 - * Sleep for somebody else to unmap their entries 3.124 - */ 3.125 - { 3.126 - DECLARE_WAITQUEUE(wait, current); 3.127 - 3.128 - __set_current_state(TASK_UNINTERRUPTIBLE); 3.129 - add_wait_queue(&pkmap_map_wait, &wait); 3.130 - spin_unlock(&kmap_lock); 3.131 - schedule(); 3.132 - remove_wait_queue(&pkmap_map_wait, &wait); 3.133 - spin_lock(&kmap_lock); 3.134 - 3.135 - /* Somebody else might have mapped it while we slept */ 3.136 - if (page_address(page)) 3.137 - return (unsigned long)page_address(page); 3.138 - 3.139 - /* Re-start */ 3.140 - goto start; 3.141 - } 3.142 - } 3.143 - vaddr = PKMAP_ADDR(last_pkmap_nr); 3.144 - set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); 3.145 - 3.146 - pkmap_count[last_pkmap_nr] = 1; 3.147 - set_page_address(page, (void *)vaddr); 3.148 - 3.149 - return vaddr; 3.150 -} 3.151 - 3.152 -void kmap_flush_unused(void) 3.153 -{ 3.154 - spin_lock(&kmap_lock); 3.155 - flush_all_zero_pkmaps(); 3.156 - spin_unlock(&kmap_lock); 3.157 -} 3.158 - 3.159 -EXPORT_SYMBOL(kmap_flush_unused); 3.160 - 3.161 -void fastcall *kmap_high(struct page *page) 3.162 -{ 3.163 - unsigned long vaddr; 3.164 - 3.165 - /* 3.166 - * For highmem pages, we can't trust "virtual" until 3.167 - * after we have the lock. 3.168 - * 3.169 - * We cannot call this from interrupts, as it may block 3.170 - */ 3.171 - spin_lock(&kmap_lock); 3.172 - vaddr = (unsigned long)page_address(page); 3.173 - if (!vaddr) 3.174 - vaddr = map_new_virtual(page); 3.175 - pkmap_count[PKMAP_NR(vaddr)]++; 3.176 - if (pkmap_count[PKMAP_NR(vaddr)] < 2) 3.177 - BUG(); 3.178 - spin_unlock(&kmap_lock); 3.179 - return (void*) vaddr; 3.180 -} 3.181 - 3.182 -EXPORT_SYMBOL(kmap_high); 3.183 - 3.184 -void fastcall kunmap_high(struct page *page) 3.185 -{ 3.186 - unsigned long vaddr; 3.187 - unsigned long nr; 3.188 - int need_wakeup; 3.189 - 3.190 - spin_lock(&kmap_lock); 3.191 - vaddr = (unsigned long)page_address(page); 3.192 - if (!vaddr) 3.193 - BUG(); 3.194 - nr = PKMAP_NR(vaddr); 3.195 - 3.196 - /* 3.197 - * A count must never go down to zero 3.198 - * without a TLB flush! 3.199 - */ 3.200 - need_wakeup = 0; 3.201 - switch (--pkmap_count[nr]) { 3.202 - case 0: 3.203 - BUG(); 3.204 - case 1: 3.205 - /* 3.206 - * Avoid an unnecessary wake_up() function call. 3.207 - * The common case is pkmap_count[] == 1, but 3.208 - * no waiters. 3.209 - * The tasks queued in the wait-queue are guarded 3.210 - * by both the lock in the wait-queue-head and by 3.211 - * the kmap_lock. As the kmap_lock is held here, 3.212 - * no need for the wait-queue-head's lock. Simply 3.213 - * test if the queue is empty. 
3.214 - */ 3.215 - need_wakeup = waitqueue_active(&pkmap_map_wait); 3.216 - } 3.217 - spin_unlock(&kmap_lock); 3.218 - 3.219 - /* do wake-up, if needed, race-free outside of the spin lock */ 3.220 - if (need_wakeup) 3.221 - wake_up(&pkmap_map_wait); 3.222 -} 3.223 - 3.224 -EXPORT_SYMBOL(kunmap_high); 3.225 - 3.226 -#define POOL_SIZE 64 3.227 - 3.228 -static __init int init_emergency_pool(void) 3.229 -{ 3.230 - struct sysinfo i; 3.231 - si_meminfo(&i); 3.232 - si_swapinfo(&i); 3.233 - 3.234 - if (!i.totalhigh) 3.235 - return 0; 3.236 - 3.237 - page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL); 3.238 - if (!page_pool) 3.239 - BUG(); 3.240 - printk("highmem bounce pool size: %d pages\n", POOL_SIZE); 3.241 - 3.242 - return 0; 3.243 -} 3.244 - 3.245 -__initcall(init_emergency_pool); 3.246 - 3.247 -/* 3.248 - * highmem version, map in to vec 3.249 - */ 3.250 -static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom) 3.251 -{ 3.252 - unsigned long flags; 3.253 - unsigned char *vto; 3.254 - 3.255 - local_irq_save(flags); 3.256 - vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ); 3.257 - memcpy(vto + to->bv_offset, vfrom, to->bv_len); 3.258 - kunmap_atomic(vto, KM_BOUNCE_READ); 3.259 - local_irq_restore(flags); 3.260 -} 3.261 - 3.262 -#else /* CONFIG_HIGHMEM */ 3.263 - 3.264 -#define bounce_copy_vec(to, vfrom) \ 3.265 - memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len) 3.266 - 3.267 -#endif 3.268 - 3.269 -#define ISA_POOL_SIZE 16 3.270 - 3.271 -/* 3.272 - * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA 3.273 - * as the max address, so check if the pool has already been created. 3.274 - */ 3.275 -int init_emergency_isa_pool(void) 3.276 -{ 3.277 - if (isa_page_pool) 3.278 - return 0; 3.279 - 3.280 - isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA); 3.281 - if (!isa_page_pool) 3.282 - BUG(); 3.283 - 3.284 - printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE); 3.285 - return 0; 3.286 -} 3.287 - 3.288 -/* 3.289 - * Simple bounce buffer support for highmem pages. Depending on the 3.290 - * queue gfp mask set, *to may or may not be a highmem page. 
kmap it 3.291 - * always, it will do the Right Thing 3.292 - */ 3.293 -static void copy_to_high_bio_irq(struct bio *to, struct bio *from) 3.294 -{ 3.295 - unsigned char *vfrom; 3.296 - struct bio_vec *tovec, *fromvec; 3.297 - int i; 3.298 - 3.299 - __bio_for_each_segment(tovec, to, i, 0) { 3.300 - fromvec = from->bi_io_vec + i; 3.301 - 3.302 - /* 3.303 - * not bounced 3.304 - */ 3.305 - if (tovec->bv_page == fromvec->bv_page) 3.306 - continue; 3.307 - 3.308 - /* 3.309 - * fromvec->bv_offset and fromvec->bv_len might have been 3.310 - * modified by the block layer, so use the original copy, 3.311 - * bounce_copy_vec already uses tovec->bv_len 3.312 - */ 3.313 - vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; 3.314 - 3.315 - flush_dcache_page(tovec->bv_page); 3.316 - bounce_copy_vec(tovec, vfrom); 3.317 - } 3.318 -} 3.319 - 3.320 -static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) 3.321 -{ 3.322 - struct bio *bio_orig = bio->bi_private; 3.323 - struct bio_vec *bvec, *org_vec; 3.324 - int i; 3.325 - 3.326 - if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags)) 3.327 - set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags); 3.328 - 3.329 - /* 3.330 - * free up bounce indirect pages used 3.331 - */ 3.332 - __bio_for_each_segment(bvec, bio, i, 0) { 3.333 - org_vec = bio_orig->bi_io_vec + i; 3.334 - if (bvec->bv_page == org_vec->bv_page) 3.335 - continue; 3.336 - 3.337 - mempool_free(bvec->bv_page, pool); 3.338 - } 3.339 - 3.340 - bio_endio(bio_orig, bio_orig->bi_size, err); 3.341 - bio_put(bio); 3.342 -} 3.343 - 3.344 -static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err) 3.345 -{ 3.346 - if (bio->bi_size) 3.347 - return 1; 3.348 - 3.349 - bounce_end_io(bio, page_pool, err); 3.350 - return 0; 3.351 -} 3.352 - 3.353 -static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err) 3.354 -{ 3.355 - if (bio->bi_size) 3.356 - return 1; 3.357 - 3.358 - bounce_end_io(bio, isa_page_pool, err); 3.359 - return 0; 3.360 -} 3.361 - 3.362 -static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err) 3.363 -{ 3.364 - struct bio *bio_orig = bio->bi_private; 3.365 - 3.366 - if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 3.367 - copy_to_high_bio_irq(bio_orig, bio); 3.368 - 3.369 - bounce_end_io(bio, pool, err); 3.370 -} 3.371 - 3.372 -static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err) 3.373 -{ 3.374 - if (bio->bi_size) 3.375 - return 1; 3.376 - 3.377 - __bounce_end_io_read(bio, page_pool, err); 3.378 - return 0; 3.379 -} 3.380 - 3.381 -static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err) 3.382 -{ 3.383 - if (bio->bi_size) 3.384 - return 1; 3.385 - 3.386 - __bounce_end_io_read(bio, isa_page_pool, err); 3.387 - return 0; 3.388 -} 3.389 - 3.390 -static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, 3.391 - mempool_t *pool) 3.392 -{ 3.393 - struct page *page; 3.394 - struct bio *bio = NULL; 3.395 - int i, rw = bio_data_dir(*bio_orig); 3.396 - struct bio_vec *to, *from; 3.397 - 3.398 - bio_for_each_segment(from, *bio_orig, i) { 3.399 - page = from->bv_page; 3.400 - 3.401 - /* 3.402 - * is destination page below bounce pfn? 
3.403 - */ 3.404 - if (page_to_pfn(page) < q->bounce_pfn) 3.405 - continue; 3.406 - 3.407 - /* 3.408 - * irk, bounce it 3.409 - */ 3.410 - if (!bio) 3.411 - bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt); 3.412 - 3.413 - to = bio->bi_io_vec + i; 3.414 - 3.415 - to->bv_page = mempool_alloc(pool, q->bounce_gfp); 3.416 - to->bv_len = from->bv_len; 3.417 - to->bv_offset = from->bv_offset; 3.418 - 3.419 - if (rw == WRITE) { 3.420 - char *vto, *vfrom; 3.421 - 3.422 - flush_dcache_page(from->bv_page); 3.423 - vto = page_address(to->bv_page) + to->bv_offset; 3.424 - vfrom = kmap(from->bv_page) + from->bv_offset; 3.425 - memcpy(vto, vfrom, to->bv_len); 3.426 - kunmap(from->bv_page); 3.427 - } 3.428 - } 3.429 - 3.430 - /* 3.431 - * no pages bounced 3.432 - */ 3.433 - if (!bio) 3.434 - return; 3.435 - 3.436 - /* 3.437 - * at least one page was bounced, fill in possible non-highmem 3.438 - * pages 3.439 - */ 3.440 - bio_for_each_segment(from, *bio_orig, i) { 3.441 - to = bio_iovec_idx(bio, i); 3.442 - if (!to->bv_page) { 3.443 - to->bv_page = from->bv_page; 3.444 - to->bv_len = from->bv_len; 3.445 - to->bv_offset = from->bv_offset; 3.446 - } 3.447 - } 3.448 - 3.449 - bio->bi_bdev = (*bio_orig)->bi_bdev; 3.450 - bio->bi_flags |= (1 << BIO_BOUNCED); 3.451 - bio->bi_sector = (*bio_orig)->bi_sector; 3.452 - bio->bi_rw = (*bio_orig)->bi_rw; 3.453 - 3.454 - bio->bi_vcnt = (*bio_orig)->bi_vcnt; 3.455 - bio->bi_idx = (*bio_orig)->bi_idx; 3.456 - bio->bi_size = (*bio_orig)->bi_size; 3.457 - 3.458 - if (pool == page_pool) { 3.459 - bio->bi_end_io = bounce_end_io_write; 3.460 - if (rw == READ) 3.461 - bio->bi_end_io = bounce_end_io_read; 3.462 - } else { 3.463 - bio->bi_end_io = bounce_end_io_write_isa; 3.464 - if (rw == READ) 3.465 - bio->bi_end_io = bounce_end_io_read_isa; 3.466 - } 3.467 - 3.468 - bio->bi_private = *bio_orig; 3.469 - *bio_orig = bio; 3.470 -} 3.471 - 3.472 -void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig) 3.473 -{ 3.474 - mempool_t *pool; 3.475 - 3.476 - /* 3.477 - * for non-isa bounce case, just check if the bounce pfn is equal 3.478 - * to or bigger than the highest pfn in the system -- in that case, 3.479 - * don't waste time iterating over bio segments 3.480 - */ 3.481 - if (!(q->bounce_gfp & GFP_DMA)) { 3.482 - if (q->bounce_pfn >= blk_max_pfn) 3.483 - return; 3.484 - pool = page_pool; 3.485 - } else { 3.486 - BUG_ON(!isa_page_pool); 3.487 - pool = isa_page_pool; 3.488 - } 3.489 - 3.490 - /* 3.491 - * slow path 3.492 - */ 3.493 - __blk_queue_bounce(q, bio_orig, pool); 3.494 -} 3.495 - 3.496 -EXPORT_SYMBOL(blk_queue_bounce); 3.497 - 3.498 -#if defined(HASHED_PAGE_VIRTUAL) 3.499 - 3.500 -#define PA_HASH_ORDER 7 3.501 - 3.502 -/* 3.503 - * Describes one page->virtual association 3.504 - */ 3.505 -struct page_address_map { 3.506 - struct page *page; 3.507 - void *virtual; 3.508 - struct list_head list; 3.509 -}; 3.510 - 3.511 -/* 3.512 - * page_address_map freelist, allocated from page_address_maps. 
3.513 - */ 3.514 -static struct list_head page_address_pool; /* freelist */ 3.515 -static spinlock_t pool_lock; /* protects page_address_pool */ 3.516 - 3.517 -/* 3.518 - * Hash table bucket 3.519 - */ 3.520 -static struct page_address_slot { 3.521 - struct list_head lh; /* List of page_address_maps */ 3.522 - spinlock_t lock; /* Protect this bucket's list */ 3.523 -} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER]; 3.524 - 3.525 -static struct page_address_slot *page_slot(struct page *page) 3.526 -{ 3.527 - return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)]; 3.528 -} 3.529 - 3.530 -void *page_address(struct page *page) 3.531 -{ 3.532 - unsigned long flags; 3.533 - void *ret; 3.534 - struct page_address_slot *pas; 3.535 - 3.536 - if (!PageHighMem(page)) 3.537 - return lowmem_page_address(page); 3.538 - 3.539 - pas = page_slot(page); 3.540 - ret = NULL; 3.541 - spin_lock_irqsave(&pas->lock, flags); 3.542 - if (!list_empty(&pas->lh)) { 3.543 - struct page_address_map *pam; 3.544 - 3.545 - list_for_each_entry(pam, &pas->lh, list) { 3.546 - if (pam->page == page) { 3.547 - ret = pam->virtual; 3.548 - goto done; 3.549 - } 3.550 - } 3.551 - } 3.552 -done: 3.553 - spin_unlock_irqrestore(&pas->lock, flags); 3.554 - return ret; 3.555 -} 3.556 - 3.557 -EXPORT_SYMBOL(page_address); 3.558 - 3.559 -void set_page_address(struct page *page, void *virtual) 3.560 -{ 3.561 - unsigned long flags; 3.562 - struct page_address_slot *pas; 3.563 - struct page_address_map *pam; 3.564 - 3.565 - BUG_ON(!PageHighMem(page)); 3.566 - 3.567 - pas = page_slot(page); 3.568 - if (virtual) { /* Add */ 3.569 - BUG_ON(list_empty(&page_address_pool)); 3.570 - 3.571 - spin_lock_irqsave(&pool_lock, flags); 3.572 - pam = list_entry(page_address_pool.next, 3.573 - struct page_address_map, list); 3.574 - list_del(&pam->list); 3.575 - spin_unlock_irqrestore(&pool_lock, flags); 3.576 - 3.577 - pam->page = page; 3.578 - pam->virtual = virtual; 3.579 - 3.580 - spin_lock_irqsave(&pas->lock, flags); 3.581 - list_add_tail(&pam->list, &pas->lh); 3.582 - spin_unlock_irqrestore(&pas->lock, flags); 3.583 - } else { /* Remove */ 3.584 - spin_lock_irqsave(&pas->lock, flags); 3.585 - list_for_each_entry(pam, &pas->lh, list) { 3.586 - if (pam->page == page) { 3.587 - list_del(&pam->list); 3.588 - spin_unlock_irqrestore(&pas->lock, flags); 3.589 - spin_lock_irqsave(&pool_lock, flags); 3.590 - list_add_tail(&pam->list, &page_address_pool); 3.591 - spin_unlock_irqrestore(&pool_lock, flags); 3.592 - goto done; 3.593 - } 3.594 - } 3.595 - spin_unlock_irqrestore(&pas->lock, flags); 3.596 - } 3.597 -done: 3.598 - return; 3.599 -} 3.600 - 3.601 -static struct page_address_map page_address_maps[LAST_PKMAP]; 3.602 - 3.603 -void __init page_address_init(void) 3.604 -{ 3.605 - int i; 3.606 - 3.607 - INIT_LIST_HEAD(&page_address_pool); 3.608 - for (i = 0; i < ARRAY_SIZE(page_address_maps); i++) 3.609 - list_add(&page_address_maps[i].list, &page_address_pool); 3.610 - for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) { 3.611 - INIT_LIST_HEAD(&page_address_htable[i].lh); 3.612 - spin_lock_init(&page_address_htable[i].lock); 3.613 - } 3.614 - spin_lock_init(&pool_lock); 3.615 -} 3.616 - 3.617 -#endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
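The 2.6.10 mm/highmem.c deleted above and the 2.6.11 copy added at the end of this changeset are nearly identical; the visible deltas are the kmap_lock definition (old struct-initializer style versus the DEFINE_SPINLOCK() macro) and the second bounce fill-in loop, which switches from bio_for_each_segment() to __bio_for_each_segment(..., 0) so it always starts at segment 0 rather than at bi_idx. A minimal sketch of the spinlock idiom the 2.6.11 file adopts, shown here only for comparison:

```c
#include <linux/cache.h>	/* __cacheline_aligned_in_smp */
#include <linux/spinlock.h>

/* 2.6.10 form (removed above):
 *   static spinlock_t kmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 *
 * 2.6.11 form (added below): DEFINE_SPINLOCK() expands to the same
 * statically initialised spinlock_t object. */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
```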
4.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 4.2 +++ b/linux-2.6.11-xen-sparse/include/linux/highmem.h Fri Mar 11 00:28:04 2005 +0000 4.3 @@ -0,0 +1,106 @@ 4.4 +#ifndef _LINUX_HIGHMEM_H 4.5 +#define _LINUX_HIGHMEM_H 4.6 + 4.7 +#include <linux/config.h> 4.8 +#include <linux/fs.h> 4.9 +#include <linux/mm.h> 4.10 + 4.11 +#include <asm/cacheflush.h> 4.12 + 4.13 +#ifdef CONFIG_HIGHMEM 4.14 + 4.15 +#include <asm/highmem.h> 4.16 + 4.17 +/* declarations for linux/mm/highmem.c */ 4.18 +unsigned int nr_free_highpages(void); 4.19 +void kmap_flush_unused(void); 4.20 + 4.21 +#else /* CONFIG_HIGHMEM */ 4.22 + 4.23 +static inline unsigned int nr_free_highpages(void) { return 0; } 4.24 +static inline void kmap_flush_unused(void) { } 4.25 + 4.26 +static inline void *kmap(struct page *page) 4.27 +{ 4.28 + might_sleep(); 4.29 + return page_address(page); 4.30 +} 4.31 + 4.32 +#define kunmap(page) do { (void) (page); } while (0) 4.33 + 4.34 +#define kmap_atomic(page, idx) page_address(page) 4.35 +#define kunmap_atomic(addr, idx) do { } while (0) 4.36 +#define kmap_atomic_to_page(ptr) virt_to_page(ptr) 4.37 + 4.38 +#endif /* CONFIG_HIGHMEM */ 4.39 + 4.40 +/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ 4.41 +static inline void clear_user_highpage(struct page *page, unsigned long vaddr) 4.42 +{ 4.43 + void *addr = kmap_atomic(page, KM_USER0); 4.44 + clear_user_page(addr, vaddr, page); 4.45 + kunmap_atomic(addr, KM_USER0); 4.46 + /* Make sure this page is cleared on other CPU's too before using it */ 4.47 + smp_wmb(); 4.48 +} 4.49 + 4.50 +#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 4.51 +static inline struct page * 4.52 +alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr) 4.53 +{ 4.54 + struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, vaddr); 4.55 + 4.56 + if (page) 4.57 + clear_user_highpage(page, vaddr); 4.58 + 4.59 + return page; 4.60 +} 4.61 +#endif 4.62 + 4.63 +static inline void clear_highpage(struct page *page) 4.64 +{ 4.65 + void *kaddr = kmap_atomic(page, KM_USER0); 4.66 + clear_page(kaddr); 4.67 + kunmap_atomic(kaddr, KM_USER0); 4.68 +} 4.69 + 4.70 +/* 4.71 + * Same but also flushes aliased cache contents to RAM. 4.72 + */ 4.73 +static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size) 4.74 +{ 4.75 + void *kaddr; 4.76 + 4.77 + BUG_ON(offset + size > PAGE_SIZE); 4.78 + 4.79 + kaddr = kmap_atomic(page, KM_USER0); 4.80 + memset((char *)kaddr + offset, 0, size); 4.81 + flush_dcache_page(page); 4.82 + kunmap_atomic(kaddr, KM_USER0); 4.83 +} 4.84 + 4.85 +static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr) 4.86 +{ 4.87 + char *vfrom, *vto; 4.88 + 4.89 + vfrom = kmap_atomic(from, KM_USER0); 4.90 + vto = kmap_atomic(to, KM_USER1); 4.91 + copy_user_page(vto, vfrom, vaddr, to); 4.92 + kunmap_atomic(vfrom, KM_USER0); 4.93 + kunmap_atomic(vto, KM_USER1); 4.94 + /* Make sure this page is cleared on other CPU's too before using it */ 4.95 + smp_wmb(); 4.96 +} 4.97 + 4.98 +static inline void copy_highpage(struct page *to, struct page *from) 4.99 +{ 4.100 + char *vfrom, *vto; 4.101 + 4.102 + vfrom = kmap_atomic(from, KM_USER0); 4.103 + vto = kmap_atomic(to, KM_USER1); 4.104 + copy_page(vto, vfrom); 4.105 + kunmap_atomic(vfrom, KM_USER0); 4.106 + kunmap_atomic(vto, KM_USER1); 4.107 +} 4.108 + 4.109 +#endif /* _LINUX_HIGHMEM_H */
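Besides dropping the highmem_start_page declaration, the main addition in the 2.6.11 header above is the generic alloc_zeroed_user_highpage() fallback. Architectures that can hand out pre-zeroed pages more cheaply define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and supply their own version. A sketch of what such an override might look like, assuming the allocator's __GFP_ZERO flag is available; this is illustrative and not part of the changeset:

```c
#include <linux/gfp.h>
#include <linux/mm.h>

#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

/* Ask the page allocator for an already-zeroed page instead of doing a
 * separate clear_user_highpage() pass as the generic helper above does. */
static inline struct page *
alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
{
	return alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr);
}
```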
5.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 5.2 +++ b/linux-2.6.11-xen-sparse/mm/highmem.c Fri Mar 11 00:28:04 2005 +0000 5.3 @@ -0,0 +1,614 @@ 5.4 +/* 5.5 + * High memory handling common code and variables. 5.6 + * 5.7 + * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de 5.8 + * Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de 5.9 + * 5.10 + * 5.11 + * Redesigned the x86 32-bit VM architecture to deal with 5.12 + * 64-bit physical space. With current x86 CPUs this 5.13 + * means up to 64 Gigabytes physical RAM. 5.14 + * 5.15 + * Rewrote high memory support to move the page cache into 5.16 + * high memory. Implemented permanent (schedulable) kmaps 5.17 + * based on Linus' idea. 5.18 + * 5.19 + * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> 5.20 + */ 5.21 + 5.22 +#include <linux/mm.h> 5.23 +#include <linux/module.h> 5.24 +#include <linux/swap.h> 5.25 +#include <linux/bio.h> 5.26 +#include <linux/pagemap.h> 5.27 +#include <linux/mempool.h> 5.28 +#include <linux/blkdev.h> 5.29 +#include <linux/init.h> 5.30 +#include <linux/hash.h> 5.31 +#include <linux/highmem.h> 5.32 +#include <asm/tlbflush.h> 5.33 + 5.34 +static mempool_t *page_pool, *isa_page_pool; 5.35 + 5.36 +static void *page_pool_alloc(int gfp_mask, void *data) 5.37 +{ 5.38 + int gfp = gfp_mask | (int) (long) data; 5.39 + 5.40 + return alloc_page(gfp); 5.41 +} 5.42 + 5.43 +static void page_pool_free(void *page, void *data) 5.44 +{ 5.45 + __free_page(page); 5.46 +} 5.47 + 5.48 +/* 5.49 + * Virtual_count is not a pure "count". 5.50 + * 0 means that it is not mapped, and has not been mapped 5.51 + * since a TLB flush - it is usable. 5.52 + * 1 means that there are no users, but it has been mapped 5.53 + * since the last TLB flush - so we can't use it. 5.54 + * n means that there are (n-1) current users of it. 5.55 + */ 5.56 +#ifdef CONFIG_HIGHMEM 5.57 +static int pkmap_count[LAST_PKMAP]; 5.58 +static unsigned int last_pkmap_nr; 5.59 +static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock); 5.60 + 5.61 +pte_t * pkmap_page_table; 5.62 + 5.63 +static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait); 5.64 + 5.65 +static void flush_all_zero_pkmaps(void) 5.66 +{ 5.67 + int i; 5.68 + 5.69 + flush_cache_kmaps(); 5.70 + 5.71 + for (i = 0; i < LAST_PKMAP; i++) { 5.72 + struct page *page; 5.73 + 5.74 + /* 5.75 + * zero means we don't have anything to do, 5.76 + * >1 means that it is still in use. Only 5.77 + * a count of 1 means that it is free but 5.78 + * needs to be unmapped 5.79 + */ 5.80 + if (pkmap_count[i] != 1) 5.81 + continue; 5.82 + pkmap_count[i] = 0; 5.83 + 5.84 + /* sanity check */ 5.85 + if (pte_none(pkmap_page_table[i])) 5.86 + BUG(); 5.87 + 5.88 + /* 5.89 + * Don't need an atomic fetch-and-clear op here; 5.90 + * no-one has the page mapped, and cannot get at 5.91 + * its virtual address (and hence PTE) without first 5.92 + * getting the kmap_lock (which is held here). 5.93 + * So no dangers, even with speculative execution. 
5.94 + */ 5.95 + page = pte_page(pkmap_page_table[i]); 5.96 + pte_clear(&pkmap_page_table[i]); 5.97 + 5.98 + set_page_address(page, NULL); 5.99 + } 5.100 + flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP)); 5.101 +} 5.102 + 5.103 +static inline unsigned long map_new_virtual(struct page *page) 5.104 +{ 5.105 + unsigned long vaddr; 5.106 + int count; 5.107 + 5.108 +start: 5.109 + count = LAST_PKMAP; 5.110 + /* Find an empty entry */ 5.111 + for (;;) { 5.112 + last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK; 5.113 + if (!last_pkmap_nr) { 5.114 + flush_all_zero_pkmaps(); 5.115 + count = LAST_PKMAP; 5.116 + } 5.117 + if (!pkmap_count[last_pkmap_nr]) 5.118 + break; /* Found a usable entry */ 5.119 + if (--count) 5.120 + continue; 5.121 + 5.122 + /* 5.123 + * Sleep for somebody else to unmap their entries 5.124 + */ 5.125 + { 5.126 + DECLARE_WAITQUEUE(wait, current); 5.127 + 5.128 + __set_current_state(TASK_UNINTERRUPTIBLE); 5.129 + add_wait_queue(&pkmap_map_wait, &wait); 5.130 + spin_unlock(&kmap_lock); 5.131 + schedule(); 5.132 + remove_wait_queue(&pkmap_map_wait, &wait); 5.133 + spin_lock(&kmap_lock); 5.134 + 5.135 + /* Somebody else might have mapped it while we slept */ 5.136 + if (page_address(page)) 5.137 + return (unsigned long)page_address(page); 5.138 + 5.139 + /* Re-start */ 5.140 + goto start; 5.141 + } 5.142 + } 5.143 + vaddr = PKMAP_ADDR(last_pkmap_nr); 5.144 + set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); 5.145 + 5.146 + pkmap_count[last_pkmap_nr] = 1; 5.147 + set_page_address(page, (void *)vaddr); 5.148 + 5.149 + return vaddr; 5.150 +} 5.151 + 5.152 +void kmap_flush_unused(void) 5.153 +{ 5.154 + spin_lock(&kmap_lock); 5.155 + flush_all_zero_pkmaps(); 5.156 + spin_unlock(&kmap_lock); 5.157 +} 5.158 + 5.159 +EXPORT_SYMBOL(kmap_flush_unused); 5.160 + 5.161 +void fastcall *kmap_high(struct page *page) 5.162 +{ 5.163 + unsigned long vaddr; 5.164 + 5.165 + /* 5.166 + * For highmem pages, we can't trust "virtual" until 5.167 + * after we have the lock. 5.168 + * 5.169 + * We cannot call this from interrupts, as it may block 5.170 + */ 5.171 + spin_lock(&kmap_lock); 5.172 + vaddr = (unsigned long)page_address(page); 5.173 + if (!vaddr) 5.174 + vaddr = map_new_virtual(page); 5.175 + pkmap_count[PKMAP_NR(vaddr)]++; 5.176 + if (pkmap_count[PKMAP_NR(vaddr)] < 2) 5.177 + BUG(); 5.178 + spin_unlock(&kmap_lock); 5.179 + return (void*) vaddr; 5.180 +} 5.181 + 5.182 +EXPORT_SYMBOL(kmap_high); 5.183 + 5.184 +void fastcall kunmap_high(struct page *page) 5.185 +{ 5.186 + unsigned long vaddr; 5.187 + unsigned long nr; 5.188 + int need_wakeup; 5.189 + 5.190 + spin_lock(&kmap_lock); 5.191 + vaddr = (unsigned long)page_address(page); 5.192 + if (!vaddr) 5.193 + BUG(); 5.194 + nr = PKMAP_NR(vaddr); 5.195 + 5.196 + /* 5.197 + * A count must never go down to zero 5.198 + * without a TLB flush! 5.199 + */ 5.200 + need_wakeup = 0; 5.201 + switch (--pkmap_count[nr]) { 5.202 + case 0: 5.203 + BUG(); 5.204 + case 1: 5.205 + /* 5.206 + * Avoid an unnecessary wake_up() function call. 5.207 + * The common case is pkmap_count[] == 1, but 5.208 + * no waiters. 5.209 + * The tasks queued in the wait-queue are guarded 5.210 + * by both the lock in the wait-queue-head and by 5.211 + * the kmap_lock. As the kmap_lock is held here, 5.212 + * no need for the wait-queue-head's lock. Simply 5.213 + * test if the queue is empty. 
5.214 + */ 5.215 + need_wakeup = waitqueue_active(&pkmap_map_wait); 5.216 + } 5.217 + spin_unlock(&kmap_lock); 5.218 + 5.219 + /* do wake-up, if needed, race-free outside of the spin lock */ 5.220 + if (need_wakeup) 5.221 + wake_up(&pkmap_map_wait); 5.222 +} 5.223 + 5.224 +EXPORT_SYMBOL(kunmap_high); 5.225 + 5.226 +#define POOL_SIZE 64 5.227 + 5.228 +static __init int init_emergency_pool(void) 5.229 +{ 5.230 + struct sysinfo i; 5.231 + si_meminfo(&i); 5.232 + si_swapinfo(&i); 5.233 + 5.234 + if (!i.totalhigh) 5.235 + return 0; 5.236 + 5.237 + page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL); 5.238 + if (!page_pool) 5.239 + BUG(); 5.240 + printk("highmem bounce pool size: %d pages\n", POOL_SIZE); 5.241 + 5.242 + return 0; 5.243 +} 5.244 + 5.245 +__initcall(init_emergency_pool); 5.246 + 5.247 +/* 5.248 + * highmem version, map in to vec 5.249 + */ 5.250 +static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom) 5.251 +{ 5.252 + unsigned long flags; 5.253 + unsigned char *vto; 5.254 + 5.255 + local_irq_save(flags); 5.256 + vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ); 5.257 + memcpy(vto + to->bv_offset, vfrom, to->bv_len); 5.258 + kunmap_atomic(vto, KM_BOUNCE_READ); 5.259 + local_irq_restore(flags); 5.260 +} 5.261 + 5.262 +#else /* CONFIG_HIGHMEM */ 5.263 + 5.264 +#define bounce_copy_vec(to, vfrom) \ 5.265 + memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len) 5.266 + 5.267 +#endif 5.268 + 5.269 +#define ISA_POOL_SIZE 16 5.270 + 5.271 +/* 5.272 + * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA 5.273 + * as the max address, so check if the pool has already been created. 5.274 + */ 5.275 +int init_emergency_isa_pool(void) 5.276 +{ 5.277 + if (isa_page_pool) 5.278 + return 0; 5.279 + 5.280 + isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA); 5.281 + if (!isa_page_pool) 5.282 + BUG(); 5.283 + 5.284 + printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE); 5.285 + return 0; 5.286 +} 5.287 + 5.288 +/* 5.289 + * Simple bounce buffer support for highmem pages. Depending on the 5.290 + * queue gfp mask set, *to may or may not be a highmem page. 
kmap it 5.291 + * always, it will do the Right Thing 5.292 + */ 5.293 +static void copy_to_high_bio_irq(struct bio *to, struct bio *from) 5.294 +{ 5.295 + unsigned char *vfrom; 5.296 + struct bio_vec *tovec, *fromvec; 5.297 + int i; 5.298 + 5.299 + __bio_for_each_segment(tovec, to, i, 0) { 5.300 + fromvec = from->bi_io_vec + i; 5.301 + 5.302 + /* 5.303 + * not bounced 5.304 + */ 5.305 + if (tovec->bv_page == fromvec->bv_page) 5.306 + continue; 5.307 + 5.308 + /* 5.309 + * fromvec->bv_offset and fromvec->bv_len might have been 5.310 + * modified by the block layer, so use the original copy, 5.311 + * bounce_copy_vec already uses tovec->bv_len 5.312 + */ 5.313 + vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; 5.314 + 5.315 + flush_dcache_page(tovec->bv_page); 5.316 + bounce_copy_vec(tovec, vfrom); 5.317 + } 5.318 +} 5.319 + 5.320 +static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) 5.321 +{ 5.322 + struct bio *bio_orig = bio->bi_private; 5.323 + struct bio_vec *bvec, *org_vec; 5.324 + int i; 5.325 + 5.326 + if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags)) 5.327 + set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags); 5.328 + 5.329 + /* 5.330 + * free up bounce indirect pages used 5.331 + */ 5.332 + __bio_for_each_segment(bvec, bio, i, 0) { 5.333 + org_vec = bio_orig->bi_io_vec + i; 5.334 + if (bvec->bv_page == org_vec->bv_page) 5.335 + continue; 5.336 + 5.337 + mempool_free(bvec->bv_page, pool); 5.338 + } 5.339 + 5.340 + bio_endio(bio_orig, bio_orig->bi_size, err); 5.341 + bio_put(bio); 5.342 +} 5.343 + 5.344 +static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err) 5.345 +{ 5.346 + if (bio->bi_size) 5.347 + return 1; 5.348 + 5.349 + bounce_end_io(bio, page_pool, err); 5.350 + return 0; 5.351 +} 5.352 + 5.353 +static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err) 5.354 +{ 5.355 + if (bio->bi_size) 5.356 + return 1; 5.357 + 5.358 + bounce_end_io(bio, isa_page_pool, err); 5.359 + return 0; 5.360 +} 5.361 + 5.362 +static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err) 5.363 +{ 5.364 + struct bio *bio_orig = bio->bi_private; 5.365 + 5.366 + if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 5.367 + copy_to_high_bio_irq(bio_orig, bio); 5.368 + 5.369 + bounce_end_io(bio, pool, err); 5.370 +} 5.371 + 5.372 +static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err) 5.373 +{ 5.374 + if (bio->bi_size) 5.375 + return 1; 5.376 + 5.377 + __bounce_end_io_read(bio, page_pool, err); 5.378 + return 0; 5.379 +} 5.380 + 5.381 +static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err) 5.382 +{ 5.383 + if (bio->bi_size) 5.384 + return 1; 5.385 + 5.386 + __bounce_end_io_read(bio, isa_page_pool, err); 5.387 + return 0; 5.388 +} 5.389 + 5.390 +static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, 5.391 + mempool_t *pool) 5.392 +{ 5.393 + struct page *page; 5.394 + struct bio *bio = NULL; 5.395 + int i, rw = bio_data_dir(*bio_orig); 5.396 + struct bio_vec *to, *from; 5.397 + 5.398 + bio_for_each_segment(from, *bio_orig, i) { 5.399 + page = from->bv_page; 5.400 + 5.401 + /* 5.402 + * is destination page below bounce pfn? 
5.403 + */ 5.404 + if (page_to_pfn(page) < q->bounce_pfn) 5.405 + continue; 5.406 + 5.407 + /* 5.408 + * irk, bounce it 5.409 + */ 5.410 + if (!bio) 5.411 + bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt); 5.412 + 5.413 + to = bio->bi_io_vec + i; 5.414 + 5.415 + to->bv_page = mempool_alloc(pool, q->bounce_gfp); 5.416 + to->bv_len = from->bv_len; 5.417 + to->bv_offset = from->bv_offset; 5.418 + 5.419 + if (rw == WRITE) { 5.420 + char *vto, *vfrom; 5.421 + 5.422 + flush_dcache_page(from->bv_page); 5.423 + vto = page_address(to->bv_page) + to->bv_offset; 5.424 + vfrom = kmap(from->bv_page) + from->bv_offset; 5.425 + memcpy(vto, vfrom, to->bv_len); 5.426 + kunmap(from->bv_page); 5.427 + } 5.428 + } 5.429 + 5.430 + /* 5.431 + * no pages bounced 5.432 + */ 5.433 + if (!bio) 5.434 + return; 5.435 + 5.436 + /* 5.437 + * at least one page was bounced, fill in possible non-highmem 5.438 + * pages 5.439 + */ 5.440 + __bio_for_each_segment(from, *bio_orig, i, 0) { 5.441 + to = bio_iovec_idx(bio, i); 5.442 + if (!to->bv_page) { 5.443 + to->bv_page = from->bv_page; 5.444 + to->bv_len = from->bv_len; 5.445 + to->bv_offset = from->bv_offset; 5.446 + } 5.447 + } 5.448 + 5.449 + bio->bi_bdev = (*bio_orig)->bi_bdev; 5.450 + bio->bi_flags |= (1 << BIO_BOUNCED); 5.451 + bio->bi_sector = (*bio_orig)->bi_sector; 5.452 + bio->bi_rw = (*bio_orig)->bi_rw; 5.453 + 5.454 + bio->bi_vcnt = (*bio_orig)->bi_vcnt; 5.455 + bio->bi_idx = (*bio_orig)->bi_idx; 5.456 + bio->bi_size = (*bio_orig)->bi_size; 5.457 + 5.458 + if (pool == page_pool) { 5.459 + bio->bi_end_io = bounce_end_io_write; 5.460 + if (rw == READ) 5.461 + bio->bi_end_io = bounce_end_io_read; 5.462 + } else { 5.463 + bio->bi_end_io = bounce_end_io_write_isa; 5.464 + if (rw == READ) 5.465 + bio->bi_end_io = bounce_end_io_read_isa; 5.466 + } 5.467 + 5.468 + bio->bi_private = *bio_orig; 5.469 + *bio_orig = bio; 5.470 +} 5.471 + 5.472 +void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig) 5.473 +{ 5.474 + mempool_t *pool; 5.475 + 5.476 + /* 5.477 + * for non-isa bounce case, just check if the bounce pfn is equal 5.478 + * to or bigger than the highest pfn in the system -- in that case, 5.479 + * don't waste time iterating over bio segments 5.480 + */ 5.481 + if (!(q->bounce_gfp & GFP_DMA)) { 5.482 + if (q->bounce_pfn >= blk_max_pfn) 5.483 + return; 5.484 + pool = page_pool; 5.485 + } else { 5.486 + BUG_ON(!isa_page_pool); 5.487 + pool = isa_page_pool; 5.488 + } 5.489 + 5.490 + /* 5.491 + * slow path 5.492 + */ 5.493 + __blk_queue_bounce(q, bio_orig, pool); 5.494 +} 5.495 + 5.496 +EXPORT_SYMBOL(blk_queue_bounce); 5.497 + 5.498 +#if defined(HASHED_PAGE_VIRTUAL) 5.499 + 5.500 +#define PA_HASH_ORDER 7 5.501 + 5.502 +/* 5.503 + * Describes one page->virtual association 5.504 + */ 5.505 +struct page_address_map { 5.506 + struct page *page; 5.507 + void *virtual; 5.508 + struct list_head list; 5.509 +}; 5.510 + 5.511 +/* 5.512 + * page_address_map freelist, allocated from page_address_maps. 
5.513 + */ 5.514 +static struct list_head page_address_pool; /* freelist */ 5.515 +static spinlock_t pool_lock; /* protects page_address_pool */ 5.516 + 5.517 +/* 5.518 + * Hash table bucket 5.519 + */ 5.520 +static struct page_address_slot { 5.521 + struct list_head lh; /* List of page_address_maps */ 5.522 + spinlock_t lock; /* Protect this bucket's list */ 5.523 +} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER]; 5.524 + 5.525 +static struct page_address_slot *page_slot(struct page *page) 5.526 +{ 5.527 + return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)]; 5.528 +} 5.529 + 5.530 +void *page_address(struct page *page) 5.531 +{ 5.532 + unsigned long flags; 5.533 + void *ret; 5.534 + struct page_address_slot *pas; 5.535 + 5.536 + if (!PageHighMem(page)) 5.537 + return lowmem_page_address(page); 5.538 + 5.539 + pas = page_slot(page); 5.540 + ret = NULL; 5.541 + spin_lock_irqsave(&pas->lock, flags); 5.542 + if (!list_empty(&pas->lh)) { 5.543 + struct page_address_map *pam; 5.544 + 5.545 + list_for_each_entry(pam, &pas->lh, list) { 5.546 + if (pam->page == page) { 5.547 + ret = pam->virtual; 5.548 + goto done; 5.549 + } 5.550 + } 5.551 + } 5.552 +done: 5.553 + spin_unlock_irqrestore(&pas->lock, flags); 5.554 + return ret; 5.555 +} 5.556 + 5.557 +EXPORT_SYMBOL(page_address); 5.558 + 5.559 +void set_page_address(struct page *page, void *virtual) 5.560 +{ 5.561 + unsigned long flags; 5.562 + struct page_address_slot *pas; 5.563 + struct page_address_map *pam; 5.564 + 5.565 + BUG_ON(!PageHighMem(page)); 5.566 + 5.567 + pas = page_slot(page); 5.568 + if (virtual) { /* Add */ 5.569 + BUG_ON(list_empty(&page_address_pool)); 5.570 + 5.571 + spin_lock_irqsave(&pool_lock, flags); 5.572 + pam = list_entry(page_address_pool.next, 5.573 + struct page_address_map, list); 5.574 + list_del(&pam->list); 5.575 + spin_unlock_irqrestore(&pool_lock, flags); 5.576 + 5.577 + pam->page = page; 5.578 + pam->virtual = virtual; 5.579 + 5.580 + spin_lock_irqsave(&pas->lock, flags); 5.581 + list_add_tail(&pam->list, &pas->lh); 5.582 + spin_unlock_irqrestore(&pas->lock, flags); 5.583 + } else { /* Remove */ 5.584 + spin_lock_irqsave(&pas->lock, flags); 5.585 + list_for_each_entry(pam, &pas->lh, list) { 5.586 + if (pam->page == page) { 5.587 + list_del(&pam->list); 5.588 + spin_unlock_irqrestore(&pas->lock, flags); 5.589 + spin_lock_irqsave(&pool_lock, flags); 5.590 + list_add_tail(&pam->list, &page_address_pool); 5.591 + spin_unlock_irqrestore(&pool_lock, flags); 5.592 + goto done; 5.593 + } 5.594 + } 5.595 + spin_unlock_irqrestore(&pas->lock, flags); 5.596 + } 5.597 +done: 5.598 + return; 5.599 +} 5.600 + 5.601 +static struct page_address_map page_address_maps[LAST_PKMAP]; 5.602 + 5.603 +void __init page_address_init(void) 5.604 +{ 5.605 + int i; 5.606 + 5.607 + INIT_LIST_HEAD(&page_address_pool); 5.608 + for (i = 0; i < ARRAY_SIZE(page_address_maps); i++) 5.609 + list_add(&page_address_maps[i].list, &page_address_pool); 5.610 + for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) { 5.611 + INIT_LIST_HEAD(&page_address_htable[i].lh); 5.612 + spin_lock_init(&page_address_htable[i].lock); 5.613 + } 5.614 + spin_lock_init(&pool_lock); 5.615 +} 5.616 + 5.617 +#endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
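The bounce-buffer half of the file above is driven entirely by the queue's bounce limit: __blk_queue_bounce() only copies segments whose pages sit at or above q->bounce_pfn, using page_pool for ordinary highmem bouncing or isa_page_pool when the queue is restricted to ISA DMA. A sketch of how a 2.6-era block driver would opt in; the function name is illustrative and not part of the changeset:

```c
#include <linux/blkdev.h>

/* Tell the block layer what this device can address; any bio segment
 * above the limit gets bounced through the mempools in mm/highmem.c. */
static void example_set_bounce_limit(request_queue_t *q, int isa_only)
{
	if (isa_only)
		/* <16MB-capable devices; also triggers init_emergency_isa_pool() */
		blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
	else
		/* bounce only pages that live in highmem */
		blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
```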