ia64/xen-unstable
changeset 11766:3971f49ce592
[BLK] tap: Allocate pages for foreign mappings individually rather than contiguously.
Signed-off-by: Keir Fraser <keir@xensource.com>
author | kfraser@localhost.localdomain |
---|---|
date | Fri Oct 06 10:30:43 2006 +0100 (2006-10-06) |
parents | 34b2348dfe4b |
children | 7efaaae37415 |
files | linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c |
line diff
1.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c Fri Oct 06 08:09:52 2006 +0100 1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c Fri Oct 06 10:30:43 2006 +0100 1.3 @@ -186,16 +186,18 @@ static inline unsigned int RTN_PEND_IDX( 1.4 1.5 #define BLKBACK_INVALID_HANDLE (~0) 1.6 1.7 -typedef struct mmap_page { 1.8 - unsigned long start; 1.9 - struct page *mpage; 1.10 -} mmap_page_t; 1.11 +static struct page **foreign_pages[MAX_DYNAMIC_MEM]; 1.12 +static inline unsigned long idx_to_kaddr( 1.13 + unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx) 1.14 +{ 1.15 + unsigned int arr_idx = req_idx*BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx; 1.16 + unsigned long pfn = page_to_pfn(foreign_pages[mmap_idx][arr_idx]); 1.17 + return (unsigned long)pfn_to_kaddr(pfn); 1.18 +} 1.19 1.20 -static mmap_page_t mmap_start[MAX_DYNAMIC_MEM]; 1.21 static unsigned short mmap_alloc = 0; 1.22 static unsigned short mmap_lock = 0; 1.23 static unsigned short mmap_inuse = 0; 1.24 -static unsigned long *pending_addrs[MAX_DYNAMIC_MEM]; 1.25 1.26 /****************************************************************** 1.27 * GRANT HANDLES 1.28 @@ -726,63 +728,33 @@ static void make_response(blkif_t *blkif 1.29 static int req_increase(void) 1.30 { 1.31 int i, j; 1.32 - struct page *page; 1.33 - int ret; 1.34 - 1.35 - ret = -EINVAL; 1.36 - if (mmap_alloc >= MAX_PENDING_REQS || mmap_lock) 1.37 - goto done; 1.38 + struct page **pages = NULL; 1.39 1.40 -#ifdef __ia64__ 1.41 - extern unsigned long alloc_empty_foreign_map_page_range( 1.42 - unsigned long pages); 1.43 - mmap_start[mmap_alloc].start = (unsigned long) 1.44 - alloc_empty_foreign_map_page_range(mmap_pages); 1.45 -#else /* ! 
ia64 */ 1.46 - page = balloon_alloc_empty_page_range(mmap_pages); 1.47 - ret = -ENOMEM; 1.48 - if (page == NULL) { 1.49 - printk("%s balloon_alloc_empty_page_range gave NULL\n", __FUNCTION__); 1.50 - goto done; 1.51 + if (mmap_alloc >= MAX_PENDING_REQS || mmap_lock) 1.52 + return -EINVAL; 1.53 + 1.54 + pending_reqs[mmap_alloc] = kzalloc(sizeof(pending_req_t) * 1.55 + blkif_reqs, GFP_KERNEL); 1.56 + pages = kmalloc(sizeof(pages[0]) * mmap_pages, GFP_KERNEL); 1.57 + 1.58 + if (!pending_reqs[mmap_alloc] || !pages) 1.59 + goto out_of_memory; 1.60 + 1.61 + for (i = 0; i < mmap_pages; i++) { 1.62 + pages[i] = balloon_alloc_empty_page(); 1.63 + if (!pages[i]) { 1.64 + while (--i >= 0) 1.65 + balloon_free_empty_page(pages[i]); 1.66 + goto out_of_memory; 1.67 + } 1.68 } 1.69 1.70 - /* Pin all of the pages. */ 1.71 - for (i=0; i<mmap_pages; i++) 1.72 - get_page(&page[i]); 1.73 - 1.74 - mmap_start[mmap_alloc].start = 1.75 - (unsigned long)pfn_to_kaddr(page_to_pfn(page)); 1.76 - mmap_start[mmap_alloc].mpage = page; 1.77 - 1.78 -#endif 1.79 - 1.80 - pending_reqs[mmap_alloc] = kzalloc(sizeof(pending_req_t) * 1.81 - blkif_reqs, GFP_KERNEL); 1.82 - pending_addrs[mmap_alloc] = kzalloc(sizeof(unsigned long) * 1.83 - mmap_pages, GFP_KERNEL); 1.84 + foreign_pages[mmap_alloc] = pages; 1.85 1.86 - ret = -ENOMEM; 1.87 - if (!pending_reqs[mmap_alloc] || !pending_addrs[mmap_alloc]) { 1.88 - kfree(pending_reqs[mmap_alloc]); 1.89 - kfree(pending_addrs[mmap_alloc]); 1.90 - WPRINTK("%s: out of memory\n", __FUNCTION__); 1.91 - ret = -ENOMEM; 1.92 - goto done; 1.93 - } 1.94 - 1.95 - ret = 0; 1.96 + DPRINTK("%s: reqs=%d, pages=%d\n", 1.97 + __FUNCTION__, blkif_reqs, mmap_pages); 1.98 1.99 - DPRINTK("%s: reqs=%d, pages=%d, mmap_vstart=0x%lx\n", 1.100 - __FUNCTION__, blkif_reqs, mmap_pages, 1.101 - mmap_start[mmap_alloc].start); 1.102 - 1.103 - BUG_ON(mmap_start[mmap_alloc].start == 0); 1.104 - 1.105 - for (i = 0; i < mmap_pages; i++) 1.106 - pending_addrs[mmap_alloc][i] = 1.107 - 
mmap_start[mmap_alloc].start + (i << PAGE_SHIFT); 1.108 - 1.109 - for (i = 0; i < MAX_PENDING_REQS ; i++) { 1.110 + for (i = 0; i < MAX_PENDING_REQS; i++) { 1.111 list_add_tail(&pending_reqs[mmap_alloc][i].free_list, 1.112 &pending_free); 1.113 pending_reqs[mmap_alloc][i].mem_idx = mmap_alloc; 1.114 @@ -793,30 +765,27 @@ static int req_increase(void) 1.115 1.116 mmap_alloc++; 1.117 DPRINTK("# MMAPs increased to %d\n",mmap_alloc); 1.118 -done: 1.119 - return ret; 1.120 + return 0; 1.121 + 1.122 + out_of_memory: 1.123 + kfree(pages); 1.124 + kfree(pending_reqs[mmap_alloc]); 1.125 + WPRINTK("%s: out of memory\n", __FUNCTION__); 1.126 + return -ENOMEM; 1.127 } 1.128 1.129 static void mmap_req_del(int mmap) 1.130 { 1.131 int i; 1.132 - struct page *page; 1.133 - 1.134 - /*Spinlock already acquired*/ 1.135 - kfree(pending_reqs[mmap]); 1.136 - kfree(pending_addrs[mmap]); 1.137 1.138 -#ifdef __ia64__ 1.139 - /*Not sure what goes here yet!*/ 1.140 -#else 1.141 + BUG_ON(!spin_is_locked(&pending_free_lock)); 1.142 1.143 - /* Unpin all of the pages. 
*/ 1.144 - page = mmap_start[mmap].mpage; 1.145 - for (i=0; i<mmap_pages; i++) 1.146 - put_page(&page[i]); 1.147 + kfree(pending_reqs[mmap]); 1.148 1.149 - balloon_dealloc_empty_page_range(mmap_start[mmap].mpage, mmap_pages); 1.150 -#endif 1.151 + for (i = 0; i < mmap_pages; i++) 1.152 + balloon_free_empty_page(foreign_pages[mmap][i]); 1.153 + kfree(foreign_pages[mmap]); 1.154 + foreign_pages[mmap] = NULL; 1.155 1.156 mmap_lock = 0; 1.157 DPRINTK("# MMAPs decreased to %d\n",mmap_alloc); 1.158 @@ -887,7 +856,7 @@ static void fast_flush_area(pending_req_ 1.159 mmap_idx = req->mem_idx; 1.160 1.161 for (i = 0; i < req->nr_pages; i++) { 1.162 - kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start, k_idx, i); 1.163 + kvaddr = idx_to_kaddr(mmap_idx, k_idx, i); 1.164 uvaddr = MMAP_VADDR(info->user_vstart, u_idx, i); 1.165 1.166 khandle = &pending_handle(mmap_idx, k_idx, i); 1.167 @@ -896,7 +865,7 @@ static void fast_flush_area(pending_req_ 1.168 continue; 1.169 } 1.170 gnttab_set_unmap_op(&unmap[invcount], 1.171 - MMAP_VADDR(mmap_start[mmap_idx].start, k_idx, i), 1.172 + idx_to_kaddr(mmap_idx, k_idx, i), 1.173 GNTMAP_host_map, khandle->kernel); 1.174 invcount++; 1.175 1.176 @@ -1030,9 +999,8 @@ static int blktap_read_ufe_ring(tap_blki 1.177 struct page *pg; 1.178 int offset; 1.179 1.180 - uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j); 1.181 - kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start, 1.182 - pending_idx, j); 1.183 + uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j); 1.184 + kvaddr = idx_to_kaddr(mmap_idx, pending_idx, j); 1.185 1.186 pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT); 1.187 ClearPageReserved(pg); 1.188 @@ -1214,8 +1182,7 @@ static void dispatch_rw_block_io(blkif_t 1.189 uint32_t flags; 1.190 1.191 uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i); 1.192 - kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start, 1.193 - pending_idx, i); 1.194 + kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i); 1.195 page = virt_to_page(kvaddr); 1.196 1.197 sector = 
req->sector_number + (8*i); 1.198 @@ -1267,8 +1234,7 @@ static void dispatch_rw_block_io(blkif_t 1.199 struct page *pg; 1.200 1.201 uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2); 1.202 - kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start, 1.203 - pending_idx, i/2); 1.204 + kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i/2); 1.205 1.206 if (unlikely(map[i].status != 0)) { 1.207 WPRINTK("invalid kernel buffer -- " 1.208 @@ -1298,8 +1264,7 @@ static void dispatch_rw_block_io(blkif_t 1.209 unsigned long kvaddr; 1.210 struct page *pg; 1.211 1.212 - kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start, 1.213 - pending_idx, i); 1.214 + kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i); 1.215 pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT); 1.216 SetPageReserved(pg); 1.217 }