ia64/xen-unstable
changeset 19348:dd3219cd019a
Code cleanups after page offline patch.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Thu Mar 12 15:31:36 2009 +0000 (2009-03-12) |
parents | c9a35fb19e75 |
children | 97f78142cd4c |
files | xen/common/page_alloc.c xen/common/sysctl.c xen/include/asm-x86/mm.h |
line diff
1.1 --- a/xen/common/page_alloc.c Thu Mar 12 15:08:08 2009 +0000 1.2 +++ b/xen/common/page_alloc.c Thu Mar 12 15:31:36 2009 +0000 1.3 @@ -75,11 +75,11 @@ static DEFINE_SPINLOCK(page_scrub_lock); 1.4 PAGE_LIST_HEAD(page_scrub_list); 1.5 static unsigned long scrub_pages; 1.6 1.7 -/* Offlined page list, protected by heap_lock */ 1.8 +/* Offlined page list, protected by heap_lock. */ 1.9 PAGE_LIST_HEAD(page_offlined_list); 1.10 +/* Broken page list, protected by heap_lock. */ 1.11 +PAGE_LIST_HEAD(page_broken_list); 1.12 1.13 -/* Broken page list, protected by heap_lock */ 1.14 -PAGE_LIST_HEAD(page_broken_list); 1.15 /********************* 1.16 * ALLOCATION BITMAP 1.17 * One bit per page of memory. Bit set => page is allocated. 1.18 @@ -427,9 +427,7 @@ static struct page_info *alloc_heap_page 1.19 return pg; 1.20 } 1.21 1.22 -/* 1.23 - * Remove any offlined page in the buddy poined by head 1.24 - */ 1.25 +/* Remove any offlined page in the buddy pointed to by head. */ 1.26 static int reserve_offlined_page(struct page_info *head) 1.27 { 1.28 unsigned int node = phys_to_nid(page_to_maddr(head)); 1.29 @@ -448,7 +446,7 @@ static int reserve_offlined_page(struct 1.30 struct page_info *pg; 1.31 int next_order; 1.32 1.33 - if (test_bit(_PGC_offlined, &cur_head->count_info)) 1.34 + if ( test_bit(_PGC_offlined, &cur_head->count_info) ) 1.35 { 1.36 cur_head++; 1.37 continue; 1.38 @@ -456,29 +454,27 @@ static int reserve_offlined_page(struct 1.39 1.40 next_order = cur_order = 0; 1.41 1.42 - while (cur_order < head_order) 1.43 + while ( cur_order < head_order ) 1.44 { 1.45 next_order = cur_order + 1; 1.46 1.47 - if ( (cur_head + (1 << next_order)) >= (head + ( 1 << head_order))) 1.48 + if ( (cur_head + (1 << next_order)) >= (head + ( 1 << head_order)) ) 1.49 goto merge; 1.50 1.51 - for (i = (1 << cur_order), pg = cur_head + (1 << cur_order); 1.52 - i < (1 << next_order); 1.53 - i++, pg ++) 1.54 - if (test_bit(_PGC_offlined, &pg->count_info)) 1.55 + for ( i = (1 << cur_order), pg = 
cur_head + (1 << cur_order ); 1.56 + i < (1 << next_order); 1.57 + i++, pg++ ) 1.58 + if ( test_bit(_PGC_offlined, &pg->count_info) ) 1.59 break; 1.60 - if (i == ( 1 << next_order)) 1.61 + if ( i == ( 1 << next_order) ) 1.62 { 1.63 cur_order = next_order; 1.64 continue; 1.65 } 1.66 else 1.67 { 1.68 - /* 1.69 - * We don't need considering merge outside the head_order 1.70 - */ 1.71 -merge: 1.72 + merge: 1.73 + /* We don't consider merging outside the head_order. */ 1.74 page_list_add_tail(cur_head, &heap(node, zone, cur_order)); 1.75 PFN_ORDER(cur_head) = cur_order; 1.76 cur_head += (1 << cur_order); 1.77 @@ -487,21 +483,20 @@ merge: 1.78 } 1.79 } 1.80 1.81 - for (cur_head = head; cur_head < head + ( 1UL << head_order); cur_head++) 1.82 + for ( cur_head = head; cur_head < head + ( 1UL << head_order); cur_head++ ) 1.83 { 1.84 - if (!test_bit(_PGC_offlined, &cur_head->count_info)) 1.85 + if ( !test_bit(_PGC_offlined, &cur_head->count_info) ) 1.86 continue; 1.87 1.88 - avail[node][zone] --; 1.89 + avail[node][zone]--; 1.90 1.91 map_alloc(page_to_mfn(cur_head), 1); 1.92 1.93 - if (test_bit(_PGC_broken, &cur_head->count_info)) 1.94 - page_list_add_tail(cur_head, &page_broken_list); 1.95 - else 1.96 - page_list_add_tail(cur_head, &page_offlined_list); 1.97 + page_list_add_tail(cur_head, 1.98 + test_bit(_PGC_broken, &cur_head->count_info) ? 
1.99 + &page_broken_list : &page_offlined_list); 1.100 1.101 - count ++; 1.102 + count++; 1.103 } 1.104 1.105 return count; 1.106 @@ -534,7 +529,7 @@ static void free_heap_pages( 1.107 */ 1.108 ASSERT(!(pg[i].count_info & PGC_offlined)); 1.109 pg[i].count_info &= PGC_offlining | PGC_broken; 1.110 - if (pg[i].count_info & PGC_offlining) 1.111 + if ( pg[i].count_info & PGC_offlining ) 1.112 { 1.113 pg[i].count_info &= ~PGC_offlining; 1.114 pg[i].count_info |= PGC_offlined; 1.115 @@ -584,7 +579,7 @@ static void free_heap_pages( 1.116 PFN_ORDER(pg) = order; 1.117 page_list_add_tail(pg, &heap(node, zone, order)); 1.118 1.119 - if (tainted) 1.120 + if ( tainted ) 1.121 reserve_offlined_page(pg); 1.122 1.123 spin_unlock(&heap_lock); 1.124 @@ -607,9 +602,6 @@ static unsigned long mark_page_offline(s 1.125 unsigned long nx, x, y = pg->count_info; 1.126 1.127 ASSERT(page_is_ram_type(page_to_mfn(pg), RAM_TYPE_CONVENTIONAL)); 1.128 - /* 1.129 - * Caller gurantee the page will not be reassigned during this process 1.130 - */ 1.131 ASSERT(spin_is_locked(&heap_lock)); 1.132 1.133 do { 1.134 @@ -617,21 +609,23 @@ static unsigned long mark_page_offline(s 1.135 1.136 if ( ((x & PGC_offlined_broken) == PGC_offlined_broken) ) 1.137 return y; 1.138 - /* PGC_offlined means it is free pages */ 1.139 - if (x & PGC_offlined) 1.140 + 1.141 + if ( x & PGC_offlined ) 1.142 { 1.143 - if (broken && !(nx & PGC_broken)) 1.144 + /* PGC_offlined means it is a free page. */ 1.145 + if ( broken && !(nx & PGC_broken) ) 1.146 nx |= PGC_broken; 1.147 else 1.148 return y; 1.149 } 1.150 - /* It is not offlined, not reserved page */ 1.151 - else if ( allocated_in_map(page_to_mfn(pg)) ) 1.152 - nx |= PGC_offlining; 1.153 else 1.154 - nx |= PGC_offlined; 1.155 + { 1.156 + /* It is not offlined, not reserved page */ 1.157 + nx |= (allocated_in_map(page_to_mfn(pg)) ? 
1.158 + PGC_offlining : PGC_offlined); 1.159 + } 1.160 1.161 - if (broken) 1.162 + if ( broken ) 1.163 nx |= PGC_broken; 1.164 } while ( (y = cmpxchg(&pg->count_info, x, nx)) != x ); 1.165 1.166 @@ -644,7 +638,6 @@ static int reserve_heap_page(struct page 1.167 unsigned int i, node = phys_to_nid(page_to_maddr(pg)); 1.168 unsigned int zone = page_to_zone(pg); 1.169 1.170 - /* get the header */ 1.171 for ( i = 0; i <= MAX_ORDER; i++ ) 1.172 { 1.173 struct page_info *tmp; 1.174 @@ -652,7 +645,7 @@ static int reserve_heap_page(struct page 1.175 if ( page_list_empty(&heap(node, zone, i)) ) 1.176 continue; 1.177 1.178 - page_list_for_each_safe(head, tmp, &heap(node, zone, i)) 1.179 + page_list_for_each_safe ( head, tmp, &heap(node, zone, i) ) 1.180 { 1.181 if ( (head <= pg) && 1.182 (head + (1UL << i) > pg) ) 1.183 @@ -664,9 +657,6 @@ static int reserve_heap_page(struct page 1.184 1.185 } 1.186 1.187 -/* 1.188 - * offline one page 1.189 - */ 1.190 int offline_page(unsigned long mfn, int broken, uint32_t *status) 1.191 { 1.192 unsigned long old_info = 0; 1.193 @@ -674,7 +664,7 @@ int offline_page(unsigned long mfn, int 1.194 int ret = 0; 1.195 struct page_info *pg; 1.196 1.197 - if (mfn > max_page) 1.198 + if ( mfn > max_page ) 1.199 { 1.200 dprintk(XENLOG_WARNING, 1.201 "try to offline page out of range %lx\n", mfn); 1.202 @@ -684,7 +674,6 @@ int offline_page(unsigned long mfn, int 1.203 *status = 0; 1.204 pg = mfn_to_page(mfn); 1.205 1.206 - 1.207 #if defined(__x86_64__) 1.208 /* Xen's txt mfn in x86_64 is reserved in e820 */ 1.209 if ( is_xen_fixed_mfn(mfn) ) 1.210 @@ -701,7 +690,7 @@ int offline_page(unsigned long mfn, int 1.211 * N.B. 
xen's txt in x86_64 is marked reserved and handled already 1.212 * Also kexec range is reserved 1.213 */ 1.214 - if (!page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL)) 1.215 + if ( !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL) ) 1.216 { 1.217 *status = PG_OFFLINE_FAILED | PG_OFFLINE_NOT_CONV_RAM; 1.218 return -EINVAL; 1.219 @@ -717,11 +706,11 @@ int offline_page(unsigned long mfn, int 1.220 reserve_heap_page(pg); 1.221 *status = PG_OFFLINE_OFFLINED; 1.222 } 1.223 - else if (test_bit(_PGC_offlined, &pg->count_info)) 1.224 + else if ( test_bit(_PGC_offlined, &pg->count_info) ) 1.225 { 1.226 *status = PG_OFFLINE_OFFLINED; 1.227 } 1.228 - else if ((owner = page_get_owner_and_reference(pg))) 1.229 + else if ( (owner = page_get_owner_and_reference(pg)) ) 1.230 { 1.231 *status = PG_OFFLINE_OWNED | PG_OFFLINE_PENDING | 1.232 (owner->domain_id << PG_OFFLINE_OWNER_SHIFT); 1.233 @@ -747,7 +736,7 @@ int offline_page(unsigned long mfn, int 1.234 (DOMID_INVALID << PG_OFFLINE_OWNER_SHIFT ); 1.235 } 1.236 1.237 - if (broken) 1.238 + if ( broken ) 1.239 *status |= PG_OFFLINE_BROKEN; 1.240 1.241 spin_unlock(&heap_lock); 1.242 @@ -782,21 +771,21 @@ unsigned int online_page(unsigned long m 1.243 ret = -EINVAL; 1.244 *status = PG_ONLINE_FAILED |PG_ONLINE_BROKEN; 1.245 } 1.246 - else if (pg->count_info & PGC_offlined) 1.247 + else if ( pg->count_info & PGC_offlined ) 1.248 { 1.249 clear_bit(_PGC_offlined, &pg->count_info); 1.250 page_list_del(pg, &page_offlined_list); 1.251 *status = PG_ONLINE_ONLINED; 1.252 free = 1; 1.253 } 1.254 - else if (pg->count_info & PGC_offlining) 1.255 + else if ( pg->count_info & PGC_offlining ) 1.256 { 1.257 clear_bit(_PGC_offlining, &pg->count_info); 1.258 *status = PG_ONLINE_ONLINED; 1.259 } 1.260 spin_unlock(&heap_lock); 1.261 1.262 - if (free) 1.263 + if ( free ) 1.264 free_heap_pages(pg, 0); 1.265 1.266 return ret;
2.1 --- a/xen/common/sysctl.c Thu Mar 12 15:08:08 2009 +0000 2.2 +++ b/xen/common/sysctl.c Thu Mar 12 15:31:36 2009 +0000 2.3 @@ -241,7 +241,7 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysc 2.4 ptr = status = xmalloc_bytes( sizeof(uint32_t) * 2.5 (op->u.page_offline.end - 2.6 op->u.page_offline.start + 1)); 2.7 - if (!status) 2.8 + if ( !status ) 2.9 { 2.10 dprintk(XENLOG_WARNING, "Out of memory for page offline op\n"); 2.11 ret = -ENOMEM; 2.12 @@ -255,7 +255,7 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysc 2.13 pfn <= op->u.page_offline.end; 2.14 pfn ++ ) 2.15 { 2.16 - switch (op->u.page_offline.cmd) 2.17 + switch ( op->u.page_offline.cmd ) 2.18 { 2.19 /* Shall revert her if failed, or leave caller do it? */ 2.20 case sysctl_page_offline: 2.21 @@ -278,12 +278,14 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysc 2.22 break; 2.23 } 2.24 2.25 - if (copy_to_guest(op->u.page_offline.status, status, 2.26 - op->u.page_offline.end - op->u.page_offline.start + 1)) 2.27 + if ( copy_to_guest( 2.28 + op->u.page_offline.status, status, 2.29 + op->u.page_offline.end - op->u.page_offline.start + 1) ) 2.30 { 2.31 ret = -EFAULT; 2.32 break; 2.33 } 2.34 + 2.35 xfree(status); 2.36 } 2.37 break;
3.1 --- a/xen/include/asm-x86/mm.h Thu Mar 12 15:08:08 2009 +0000 3.2 +++ b/xen/include/asm-x86/mm.h Thu Mar 12 15:31:36 2009 +0000 3.3 @@ -198,27 +198,26 @@ struct page_info 3.4 /* 3-bit PAT/PCD/PWT cache-attribute hint. */ 3.5 #define PGC_cacheattr_base PG_shift(6) 3.6 #define PGC_cacheattr_mask PG_mask(7, 6) 3.7 - 3.8 - /* Page is broken? */ 3.9 - #define _PGC_broken PG_shift(7) 3.10 - #define PGC_broken PG_mask(1, 7) 3.11 - /* Page is offline pending ? */ 3.12 - #define _PGC_offlining PG_shift(8) 3.13 - #define PGC_offlining PG_mask(1, 8) 3.14 - /* Page is offlined */ 3.15 - #define _PGC_offlined PG_shift(9) 3.16 - #define PGC_offlined PG_mask(1, 9) 3.17 - #define PGC_offlined_broken (PGC_offlined | PGC_broken) 3.18 - 3.19 - #define is_page_offlining(page) ((page)->count_info & PGC_offlining) 3.20 - #define is_page_offlined(page) ((page)->count_info & PGC_offlined) 3.21 - #define is_page_broken(page) ((page)->count_info & PGC_broken) 3.22 - #define is_page_online(page) (!is_page_offlined(page)) 3.23 + /* Page is broken? */ 3.24 +#define _PGC_broken PG_shift(7) 3.25 +#define PGC_broken PG_mask(1, 7) 3.26 + /* Page is offline pending ? */ 3.27 +#define _PGC_offlining PG_shift(8) 3.28 +#define PGC_offlining PG_mask(1, 8) 3.29 + /* Page is offlined */ 3.30 +#define _PGC_offlined PG_shift(9) 3.31 +#define PGC_offlined PG_mask(1, 9) 3.32 +#define PGC_offlined_broken (PGC_offlined | PGC_broken) 3.33 3.34 /* Count of references to this frame. */ 3.35 #define PGC_count_width PG_shift(9) 3.36 #define PGC_count_mask ((1UL<<PGC_count_width)-1) 3.37 3.38 +#define is_page_offlining(page) ((page)->count_info & PGC_offlining) 3.39 +#define is_page_offlined(page) ((page)->count_info & PGC_offlined) 3.40 +#define is_page_broken(page) ((page)->count_info & PGC_broken) 3.41 +#define is_page_online(page) (!is_page_offlined(page)) 3.42 + 3.43 #if defined(__i386__) 3.44 #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page)) 3.45 #define is_xen_heap_mfn(mfn) ({ \