ia64/xen-unstable
changeset 2346:f2b75edc9ce1
bitkeeper revision 1.1159.1.105 (412cbda71t8CNbo2C1IX2_T3wi4QIQ)
Move count_info outside pfn_info union, to where it belongs.
author   | kaf24@labyrinth.cl.cam.ac.uk
date     | Wed Aug 25 16:26:15 2004 +0000 (2004-08-25)
parents  | bd470dc06d31
children | 950fc272bee9 388784b73401
files    | xen/arch/x86/domain.c xen/arch/x86/memory.c xen/common/dom0_ops.c xen/common/dom_mem_ops.c xen/common/keyhandler.c xen/common/page_alloc.c xen/include/asm-x86/mm.h xen/include/asm-x86/shadow.h
line diff
--- a/xen/arch/x86/domain.c Wed Aug 25 15:40:30 2004 +0000
+++ b/xen/arch/x86/domain.c Wed Aug 25 16:26:15 2004 +0000
@@ -480,7 +480,7 @@ void domain_relinquish_memory(struct dom
     {
         page = list_entry(ent, struct pfn_info, list);

-        if ( test_and_clear_bit(_PGC_allocated, &page->u.inuse.count_info) )
+        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
             put_page(page);
     }

@@ -489,10 +489,10 @@ void domain_relinquish_memory(struct dom
     {
         page = list_entry(ent, struct pfn_info, list);

-        if ( test_and_clear_bit(_PGC_guest_pinned, &page->u.inuse.count_info) )
+        if ( test_and_clear_bit(_PGC_guest_pinned, &page->count_info) )
             put_page_and_type(page);

-        if ( test_and_clear_bit(_PGC_allocated, &page->u.inuse.count_info) )
+        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
             put_page(page);

         /*
@@ -668,9 +668,9 @@ int construct_dom0(struct domain *p,
           mfn++ )
     {
         page = &frame_table[mfn];
-        page->u.inuse.domain = p;
-        page->u.inuse.type_info = 0;
-        page->u.inuse.count_info = PGC_always_set | PGC_allocated | 1;
+        page->u.inuse.domain = p;
+        page->u.inuse.type_info = 0;
+        page->count_info = PGC_always_set | PGC_allocated | 1;
         list_add_tail(&page->list, &p->page_list);
         p->tot_pages++; p->max_pages++;
     }
@@ -715,7 +715,7 @@ int construct_dom0(struct domain *p,
         *l1tab++ = mk_l1_pgentry((mfn << PAGE_SHIFT) | L1_PROT);

         page = &frame_table[mfn];
-        set_bit(_PGC_tlb_flush_on_type_change, &page->u.inuse.count_info);
+        set_bit(_PGC_tlb_flush_on_type_change, &page->count_info);
         if ( !get_page_and_type(page, p, PGT_writable_page) )
             BUG();

@@ -739,7 +739,7 @@ int construct_dom0(struct domain *p,
         /* Get another ref to L2 page so that it can be pinned. */
         if ( !get_page_and_type(page, p, PGT_l2_page_table) )
             BUG();
-        set_bit(_PGC_guest_pinned, &page->u.inuse.count_info);
+        set_bit(_PGC_guest_pinned, &page->count_info);
     }
     else
     {
--- a/xen/arch/x86/memory.c Wed Aug 25 15:40:30 2004 +0000
+++ b/xen/arch/x86/memory.c Wed Aug 25 16:26:15 2004 +0000
@@ -146,6 +146,23 @@ void arch_init_memory(void)
     static void ptwr_disable(void);
     unsigned long mfn;

+    /*
+     * We are rather picky about the layout of 'struct pfn_info'. The
+     * count_info and domain fields must be adjacent, as we perform atomic
+     * 64-bit operations on them. Also, just for sanity, we assert the size
+     * of the structure here.
+     */
+    if ( (offsetof(struct pfn_info, u.inuse.domain) !=
+          (offsetof(struct pfn_info, count_info) + sizeof(u32))) ||
+         (sizeof(struct pfn_info) != 24) )
+    {
+        printk("Weird pfn_info layout (%ld,%ld,%d)\n",
+               offsetof(struct pfn_info, count_info),
+               offsetof(struct pfn_info, u.inuse.domain),
+               sizeof(struct pfn_info));
+        for ( ; ; ) ;
+    }
+
     memset(percpu_info, 0, sizeof(percpu_info));

     vm_assist_info[VMASST_TYPE_writable_pagetables].enable =
@@ -154,7 +171,7 @@ void arch_init_memory(void)
         ptwr_disable;

     for ( mfn = 0; mfn < max_page; mfn++ )
-        frame_table[mfn].u.inuse.count_info |= PGC_always_set;
+        frame_table[mfn].count_info |= PGC_always_set;

     /* Initialise to a magic of 0x55555555 so easier to spot bugs later. */
     memset(machine_to_phys_mapping, 0x55, 4<<20);
@@ -182,9 +199,9 @@ void arch_init_memory(void)
           mfn < virt_to_phys(&machine_to_phys_mapping[1<<20])>>PAGE_SHIFT;
           mfn++ )
     {
-        frame_table[mfn].u.inuse.count_info |= PGC_allocated | 1;
-        frame_table[mfn].u.inuse.type_info = PGT_gdt_page | 1; /* non-RW */
-        frame_table[mfn].u.inuse.domain = dom_xen;
+        frame_table[mfn].count_info |= PGC_allocated | 1;
+        frame_table[mfn].u.inuse.type_info = PGT_gdt_page | 1; /* non-RW */
+        frame_table[mfn].u.inuse.domain = dom_xen;
     }
 }

@@ -417,11 +434,12 @@ get_page_from_l1e(
      * No need for LOCK prefix -- we know that count_info is never zero
      * because it contains PGC_always_set.
      */
+    ASSERT(test_bit(_PGC_always_set, &page->count_info));
     __asm__ __volatile__(
         "cmpxchg8b %2"
-        : "=a" (e), "=d" (count_info),
-          "=m" (*(volatile u64 *)(&page->u.inuse.domain))
-        : "0" (0), "1" (0), "b" (0), "c" (0) );
+        : "=d" (e), "=a" (count_info),
+          "=m" (*(volatile u64 *)(&page->count_info))
+        : "0" (0), "1" (0), "c" (0), "b" (0) );
     if ( unlikely((count_info & PGC_count_mask) == 0) ||
          unlikely(e == NULL) || unlikely(!get_domain(e)) )
         return 0;
@@ -434,7 +452,7 @@ get_page_from_l1e(
     {
         if ( unlikely(!get_page_type(page, PGT_writable_page)) )
             return 0;
-        set_bit(_PGC_tlb_flush_on_type_change, &page->u.inuse.count_info);
+        set_bit(_PGC_tlb_flush_on_type_change, &page->count_info);
     }

     return 1;
@@ -741,7 +759,7 @@ static int mod_l1_entry(l1_pgentry_t *pl
 int alloc_page_type(struct pfn_info *page, unsigned int type)
 {
     if ( unlikely(test_and_clear_bit(_PGC_tlb_flush_on_type_change,
-                                     &page->u.inuse.count_info)) )
+                                     &page->count_info)) )
     {
         struct domain *p = page->u.inuse.domain;
         if ( unlikely(NEED_FLUSH(tlbflush_time[p->processor],
@@ -822,8 +840,8 @@ static int do_extended_command(unsigned
             break;
         }

-        if ( unlikely(test_and_set_bit(_PGC_guest_pinned,
-                                       &page->u.inuse.count_info)) )
+        if ( unlikely(test_and_set_bit(_PGC_guest_pinned,
+                                       &page->count_info)) )
         {
             MEM_LOG("Pfn %08lx already pinned", pfn);
             put_page_and_type(page);
@@ -840,7 +858,7 @@ static int do_extended_command(unsigned
                     ptr, page->u.inuse.domain);
         }
         else if ( likely(test_and_clear_bit(_PGC_guest_pinned,
-                                            &page->u.inuse.count_info)) )
+                                            &page->count_info)) )
         {
             put_page_and_type(page);
             put_page(page);
@@ -1007,7 +1025,7 @@ static int do_extended_command(unsigned
          * disappears then the deallocation routine will safely spin.
          */
         nd = page->u.inuse.domain;
-        y = page->u.inuse.count_info;
+        y = page->count_info;
         do {
             x = y;
             if ( unlikely((x & (PGC_count_mask|PGC_allocated)) !=
@@ -1022,9 +1040,9 @@ static int do_extended_command(unsigned
             }
             __asm__ __volatile__(
                 LOCK_PREFIX "cmpxchg8b %3"
-                : "=a" (nd), "=d" (y), "=b" (e),
-                  "=m" (*(volatile u64 *)(&page->u.inuse.domain))
-                : "0" (d), "1" (x), "b" (e), "c" (x) );
+                : "=d" (nd), "=a" (y), "=c" (e),
+                  "=m" (*(volatile u64 *)(&page->count_info))
+                : "0" (d), "1" (x), "c" (e), "b" (x) );
         }
         while ( unlikely(nd != d) || unlikely(y != x) );

@@ -1395,7 +1413,7 @@ void ptwr_reconnect_disconnected(unsigne
                  l1_pgentry_val(linear_pg_table[(unsigned long)pl2e >>
                                                 PAGE_SHIFT]) >> PAGE_SHIFT,
                  frame_table[pfn].u.inuse.type_info,
-                 frame_table[pfn].u.inuse.count_info,
+                 frame_table[pfn].count_info,
                  frame_table[pfn].u.inuse.domain->domain));

     nl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT);
@@ -1417,7 +1435,7 @@ void ptwr_reconnect_disconnected(unsigne
     PTWR_PRINTK(("[A] now pl2e %p l2e %08lx taf %08x/%08x/%u\n",
                  pl2e, l2_pgentry_val(*pl2e),
                  frame_table[pfn].u.inuse.type_info,
-                 frame_table[pfn].u.inuse.count_info,
+                 frame_table[pfn].count_info,
                  frame_table[pfn].u.inuse.domain->domain));
     ptwr_info[cpu].disconnected = ENTRIES_PER_L2_PAGETABLE;
     /* make pt page write protected */
@@ -1535,7 +1553,7 @@ int ptwr_do_page_fault(unsigned long add
         l1_pgentry_t *pl1e;
         PTWR_PRINTK(("[I] freeing l1 page %p taf %08x/%08x\n", page,
                      page->u.inuse.type_info,
-                     page->u.inuse.count_info));
+                     page->count_info));
         if (ptwr_info[cpu].writable_idx == PTWR_NR_WRITABLES)
             ptwr_flush_inactive();
         ptwr_info[cpu].writables[ptwr_info[cpu].writable_idx] = addr;
@@ -1560,7 +1578,7 @@ int ptwr_do_page_fault(unsigned long add
                                             >> PAGE_SHIFT]) >>
                                             PAGE_SHIFT,
                      frame_table[pfn].u.inuse.type_info,
-                     frame_table[pfn].u.inuse.count_info,
+                     frame_table[pfn].count_info,
                      frame_table[pfn].u.inuse.domain->domain));
        /* disconnect l1 page */
        nl2e = mk_l2_pgentry((l2_pgentry_val(*pl2e) & ~_PAGE_PRESENT));
@@ -1571,7 +1589,7 @@ int ptwr_do_page_fault(unsigned long add
        PTWR_PRINTK(("[A] now pl2e %p l2e %08lx "
                     "taf %08x/%08x/%u\n", pl2e, l2_pgentry_val(*pl2e),
                     frame_table[pfn].u.inuse.type_info,
-                    frame_table[pfn].u.inuse.count_info,
+                    frame_table[pfn].count_info,
                     frame_table[pfn].u.inuse.domain->domain));
        ptwr_info[cpu].writable_l1 = addr;
        pl1e = map_domain_mem(l2_pgentry_to_pagenr(nl2e) <<
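The layout check added to arch_init_memory() above exists because get_page_from_l1e() and the ownership-change path in do_extended_command() read count_info and u.inuse.domain as a single 64-bit quantity with cmpxchg8b; with count_info now placed first, it supplies the low dword (EAX) and the owner pointer the high dword (EDX), which is why the register constraints are swapped throughout this patch. A rough portable sketch of that snapshot, assuming a 32-bit build with 4-byte pointers (pfn_info_like and snapshot_count_and_owner are invented names, and unlike the deliberately unlocked cmpxchg8b in get_page_from_l1e() the GCC builtin below emits a LOCKed compare-and-swap):

/* Illustrative sketch only -- not part of the changeset.  Shows why
 * count_info must sit immediately before u.inuse.domain: the pair is
 * read as one 64-bit value.  32-bit build assumed. */
#include <stdint.h>

struct domain;                           /* opaque for this sketch */

struct pfn_info_like {                   /* hypothetical simplified layout */
    uint32_t       count_info;           /* low dword of the 64-bit window  */
    struct domain *domain;               /* high dword of the 64-bit window */
};

/* Atomically snapshot (count_info, domain), the way the cmpxchg8b in
 * get_page_from_l1e() does: a compare-and-swap whose 'expected' value of
 * zero can never match (in Xen, count_info always contains
 * PGC_always_set), so it simply returns the current 64-bit contents. */
static void snapshot_count_and_owner(struct pfn_info_like *pg,
                                     uint32_t *count_info,
                                     struct domain **owner)
{
    uint64_t snap = __sync_val_compare_and_swap(
        (volatile uint64_t *)&pg->count_info, (uint64_t)0, (uint64_t)0);

    *count_info = (uint32_t)snap;                      /* what the asm reads in EAX */
    *owner = (struct domain *)(uintptr_t)(snap >> 32); /* ... and in EDX */
}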
--- a/xen/common/dom0_ops.c Wed Aug 25 15:40:30 2004 +0000
+++ b/xen/common/dom0_ops.c Wed Aug 25 16:26:15 2004 +0000
@@ -628,7 +628,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
                     break;
                 }

-                if ( page->u.inuse.count_info & PGC_guest_pinned )
+                if ( page->count_info & PGC_guest_pinned )
                     type |= LPINTAB;
                 l_arr[j] |= type;
                 put_page(page);
--- a/xen/common/dom_mem_ops.c Wed Aug 25 15:40:30 2004 +0000
+++ b/xen/common/dom_mem_ops.c Wed Aug 25 16:26:15 2004 +0000
@@ -82,12 +82,10 @@ static long free_dom_mem(struct domain *
             return i;
         }

-        if ( test_and_clear_bit(_PGC_guest_pinned,
-                                &page->u.inuse.count_info) )
+        if ( test_and_clear_bit(_PGC_guest_pinned, &page->count_info) )
             put_page_and_type(page);

-        if ( test_and_clear_bit(_PGC_allocated,
-                                &page->u.inuse.count_info) )
+        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
             put_page(page);

         put_page(page);
--- a/xen/common/keyhandler.c Wed Aug 25 15:40:30 2004 +0000
+++ b/xen/common/keyhandler.c Wed Aug 25 16:26:15 2004 +0000
@@ -82,14 +82,14 @@ void do_task_queues(unsigned char key, v
         {
             page = list_entry(ent, struct pfn_info, list);
             printk("Page %08x: caf=%08x, taf=%08x\n",
-                   page_to_phys(page), page->u.inuse.count_info,
+                   page_to_phys(page), page->count_info,
                    page->u.inuse.type_info);
         }
     }

     page = virt_to_page(d->shared_info);
     printk("Shared_info@%08x: caf=%08x, taf=%08x\n",
-           page_to_phys(page), page->u.inuse.count_info,
+           page_to_phys(page), page->count_info,
            page->u.inuse.type_info);

     printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n",
--- a/xen/common/page_alloc.c Wed Aug 25 15:40:30 2004 +0000
+++ b/xen/common/page_alloc.c Wed Aug 25 16:26:15 2004 +0000
@@ -310,9 +310,9 @@ unsigned long alloc_xenheap_pages(int or

     for ( i = 0; i < (1 << order); i++ )
     {
-        pg[i].u.inuse.count_info = PGC_always_set;
-        pg[i].u.inuse.domain = NULL;
-        pg[i].u.inuse.type_info = 0;
+        pg[i].count_info = PGC_always_set;
+        pg[i].u.inuse.domain = NULL;
+        pg[i].u.inuse.type_info = 0;
     }

     return (unsigned long)page_to_virt(pg);
@@ -383,9 +383,9 @@ struct pfn_info *alloc_domheap_pages(str
             }
         }

-        pg[i].u.inuse.count_info = PGC_always_set;
-        pg[i].u.inuse.domain = NULL;
-        pg[i].u.inuse.type_info = 0;
+        pg[i].count_info = PGC_always_set;
+        pg[i].u.inuse.domain = NULL;
+        pg[i].u.inuse.type_info = 0;
     }

     if ( d == NULL )
@@ -411,7 +411,7 @@ struct pfn_info *alloc_domheap_pages(str
     {
         pg[i].u.inuse.domain = d;
         wmb(); /* Domain pointer must be visible before updating refcnt. */
-        pg[i].u.inuse.count_info |= PGC_allocated | 1;
+        pg[i].count_info |= PGC_allocated | 1;
         list_add_tail(&pg[i].list, &d->page_list);
     }
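The ordering in alloc_domheap_pages() — install u.inuse.domain, wmb(), then raise count_info — guarantees that any CPU which observes a non-zero reference count also observes a valid owner pointer, which is what get_page() relies on when it validates the (count, owner) pair. A minimal illustration of the same publish/consume pattern in C11 atomics, not taken from the patch (publish_page and lookup_owner_if_live are invented names, and the real code ORs PGC_allocated | 1 into an already-initialised count_info rather than storing a fresh value):

/* Illustrative sketch only -- not from the patch. */
#include <stdatomic.h>
#include <stddef.h>

struct domain;

struct page_like {
    struct domain *owner;
    atomic_uint    count_info;       /* reference count + flags */
};

/* Writer side: set the owner first, then make the count visible (release). */
static void publish_page(struct page_like *pg, struct domain *d,
                         unsigned int initial_count)
{
    pg->owner = d;
    atomic_store_explicit(&pg->count_info, initial_count,
                          memory_order_release);
}

/* Reader side: a non-zero count (acquire) guarantees 'owner' is valid. */
static struct domain *lookup_owner_if_live(struct page_like *pg)
{
    if ( atomic_load_explicit(&pg->count_info, memory_order_acquire) == 0 )
        return NULL;
    return pg->owner;
}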
--- a/xen/include/asm-x86/mm.h Wed Aug 25 15:40:30 2004 +0000
+++ b/xen/include/asm-x86/mm.h Wed Aug 25 16:26:15 2004 +0000
@@ -30,25 +30,24 @@ struct pfn_info
     /* Each frame can be threaded onto a doubly-linked list. */
     struct list_head list;

+    /* Reference count and various PGC_xxx flags and fields. */
+    u32 count_info;
+
     /* Context-dependent fields follow... */
     union {

-        /* Page is in use by a domain. */
+        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
         struct {
-            /* Owner of this page. */
+            /* Owner of this page (NULL if page is anonymous). */
             struct domain *domain;
-            /* Reference count and various PGC_xxx flags and fields. */
-            u32 count_info;
             /* Type reference count and various PGT_xxx flags and fields. */
             u32 type_info;
         } inuse;

-        /* Page is on a free list. */
+        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
         struct {
             /* Mask of possibly-tainted TLBs. */
             unsigned long cpu_mask;
-            /* Must be at same offset as 'u.inuse.count_flags'. */
-            u32 __unavailable;
             /* Order-size of the free chunk this page is the head of. */
             u8 order;
         } free;
@@ -108,8 +107,8 @@ struct pfn_info
         wmb(); /* install valid domain ptr before updating refcnt. */     \
         spin_lock(&(_dom)->page_alloc_lock);                              \
         /* _dom holds an allocation reference */                          \
-        ASSERT((_pfn)->u.inuse.count_info == PGC_always_set);             \
-        (_pfn)->u.inuse.count_info |= PGC_allocated | 1;                  \
+        ASSERT((_pfn)->count_info == PGC_always_set);                     \
+        (_pfn)->count_info |= PGC_allocated | 1;                          \
         if ( unlikely((_dom)->xenheap_pages++ == 0) )                     \
             get_knownalive_domain(_dom);                                  \
         list_add_tail(&(_pfn)->list, &(_dom)->xenpage_list);              \
@@ -126,13 +125,13 @@ void free_page_type(struct pfn_info *pag

 static inline void put_page(struct pfn_info *page)
 {
-    u32 nx, x, y = page->u.inuse.count_info;
+    u32 nx, x, y = page->count_info;

     do {
         x = y;
         nx = x - 1;
     }
-    while ( unlikely((y = cmpxchg(&page->u.inuse.count_info, x, nx)) != x) );
+    while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );

     if ( unlikely((nx & PGC_count_mask) == 0) )
         free_domheap_page(page);
@@ -142,7 +141,7 @@ static inline void put_page(struct pfn_i
 static inline int get_page(struct pfn_info *page,
                            struct domain *domain)
 {
-    u32 x, nx, y = page->u.inuse.count_info;
+    u32 x, nx, y = page->count_info;
     struct domain *p, *np = page->u.inuse.domain;

     do {
@@ -160,9 +159,9 @@ static inline int get_page_in
         }
         __asm__ __volatile__(
             LOCK_PREFIX "cmpxchg8b %3"
-            : "=a" (np), "=d" (y), "=b" (p),
-              "=m" (*(volatile u64 *)(&page->u.inuse.domain))
-            : "0" (p), "1" (x), "b" (p), "c" (nx) );
+            : "=d" (np), "=a" (y), "=c" (p),
+              "=m" (*(volatile u64 *)(&page->count_info))
+            : "0" (p), "1" (x), "c" (p), "b" (nx) );
     }
     while ( unlikely(np != p) || unlikely(y != x) );

@@ -254,7 +253,7 @@ static inline int get_page_type(struct p
             DPRINTK("Error while validating pfn %08lx for type %08x."
                     " caf=%08x taf=%08x\n",
                     page_to_pfn(page), type,
-                    page->u.inuse.count_info,
+                    page->count_info,
                     page->u.inuse.type_info);
             put_page_type(page);
             return 0;
@@ -292,7 +291,7 @@ static inline int get_page_and_type(stru
     ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
     ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
 #define ASSERT_PAGE_IS_DOMAIN(_p, _d) \
-    ASSERT(((_p)->u.inuse.count_info & PGC_count_mask) != 0); \
+    ASSERT(((_p)->count_info & PGC_count_mask) != 0); \
     ASSERT((_p)->u.inuse.domain == (_d))

 int check_descriptor(unsigned long *d);
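With count_info hoisted out of the union, the free-list variant no longer needs the __unavailable placeholder, and the header itself could enforce the adjacency that memory.c now checks at run time. A hedged compile-time sketch, assuming the struct pfn_info, u32 and offsetof definitions already visible to this header (the typedef names are invented; this is not part of the changeset):

/* Hypothetical compile-time version of the run-time check that this
 * changeset adds to arch_init_memory(): break the build (negative array
 * size) if count_info is not immediately followed by u.inuse.domain, or
 * if struct pfn_info is not the expected 24 bytes. */
typedef char pfn_info_fields_adjacent_check[
    (offsetof(struct pfn_info, u.inuse.domain) ==
     offsetof(struct pfn_info, count_info) + sizeof(u32)) ? 1 : -1];

typedef char pfn_info_size_check[
    (sizeof(struct pfn_info) == 24) ? 1 : -1];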
--- a/xen/include/asm-x86/shadow.h Wed Aug 25 15:40:30 2004 +0000
+++ b/xen/include/asm-x86/shadow.h Wed Aug 25 16:26:15 2004 +0000
@@ -121,7 +121,7 @@ static inline int __mark_dirty( struct m
                 mfn, pfn, m->shadow_dirty_bitmap_size, m );
         SH_LOG("dom=%u caf=%08x taf=%08x\n",
                frame_table[mfn].u.inuse.domain->domain,
-               frame_table[mfn].u.inuse.count_info,
+               frame_table[mfn].count_info,
                frame_table[mfn].u.inuse.type_info );
     }