ia64/xen-unstable
changeset 2423:ac282d5dd9d1
bitkeeper revision 1.1159.71.1 (4138ef7arWNBBMfQRCJaRyf93H0eqw)
Remove L1 PT pinning from Xen. Clean up the fixes for writable
page tables.
author   | kaf24@camelot.eng.3leafnetworks.com
date     | Fri Sep 03 22:26:02 2004 +0000 (2004-09-03)
parents  | 5d7a7c656fa3
children | a0d6f6965951
files    | BitKeeper/etc/logging_ok linux-2.4.27-xen-sparse/include/asm-xen/pgalloc.h linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/hypervisor.c linux-2.6.8.1-xen-sparse/include/asm-xen/hypervisor.h tools/libxc/xc_linux_build.c tools/libxc/xc_linux_restore.c tools/libxc/xc_netbsd_build.c xen/arch/x86/memory.c xen/common/schedule.c xen/include/asm-x86/mm.h xen/include/hypervisor-ifs/hypervisor-if.h
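
With L1 pinning gone, a guest or the domain builder only ever pins the top-level (L2) page table; Xen validates the L1 tables it references when the pin is processed. The sketch below illustrates how such a pin request is encoded, assuming the ptr/val layout and MMUEXT_* values from the hypervisor-if.h hunk at the end of this changeset. submit_mmu_update() is a hypothetical stand-in for the guest's update queue (queue_pgd_pin()) or libxc's add_mmu_update() path, not code from this tree.

```c
/* Minimal sketch of a pin request for a top-level page table, assuming the
 * ptr/val layout documented in hypervisor-if.h below. submit_mmu_update() is
 * a hypothetical helper standing in for the real queue/hypercall path. */
#include <stdio.h>

typedef struct {
    unsigned long ptr;  /* machine address; low 2 bits select the command class */
    unsigned long val;  /* for extended commands, low 8 bits are the MMUEXT_* code */
} mmu_update_t;

#define MMU_EXTENDED_COMMAND 3  /* values taken from hypervisor-if.h in this changeset */
#define MMUEXT_PIN_TABLE     0  /* replaces MMUEXT_PIN_L{1,2,3,4}_TABLE */
#define MMUEXT_UNPIN_TABLE   4

static int submit_mmu_update(mmu_update_t *req, int count)
{
    /* Stand-in: a real guest would queue this and flush it via the mmu_update
     * hypercall (cf. queue_pgd_pin() in the hypervisor.c hunk below). */
    for (int i = 0; i < count; i++)
        printf("mmu_update: ptr=%#lx val=%#lx\n", req[i].ptr, req[i].val);
    return 0;
}

static int pin_top_level_table(unsigned long ma)  /* ma = page-aligned machine address */
{
    mmu_update_t req = {
        .ptr = ma | MMU_EXTENDED_COMMAND,
        .val = MMUEXT_PIN_TABLE,  /* Xen validates the whole tree below this frame */
    };
    return submit_mmu_update(&req, 1);
}

int main(void)
{
    return pin_top_level_table(0x12345000UL);
}
```

Unpinning is symmetric: the same ptr encoding with MMUEXT_UNPIN_TABLE in val, as the xc_linux_build.c and xc_linux_restore.c hunks show for the build and restore paths.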
line diff
--- a/BitKeeper/etc/logging_ok  Fri Sep 03 20:44:22 2004 +0000
+++ b/BitKeeper/etc/logging_ok  Fri Sep 03 22:26:02 2004 +0000
@@ -19,6 +19,7 @@ iap10@striker.cl.cam.ac.uk
 iap10@tetris.cl.cam.ac.uk
 jws22@gauntlet.cl.cam.ac.uk
 jws@cairnwell.research
+kaf24@camelot.eng.3leafnetworks.com
 kaf24@freefall.cl.cam.ac.uk
 kaf24@labyrinth.cl.cam.ac.uk
 kaf24@penguin.local
--- a/linux-2.4.27-xen-sparse/include/asm-xen/pgalloc.h  Fri Sep 03 20:44:22 2004 +0000
+++ b/linux-2.4.27-xen-sparse/include/asm-xen/pgalloc.h  Fri Sep 03 22:26:02 2004 +0000
@@ -134,7 +134,6 @@ static inline pte_t *pte_alloc_one(struc
     {
         clear_page(pte);
         __make_page_readonly(pte);
-        queue_pte_pin(__pa(pte));
     }
     return pte;
 
@@ -153,7 +152,6 @@ static inline pte_t *pte_alloc_one_fast(
 
 static __inline__ void pte_free_slow(pte_t *pte)
 {
-    queue_pte_unpin(__pa(pte));
     __make_page_writable(pte);
     free_page((unsigned long)pte);
 }
--- a/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/hypervisor.c  Fri Sep 03 20:44:22 2004 +0000
+++ b/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/hypervisor.c  Fri Sep 03 22:26:02 2004 +0000
@@ -85,8 +85,6 @@ static void DEBUG_disallow_pt_read(unsig
 #undef queue_invlpg
 #undef queue_pgd_pin
 #undef queue_pgd_unpin
-#undef queue_pte_pin
-#undef queue_pte_unpin
 #undef queue_set_ldt
 #endif
 
@@ -219,7 +217,7 @@ void queue_pgd_pin(unsigned long ptr)
     spin_lock_irqsave(&update_lock, flags);
     update_queue[idx].ptr = phys_to_machine(ptr);
     update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
+    update_queue[idx].val = MMUEXT_PIN_TABLE;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
@@ -235,28 +233,6 @@ void queue_pgd_unpin(unsigned long ptr)
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
-void queue_pte_pin(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pte_unpin(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_UNPIN_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
 void queue_set_ldt(unsigned long ptr, unsigned long len)
 {
     unsigned long flags;
@@ -339,7 +315,7 @@ void xen_pgd_pin(unsigned long ptr)
     spin_lock_irqsave(&update_lock, flags);
     update_queue[idx].ptr = phys_to_machine(ptr);
     update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
+    update_queue[idx].val = MMUEXT_PIN_TABLE;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
@@ -355,28 +331,6 @@ void xen_pgd_unpin(unsigned long ptr)
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
-void xen_pte_pin(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void xen_pte_unpin(unsigned long ptr)
-{
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    update_queue[idx].ptr = phys_to_machine(ptr);
-    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
-    update_queue[idx].val = MMUEXT_UNPIN_TABLE;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
 void xen_set_ldt(unsigned long ptr, unsigned long len)
 {
     unsigned long flags;
--- a/linux-2.6.8.1-xen-sparse/include/asm-xen/hypervisor.h  Fri Sep 03 20:44:22 2004 +0000
+++ b/linux-2.6.8.1-xen-sparse/include/asm-xen/hypervisor.h  Fri Sep 03 22:26:02 2004 +0000
@@ -54,8 +54,6 @@ void queue_tlb_flush(void);
 void queue_invlpg(unsigned long ptr);
 void queue_pgd_pin(unsigned long ptr);
 void queue_pgd_unpin(unsigned long ptr);
-void queue_pte_pin(unsigned long ptr);
-void queue_pte_unpin(unsigned long ptr);
 void queue_set_ldt(unsigned long ptr, unsigned long bytes);
 void queue_machphys_update(unsigned long mfn, unsigned long pfn);
 void xen_l1_entry_update(pte_t *ptr, unsigned long val);
@@ -65,8 +63,6 @@ void xen_tlb_flush(void);
 void xen_invlpg(unsigned long ptr);
 void xen_pgd_pin(unsigned long ptr);
 void xen_pgd_unpin(unsigned long ptr);
-void xen_pte_pin(unsigned long ptr);
-void xen_pte_unpin(unsigned long ptr);
 void xen_set_ldt(unsigned long ptr, unsigned long bytes);
 void xen_machphys_update(unsigned long mfn, unsigned long pfn);
 #define MMU_UPDATE_DEBUG 0
@@ -141,14 +137,6 @@ extern page_update_debug_t update_debug_
     printk("PGD UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
     queue_pgd_unpin(_p); \
 })
-#define queue_pte_pin(_p) ({ \
-    printk("PTE PIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
-    queue_pte_pin(_p); \
-})
-#define queue_pte_unpin(_p) ({ \
-    printk("PTE UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \
-    queue_pte_unpin(_p); \
-})
 #define queue_set_ldt(_p,_l) ({ \
     printk("SETL LDT %s %d: %08lx %d\n", __FILE__, __LINE__, (_p), (_l)); \
     queue_set_ldt((_p), (_l)); \
--- a/tools/libxc/xc_linux_build.c  Fri Sep 03 20:44:22 2004 +0000
+++ b/tools/libxc/xc_linux_build.c  Fri Sep 03 22:26:02 2004 +0000
@@ -270,7 +270,7 @@ static int setup_guestos(int xc_handle,
      * correct protection for the page
      */
     if ( add_mmu_update(xc_handle, mmu,
-                        l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_L2_TABLE) )
+                        l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_TABLE) )
         goto error_out;
 
     start_info = map_pfn_writeable(
--- a/tools/libxc/xc_linux_restore.c  Fri Sep 03 20:44:22 2004 +0000
+++ b/tools/libxc/xc_linux_restore.c  Fri Sep 03 22:26:02 2004 +0000
@@ -473,28 +473,16 @@ int xc_linux_restore(int xc_handle, XcIO
      */
     for ( i = 0; i < nr_pfns; i++ )
     {
-        if ( pfn_type[i] == (L1TAB|LPINTAB) )
+        if ( pfn_type[i] != (L2TAB|LPINTAB) )
+            continue;
+        if ( add_mmu_update(xc_handle, mmu,
+                            (pfn_to_mfn_table[i]<<PAGE_SHIFT) |
+                            MMU_EXTENDED_COMMAND,
+                            MMUEXT_PIN_TABLE) )
         {
-            if ( add_mmu_update(xc_handle, mmu,
-                                (pfn_to_mfn_table[i]<<PAGE_SHIFT) |
-                                MMU_EXTENDED_COMMAND,
-                                MMUEXT_PIN_L1_TABLE) ) {
-                printf("ERR pin L1 pfn=%lx mfn=%lx\n",
-                       (unsigned long)i, pfn_to_mfn_table[i]);
-                goto out;
-            }
-        }
-        else if ( pfn_type[i] == (L2TAB|LPINTAB) )
-        {
-            if ( add_mmu_update(xc_handle, mmu,
-                                (pfn_to_mfn_table[i]<<PAGE_SHIFT) |
-                                MMU_EXTENDED_COMMAND,
-                                MMUEXT_PIN_L2_TABLE) )
-            {
-                printf("ERR pin L2 pfn=%lx mfn=%lx\n",
-                       (unsigned long)i, pfn_to_mfn_table[i]);
-                goto out;
-            }
+            printf("ERR pin L2 pfn=%lx mfn=%lx\n",
+                   (unsigned long)i, pfn_to_mfn_table[i]);
+            goto out;
         }
     }
 
--- a/tools/libxc/xc_netbsd_build.c  Fri Sep 03 20:44:22 2004 +0000
+++ b/tools/libxc/xc_netbsd_build.c  Fri Sep 03 22:26:02 2004 +0000
@@ -163,7 +163,7 @@ static int setup_guestos(int xc_handle,
      * correct protection for the page
      */
    if ( add_mmu_update(xc_handle, mmu,
-                        l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_L2_TABLE) )
+                        l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_TABLE) )
        goto error_out;
 
    *virt_startinfo_addr =
--- a/xen/arch/x86/memory.c  Fri Sep 03 20:44:22 2004 +0000
+++ b/xen/arch/x86/memory.c  Fri Sep 03 22:26:02 2004 +0000
@@ -455,7 +455,8 @@ get_page_from_l1e(
 /* NB. Virtual address 'l2e' maps to a machine address within frame 'pfn'. */
 static int 
 get_page_from_l2e(
-    l2_pgentry_t l2e, unsigned long pfn, struct domain *d, unsigned long va_idx)
+    l2_pgentry_t l2e, unsigned long pfn,
+    struct domain *d, unsigned long va_idx)
 {
     int rc;
 
@@ -471,7 +472,7 @@ get_page_from_l2e(
 
     rc = get_page_and_type_from_pagenr(
         l2_pgentry_to_pagenr(l2e), 
-            PGT_l1_page_table | (va_idx<<PGT_va_shift), d);
+        PGT_l1_page_table | (va_idx<<PGT_va_shift), d);
 
     if ( unlikely(!rc) )
         return get_linear_pagetable(l2e, pfn, d);
@@ -671,8 +672,8 @@ static int mod_l2_entry(l2_pgentry_t *pl
         return update_l2e(pl2e, ol2e, nl2e);
 
     if ( unlikely(!get_page_from_l2e(nl2e, pfn, current,
-                                     ((unsigned long)
-                                      pl2e & ~PAGE_MASK) >> 2 )) )
+                                     ((unsigned long)pl2e &
+                                      ~PAGE_MASK) >> 2)) )
         return 0;
 
     if ( unlikely(!update_l2e(pl2e, ol2e, nl2e)) )
@@ -826,21 +827,9 @@ static int do_extended_command(unsigned
 
     switch ( cmd )
     {
-    case MMUEXT_PIN_L1_TABLE:
-    case MMUEXT_PIN_L2_TABLE:
-
-        /* When we pin an L1 page we now insist that the va
-           backpointer (used for writable page tables) must still be
-           mutable. This is an additional restriction even for guests
-           that don't use writable page tables, but I don't think it
-           will break anything as guests typically pin pages before
-           they are used, hence they'll still be mutable. */
-
+    case MMUEXT_PIN_TABLE:
         okay = get_page_and_type_from_pagenr(
-            pfn, 
-            ((cmd==MMUEXT_PIN_L2_TABLE) ? 
-             PGT_l2_page_table : (PGT_l1_page_table | PGT_va_mutable) ) ,
-            FOREIGNDOM);
+            pfn, PGT_l2_page_table, FOREIGNDOM);
 
         if ( unlikely(!okay) )
         {
@@ -1183,6 +1172,7 @@ int do_mmu_update(mmu_update_t *ureqs, i
     unsigned long prev_spfn = 0;
     l1_pgentry_t *prev_spl1e = 0;
     struct domain *d = current;
+    u32 type_info;
 
     perfc_incrc(calls_to_mmu_update);
     perfc_addc(num_page_updates, count);
@@ -1231,10 +1221,11 @@ int do_mmu_update(mmu_update_t *ureqs, i
             }
 
             page = &frame_table[pfn];
-            switch ( (page->u.inuse.type_info & PGT_type_mask) )
+            switch ( (type_info = page->u.inuse.type_info) & PGT_type_mask )
             {
            case PGT_l1_page_table:
-                if ( likely(passive_get_page_type(page, PGT_l1_page_table)) )
+                if ( likely(get_page_type(
+                    page, type_info & (PGT_type_mask|PGT_va_mask))) )
                 {
                     okay = mod_l1_entry((l1_pgentry_t *)va,
                                         mk_l1_pgentry(req.val));
@@ -1484,11 +1475,11 @@ void ptwr_reconnect_disconnected(unsigne
         [ptwr_info[cpu].writable_l1>>PAGE_SHIFT];
 
 #ifdef PTWR_TRACK_DOMAIN
-    if (ptwr_domain[cpu] != get_current()->domain)
+    if (ptwr_domain[cpu] != current->domain)
         printk("ptwr_reconnect_disconnected domain mismatch %d != %d\n",
-               ptwr_domain[cpu], get_current()->domain);
+               ptwr_domain[cpu], current->domain);
 #endif
-    PTWR_PRINTK(("[A] page fault in disconnected space: addr %08lx space %08lx\n",
+    PTWR_PRINTK(("[A] page fault in disconn space: addr %08lx space %08lx\n",
                  addr, ptwr_info[cpu].disconnected << L2_PAGETABLE_SHIFT));
     pl2e = &linear_l2_table[ptwr_info[cpu].disconnected];
 
@@ -1560,9 +1551,9 @@ void ptwr_flush_inactive(void)
     int i, idx;
 
 #ifdef PTWR_TRACK_DOMAIN
-    if (ptwr_info[cpu].domain != get_current()->domain)
+    if (ptwr_info[cpu].domain != current->domain)
         printk("ptwr_flush_inactive domain mismatch %d != %d\n",
-               ptwr_info[cpu].domain, get_current()->domain);
+               ptwr_info[cpu].domain, current->domain);
 #endif
 #if 0
     {
@@ -1643,9 +1634,9 @@ int ptwr_do_page_fault(unsigned long add
     if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table )
     {
 #ifdef PTWR_TRACK_DOMAIN
-        if ( ptwr_info[cpu].domain != get_current()->domain )
+        if ( ptwr_info[cpu].domain != current->domain )
            printk("ptwr_do_page_fault domain mismatch %d != %d\n",
-                   ptwr_info[cpu].domain, get_current()->domain);
+                   ptwr_info[cpu].domain, current->domain);
 #endif
         pl2e = &linear_l2_table[(page->u.inuse.type_info &
                                  PGT_va_mask) >> PGT_va_shift];
--- a/xen/common/schedule.c  Fri Sep 03 20:44:22 2004 +0000
+++ b/xen/common/schedule.c  Fri Sep 03 22:26:02 2004 +0000
@@ -374,20 +374,6 @@ void __enter_scheduler(void)
         cleanup_writable_pagetable(
             prev, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE);
 
-#ifdef PTWR_TRACK_DOMAIN
-    {
-        extern domid_t ptwr_domain[];
-        int cpu = smp_processor_id();
-        if (ptwr_domain[cpu] != prev->domain)
-            printk("switch_to domain mismatch %d != %d\n",
-                   ptwr_domain[cpu], prev->domain);
-        ptwr_domain[cpu] = next->domain;
-        if (ptwr_disconnected[cpu] != ENTRIES_PER_L2_PAGETABLE ||
-            ptwr_writable_idx[cpu])
-            printk("switch_to ptwr dirty!!!\n");
-    }
-#endif
-
     perfc_incrc(sched_ctx);
 
 #if defined(WAKE_HISTO)
--- a/xen/include/asm-x86/mm.h  Fri Sep 03 20:44:22 2004 +0000
+++ b/xen/include/asm-x86/mm.h  Fri Sep 03 22:26:02 2004 +0000
@@ -71,10 +71,9 @@ struct pfn_info
  /* Has this page been validated for use as its current type? */
 #define _PGT_validated      28
 #define PGT_validated       (1<<_PGT_validated)
- /* 10-bit most significant bits of va address if used as l1 page table */
+ /* The 10 most significant bits of virt address if this is a L1 page table. */
 #define PGT_va_shift        18
 #define PGT_va_mask         (((1<<10)-1)<<PGT_va_shift)
-#define PGT_va_mutable      PGT_va_mask /* va backpointer is still mutable */
  /* 18-bit count of uses of this frame as its current type. */
 #define PGT_count_mask      ((1<<18)-1)
 
@@ -199,13 +198,6 @@ static inline void put_page_type(struct
                 nx &= ~PGT_validated;
             }
         }
-        else if ( unlikely( ((nx & PGT_count_mask) == 1) &&
-                            test_bit(_PGC_guest_pinned, &page->count_info)) )
-        {
-            /* if the page is pinned, but we're dropping the last reference
-               then make the va backpointer mutable again */
-            nx |= PGT_va_mutable;
-        }
     }
     while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
 }
@@ -230,27 +222,15 @@ static inline int get_page_type(struct p
                 nx &= ~(PGT_type_mask | PGT_va_mask | PGT_validated);
                 nx |= type;
                 /* No extra validation needed for writable pages. */
-                if ( (type & PGT_type_mask) == PGT_writable_page )
+                if ( type == PGT_writable_page )
                     nx |= PGT_validated;
             }
         }
-        else if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
+        else if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != type) )
         {
-            DPRINTK("Unexpected type (saw %08x != exp %08x) for pfn %08lx\n",
-                    x & PGT_type_mask, type, page_to_pfn(page));
-            return 0;
-        }
-        else if ( (x & PGT_va_mask) == PGT_va_mutable )
-        {
-            /* The va_backpointer is currently mutable, hence we update it. */
-            nx &= ~PGT_va_mask;
-            nx |= type; /* we know the actual type is correct */
-        }
-        else if ( unlikely((x & PGT_va_mask) != (type & PGT_va_mask) ) )
-        {
-            /* The va backpointer wasn't mutable, and is different :-( */
-            DPRINTK("Unexpected va backpointer (saw %08x != exp %08x) for pfn %08lx\n",
-                    x, type, page_to_pfn(page));
+            DPRINTK("Unexpected type or va backptr (saw %08x != exp %08x) "
+                    "for pfn %08lx\n",
+                    x & (PGT_type_mask|PGT_va_mask), type, page_to_pfn(page));
             return 0;
         }
         else if ( unlikely(!(x & PGT_validated)) )
@@ -286,55 +266,6 @@ static inline int get_page_type(struct p
     return 1;
 }
 
-/* This 'passive' version of get_page_type doesn't attempt to validate
-the page, but just checks the type and increments the type count. The
-function is called while doing a NORMAL_PT_UPDATE of an entry in an L1
-page table: We want to 'lock' the page for the brief beriod while
-we're doing the update, but we're not actually linking it in to a
-pagetable. */
-
-static inline int passive_get_page_type(struct pfn_info *page, u32 type)
-{
-    u32 nx, x, y = page->u.inuse.type_info;
- again:
-    do {
-        x  = y;
-        nx = x + 1;
-        if ( unlikely((nx & PGT_count_mask) == 0) )
-        {
-            DPRINTK("Type count overflow on pfn %08lx\n", page_to_pfn(page));
-            return 0;
-        }
-        else if ( unlikely((x & PGT_count_mask) == 0) )
-        {
-            if ( (x & (PGT_type_mask|PGT_va_mask)) != type )
-            {
-                nx &= ~(PGT_type_mask | PGT_va_mask | PGT_validated);
-                nx |= type;
-            }
-        }
-        else if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
-        {
-            DPRINTK("Unexpected type (saw %08x != exp %08x) for pfn %08lx\n",
-                    x & PGT_type_mask, type, page_to_pfn(page));
-            return 0;
-        }
-        else if ( unlikely(!(x & PGT_validated)) )
-        {
-            /* Someone else is updating validation of this page. Wait... */
-            while ( (y = page->u.inuse.type_info) != x )
-            {
-                rep_nop();
-                barrier();
-            }
-            goto again;
-        }
-    }
-    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
-
-    return 1;
-}
-
 
 static inline void put_page_and_type(struct pfn_info *page)
 {
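
As a reading aid for the type_info arithmetic above: with PGT_va_mutable removed, callers such as get_page_from_l2e() and do_mmu_update() always pass an expected type word that already contains the va backpointer, and the simplified get_page_type() compares type and backpointer with a single mask. Below is a rough sketch of the bit packing, using the shifts and masks shown in this hunk; PGT_type_mask and PGT_l1_page_table are assumed placeholder values here since their definitions fall outside the hunk.

```c
/* Rough sketch of the pfn_info type_info packing implied by this hunk.
 * PGT_va_shift/PGT_va_mask/PGT_count_mask/_PGT_validated come from the diff;
 * PGT_type_mask and PGT_l1_page_table are assumed placeholders. */
#include <stdio.h>
#include <stdint.h>

#define PGT_type_mask      (7u << 29)        /* assumed: type lives in bits 31..29 */
#define PGT_l1_page_table  (1u << 29)        /* assumed placeholder value */
#define _PGT_validated     28
#define PGT_validated      (1u << _PGT_validated)
#define PGT_va_shift       18                /* 10-bit va backpointer, bits 27..18 */
#define PGT_va_mask        (((1u << 10) - 1) << PGT_va_shift)
#define PGT_count_mask     ((1u << 18) - 1)  /* 18-bit type-use count, bits 17..0 */

int main(void)
{
    /* Expected type word for an L1 table referenced from L2 slot 'va_idx',
     * as built in get_page_from_l2e() / do_mmu_update(). */
    uint32_t va_idx = 0x300;
    uint32_t type   = PGT_l1_page_table | (va_idx << PGT_va_shift);

    /* The simplified get_page_type() check compares type and backpointer
     * together in one go. */
    uint32_t x = type | PGT_validated | 2 /* current type-use count */;
    if ( (x & (PGT_type_mask | PGT_va_mask)) != type )
        printf("type/backpointer mismatch\n");
    else
        printf("type %#x matches, count=%u\n", type, x & PGT_count_mask);
    return 0;
}
```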
--- a/xen/include/hypervisor-ifs/hypervisor-if.h  Fri Sep 03 20:44:22 2004 +0000
+++ b/xen/include/hypervisor-ifs/hypervisor-if.h  Fri Sep 03 22:26:02 2004 +0000
@@ -104,9 +104,9 @@
  * ptr[1:0] == MMU_EXTENDED_COMMAND:
  *   val[7:0] -- MMUEXT_* command.
  * 
- *   val[7:0] == MMUEXT_(UN)PIN_*_TABLE:
- *   ptr[:2]  -- Machine address of frame to be (un)pinned as a p.t. page.
- *               The frame must belong to the FD, if one is specified.
+ *   val[7:0] == MMUEXT_[UN]PIN_TABLE:
+ *   ptr[:2]  -- Machine address of frame to be (un)pinned as a top-level p.t.
+ *               page. The frame must belong to the FD, if one is specified.
  * 
  *   val[7:0] == MMUEXT_NEW_BASEPTR:
  *   ptr[:2]  -- Machine address of new page-table base to install in MMU.
@@ -142,10 +142,7 @@
 #define MMU_NORMAL_PT_UPDATE     0 /* checked '*ptr = val'. ptr is MA.      */
 #define MMU_MACHPHYS_UPDATE      2 /* ptr = MA of frame to modify entry for */
 #define MMU_EXTENDED_COMMAND     3 /* least 8 bits of val demux further     */
-#define MMUEXT_PIN_L1_TABLE      0 /* ptr = MA of frame to pin              */
-#define MMUEXT_PIN_L2_TABLE      1 /* ptr = MA of frame to pin              */
-#define MMUEXT_PIN_L3_TABLE      2 /* ptr = MA of frame to pin              */
-#define MMUEXT_PIN_L4_TABLE      3 /* ptr = MA of frame to pin              */
+#define MMUEXT_PIN_TABLE         0 /* ptr = MA of frame to pin              */
 #define MMUEXT_UNPIN_TABLE       4 /* ptr = MA of frame to unpin            */
 #define MMUEXT_NEW_BASEPTR       5 /* ptr = MA of new pagetable base        */
 #define MMUEXT_TLB_FLUSH         6 /* ptr = NULL                            */
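
On the hypervisor side, the comment block above implies a two-step demux: ptr[1:0] selects the command class, and for MMU_EXTENDED_COMMAND the low eight bits of val name the MMUEXT_* operation while the rest of ptr identifies the frame. The sketch below mirrors roughly what do_extended_command() in the memory.c hunk does for the pin/unpin cases; the handler names, the MMUEXT_CMD_MASK definition, and the PAGE_SHIFT value are illustrative assumptions, not Xen's actual code.

```c
/* Sketch of decoding an MMU_EXTENDED_COMMAND request per the comment block
 * above. handle_pin()/handle_unpin() are hypothetical helpers; PAGE_SHIFT is
 * assumed to be 12 for illustration. */
#include <stdio.h>

#define PAGE_SHIFT           12
#define MMU_EXTENDED_COMMAND 3
#define MMUEXT_PIN_TABLE     0
#define MMUEXT_UNPIN_TABLE   4
#define MMUEXT_CMD_MASK      0xFF   /* val[7:0] carries the MMUEXT_* code */

static void handle_pin(unsigned long pfn)   { printf("pin   frame %#lx\n", pfn); }
static void handle_unpin(unsigned long pfn) { printf("unpin frame %#lx\n", pfn); }

static void do_extended_command_sketch(unsigned long ptr, unsigned long val)
{
    unsigned long pfn = ptr >> PAGE_SHIFT;  /* ptr[:2] is the frame's machine address */
    switch ( val & MMUEXT_CMD_MASK )
    {
    case MMUEXT_PIN_TABLE:                  /* single pin command after this changeset */
        handle_pin(pfn);
        break;
    case MMUEXT_UNPIN_TABLE:
        handle_unpin(pfn);
        break;
    default:
        printf("unhandled MMUEXT command %#lx\n", val & MMUEXT_CMD_MASK);
        break;
    }
}

int main(void)
{
    unsigned long ma = 0x12345000UL;
    do_extended_command_sketch(ma | MMU_EXTENDED_COMMAND, MMUEXT_PIN_TABLE);
    do_extended_command_sketch(ma | MMU_EXTENDED_COMMAND, MMUEXT_UNPIN_TABLE);
    return 0;
}
```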