ia64/xen-unstable
changeset 2427:6f9bf6379bbd
bitkeeper revision 1.1159.69.9 (413a0a4d7ODoJ_4kti38jM2_4EooQA)
Cset exclude: kaf24@camelot.eng.3leafnetworks.com|ChangeSet|20040903222602|39015
author | kaf24@camelot.eng.3leafnetworks.com |
---|---|
date | Sat Sep 04 18:32:45 2004 +0000 (2004-09-04) |
parents | 0811d135075f |
children | e7ad903ca36c |
files | BitKeeper/etc/logging_ok linux-2.4.27-xen-sparse/include/asm-xen/pgalloc.h linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/hypervisor.c linux-2.6.8.1-xen-sparse/include/asm-xen/hypervisor.h tools/libxc/xc_linux_build.c tools/libxc/xc_linux_restore.c tools/libxc/xc_netbsd_build.c xen/arch/x86/memory.c xen/common/schedule.c xen/include/asm-x86/mm.h xen/include/hypervisor-ifs/hypervisor-if.h |
line diff
2.1 --- a/linux-2.4.27-xen-sparse/include/asm-xen/pgalloc.h Sat Sep 04 06:28:25 2004 +0000 2.2 +++ b/linux-2.4.27-xen-sparse/include/asm-xen/pgalloc.h Sat Sep 04 18:32:45 2004 +0000 2.3 @@ -134,6 +134,7 @@ static inline pte_t *pte_alloc_one(struc 2.4 { 2.5 clear_page(pte); 2.6 __make_page_readonly(pte); 2.7 + queue_pte_pin(__pa(pte)); 2.8 } 2.9 return pte; 2.10 2.11 @@ -152,6 +153,7 @@ static inline pte_t *pte_alloc_one_fast( 2.12 2.13 static __inline__ void pte_free_slow(pte_t *pte) 2.14 { 2.15 + queue_pte_unpin(__pa(pte)); 2.16 __make_page_writable(pte); 2.17 free_page((unsigned long)pte); 2.18 }
3.1 --- a/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/hypervisor.c Sat Sep 04 06:28:25 2004 +0000 3.2 +++ b/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/hypervisor.c Sat Sep 04 18:32:45 2004 +0000 3.3 @@ -85,6 +85,8 @@ static void DEBUG_disallow_pt_read(unsig 3.4 #undef queue_invlpg 3.5 #undef queue_pgd_pin 3.6 #undef queue_pgd_unpin 3.7 +#undef queue_pte_pin 3.8 +#undef queue_pte_unpin 3.9 #undef queue_set_ldt 3.10 #endif 3.11 3.12 @@ -217,7 +219,7 @@ void queue_pgd_pin(unsigned long ptr) 3.13 spin_lock_irqsave(&update_lock, flags); 3.14 update_queue[idx].ptr = phys_to_machine(ptr); 3.15 update_queue[idx].ptr |= MMU_EXTENDED_COMMAND; 3.16 - update_queue[idx].val = MMUEXT_PIN_TABLE; 3.17 + update_queue[idx].val = MMUEXT_PIN_L2_TABLE; 3.18 increment_index(); 3.19 spin_unlock_irqrestore(&update_lock, flags); 3.20 } 3.21 @@ -233,6 +235,28 @@ void queue_pgd_unpin(unsigned long ptr) 3.22 spin_unlock_irqrestore(&update_lock, flags); 3.23 } 3.24 3.25 +void queue_pte_pin(unsigned long ptr) 3.26 +{ 3.27 + unsigned long flags; 3.28 + spin_lock_irqsave(&update_lock, flags); 3.29 + update_queue[idx].ptr = phys_to_machine(ptr); 3.30 + update_queue[idx].ptr |= MMU_EXTENDED_COMMAND; 3.31 + update_queue[idx].val = MMUEXT_PIN_L1_TABLE; 3.32 + increment_index(); 3.33 + spin_unlock_irqrestore(&update_lock, flags); 3.34 +} 3.35 + 3.36 +void queue_pte_unpin(unsigned long ptr) 3.37 +{ 3.38 + unsigned long flags; 3.39 + spin_lock_irqsave(&update_lock, flags); 3.40 + update_queue[idx].ptr = phys_to_machine(ptr); 3.41 + update_queue[idx].ptr |= MMU_EXTENDED_COMMAND; 3.42 + update_queue[idx].val = MMUEXT_UNPIN_TABLE; 3.43 + increment_index(); 3.44 + spin_unlock_irqrestore(&update_lock, flags); 3.45 +} 3.46 + 3.47 void queue_set_ldt(unsigned long ptr, unsigned long len) 3.48 { 3.49 unsigned long flags; 3.50 @@ -315,7 +339,7 @@ void xen_pgd_pin(unsigned long ptr) 3.51 spin_lock_irqsave(&update_lock, flags); 3.52 update_queue[idx].ptr = phys_to_machine(ptr); 3.53 update_queue[idx].ptr |= 
MMU_EXTENDED_COMMAND; 3.54 - update_queue[idx].val = MMUEXT_PIN_TABLE; 3.55 + update_queue[idx].val = MMUEXT_PIN_L2_TABLE; 3.56 increment_index_and_flush(); 3.57 spin_unlock_irqrestore(&update_lock, flags); 3.58 } 3.59 @@ -331,6 +355,28 @@ void xen_pgd_unpin(unsigned long ptr) 3.60 spin_unlock_irqrestore(&update_lock, flags); 3.61 } 3.62 3.63 +void xen_pte_pin(unsigned long ptr) 3.64 +{ 3.65 + unsigned long flags; 3.66 + spin_lock_irqsave(&update_lock, flags); 3.67 + update_queue[idx].ptr = phys_to_machine(ptr); 3.68 + update_queue[idx].ptr |= MMU_EXTENDED_COMMAND; 3.69 + update_queue[idx].val = MMUEXT_PIN_L1_TABLE; 3.70 + increment_index_and_flush(); 3.71 + spin_unlock_irqrestore(&update_lock, flags); 3.72 +} 3.73 + 3.74 +void xen_pte_unpin(unsigned long ptr) 3.75 +{ 3.76 + unsigned long flags; 3.77 + spin_lock_irqsave(&update_lock, flags); 3.78 + update_queue[idx].ptr = phys_to_machine(ptr); 3.79 + update_queue[idx].ptr |= MMU_EXTENDED_COMMAND; 3.80 + update_queue[idx].val = MMUEXT_UNPIN_TABLE; 3.81 + increment_index_and_flush(); 3.82 + spin_unlock_irqrestore(&update_lock, flags); 3.83 +} 3.84 + 3.85 void xen_set_ldt(unsigned long ptr, unsigned long len) 3.86 { 3.87 unsigned long flags;
4.1 --- a/linux-2.6.8.1-xen-sparse/include/asm-xen/hypervisor.h Sat Sep 04 06:28:25 2004 +0000 4.2 +++ b/linux-2.6.8.1-xen-sparse/include/asm-xen/hypervisor.h Sat Sep 04 18:32:45 2004 +0000 4.3 @@ -54,6 +54,8 @@ void queue_tlb_flush(void); 4.4 void queue_invlpg(unsigned long ptr); 4.5 void queue_pgd_pin(unsigned long ptr); 4.6 void queue_pgd_unpin(unsigned long ptr); 4.7 +void queue_pte_pin(unsigned long ptr); 4.8 +void queue_pte_unpin(unsigned long ptr); 4.9 void queue_set_ldt(unsigned long ptr, unsigned long bytes); 4.10 void queue_machphys_update(unsigned long mfn, unsigned long pfn); 4.11 void xen_l1_entry_update(pte_t *ptr, unsigned long val); 4.12 @@ -63,6 +65,8 @@ void xen_tlb_flush(void); 4.13 void xen_invlpg(unsigned long ptr); 4.14 void xen_pgd_pin(unsigned long ptr); 4.15 void xen_pgd_unpin(unsigned long ptr); 4.16 +void xen_pte_pin(unsigned long ptr); 4.17 +void xen_pte_unpin(unsigned long ptr); 4.18 void xen_set_ldt(unsigned long ptr, unsigned long bytes); 4.19 void xen_machphys_update(unsigned long mfn, unsigned long pfn); 4.20 #define MMU_UPDATE_DEBUG 0 4.21 @@ -137,6 +141,14 @@ extern page_update_debug_t update_debug_ 4.22 printk("PGD UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \ 4.23 queue_pgd_unpin(_p); \ 4.24 }) 4.25 +#define queue_pte_pin(_p) ({ \ 4.26 + printk("PTE PIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \ 4.27 + queue_pte_pin(_p); \ 4.28 +}) 4.29 +#define queue_pte_unpin(_p) ({ \ 4.30 + printk("PTE UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p)); \ 4.31 + queue_pte_unpin(_p); \ 4.32 +}) 4.33 #define queue_set_ldt(_p,_l) ({ \ 4.34 printk("SETL LDT %s %d: %08lx %d\n", __FILE__, __LINE__, (_p), (_l)); \ 4.35 queue_set_ldt((_p), (_l)); \
5.1 --- a/tools/libxc/xc_linux_build.c Sat Sep 04 06:28:25 2004 +0000 5.2 +++ b/tools/libxc/xc_linux_build.c Sat Sep 04 18:32:45 2004 +0000 5.3 @@ -270,7 +270,7 @@ static int setup_guestos(int xc_handle, 5.4 * correct protection for the page 5.5 */ 5.6 if ( add_mmu_update(xc_handle, mmu, 5.7 - l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_TABLE) ) 5.8 + l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_L2_TABLE) ) 5.9 goto error_out; 5.10 5.11 start_info = map_pfn_writeable(
6.1 --- a/tools/libxc/xc_linux_restore.c Sat Sep 04 06:28:25 2004 +0000 6.2 +++ b/tools/libxc/xc_linux_restore.c Sat Sep 04 18:32:45 2004 +0000 6.3 @@ -473,16 +473,28 @@ int xc_linux_restore(int xc_handle, XcIO 6.4 */ 6.5 for ( i = 0; i < nr_pfns; i++ ) 6.6 { 6.7 - if ( pfn_type[i] != (L2TAB|LPINTAB) ) 6.8 - continue; 6.9 - if ( add_mmu_update(xc_handle, mmu, 6.10 - (pfn_to_mfn_table[i]<<PAGE_SHIFT) | 6.11 - MMU_EXTENDED_COMMAND, 6.12 - MMUEXT_PIN_TABLE) ) 6.13 + if ( pfn_type[i] == (L1TAB|LPINTAB) ) 6.14 { 6.15 - printf("ERR pin L2 pfn=%lx mfn=%lx\n", 6.16 - (unsigned long)i, pfn_to_mfn_table[i]); 6.17 - goto out; 6.18 + if ( add_mmu_update(xc_handle, mmu, 6.19 + (pfn_to_mfn_table[i]<<PAGE_SHIFT) | 6.20 + MMU_EXTENDED_COMMAND, 6.21 + MMUEXT_PIN_L1_TABLE) ) { 6.22 + printf("ERR pin L1 pfn=%lx mfn=%lx\n", 6.23 + (unsigned long)i, pfn_to_mfn_table[i]); 6.24 + goto out; 6.25 + } 6.26 + } 6.27 + else if ( pfn_type[i] == (L2TAB|LPINTAB) ) 6.28 + { 6.29 + if ( add_mmu_update(xc_handle, mmu, 6.30 + (pfn_to_mfn_table[i]<<PAGE_SHIFT) | 6.31 + MMU_EXTENDED_COMMAND, 6.32 + MMUEXT_PIN_L2_TABLE) ) 6.33 + { 6.34 + printf("ERR pin L2 pfn=%lx mfn=%lx\n", 6.35 + (unsigned long)i, pfn_to_mfn_table[i]); 6.36 + goto out; 6.37 + } 6.38 } 6.39 } 6.40
7.1 --- a/tools/libxc/xc_netbsd_build.c Sat Sep 04 06:28:25 2004 +0000 7.2 +++ b/tools/libxc/xc_netbsd_build.c Sat Sep 04 18:32:45 2004 +0000 7.3 @@ -163,7 +163,7 @@ static int setup_guestos(int xc_handle, 7.4 * correct protection for the page 7.5 */ 7.6 if ( add_mmu_update(xc_handle, mmu, 7.7 - l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_TABLE) ) 7.8 + l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_L2_TABLE) ) 7.9 goto error_out; 7.10 7.11 *virt_startinfo_addr =
8.1 --- a/xen/arch/x86/memory.c Sat Sep 04 06:28:25 2004 +0000 8.2 +++ b/xen/arch/x86/memory.c Sat Sep 04 18:32:45 2004 +0000 8.3 @@ -455,8 +455,7 @@ get_page_from_l1e( 8.4 /* NB. Virtual address 'l2e' maps to a machine address within frame 'pfn'. */ 8.5 static int 8.6 get_page_from_l2e( 8.7 - l2_pgentry_t l2e, unsigned long pfn, 8.8 - struct domain *d, unsigned long va_idx) 8.9 + l2_pgentry_t l2e, unsigned long pfn, struct domain *d, unsigned long va_idx) 8.10 { 8.11 int rc; 8.12 8.13 @@ -472,7 +471,7 @@ get_page_from_l2e( 8.14 8.15 rc = get_page_and_type_from_pagenr( 8.16 l2_pgentry_to_pagenr(l2e), 8.17 - PGT_l1_page_table | (va_idx<<PGT_va_shift), d); 8.18 + PGT_l1_page_table | (va_idx<<PGT_va_shift), d); 8.19 8.20 if ( unlikely(!rc) ) 8.21 return get_linear_pagetable(l2e, pfn, d); 8.22 @@ -672,8 +671,8 @@ static int mod_l2_entry(l2_pgentry_t *pl 8.23 return update_l2e(pl2e, ol2e, nl2e); 8.24 8.25 if ( unlikely(!get_page_from_l2e(nl2e, pfn, current, 8.26 - ((unsigned long)pl2e & 8.27 - ~PAGE_MASK) >> 2)) ) 8.28 + ((unsigned long) 8.29 + pl2e & ~PAGE_MASK) >> 2 )) ) 8.30 return 0; 8.31 8.32 if ( unlikely(!update_l2e(pl2e, ol2e, nl2e)) ) 8.33 @@ -827,9 +826,21 @@ static int do_extended_command(unsigned 8.34 8.35 switch ( cmd ) 8.36 { 8.37 - case MMUEXT_PIN_TABLE: 8.38 + case MMUEXT_PIN_L1_TABLE: 8.39 + case MMUEXT_PIN_L2_TABLE: 8.40 + 8.41 + /* When we pin an L1 page we now insist that the va 8.42 + backpointer (used for writable page tables) must still be 8.43 + mutable. This is an additional restriction even for guests 8.44 + that don't use writable page tables, but I don't think it 8.45 + will break anything as guests typically pin pages before 8.46 + they are used, hence they'll still be mutable. */ 8.47 + 8.48 okay = get_page_and_type_from_pagenr( 8.49 - pfn, PGT_l2_page_table, FOREIGNDOM); 8.50 + pfn, 8.51 + ((cmd==MMUEXT_PIN_L2_TABLE) ? 
8.52 + PGT_l2_page_table : (PGT_l1_page_table | PGT_va_mutable) ) , 8.53 + FOREIGNDOM); 8.54 8.55 if ( unlikely(!okay) ) 8.56 { 8.57 @@ -1184,7 +1195,6 @@ int do_mmu_update(mmu_update_t *ureqs, i 8.58 unsigned long prev_spfn = 0; 8.59 l1_pgentry_t *prev_spl1e = 0; 8.60 struct domain *d = current; 8.61 - u32 type_info; 8.62 8.63 perfc_incrc(calls_to_mmu_update); 8.64 perfc_addc(num_page_updates, count); 8.65 @@ -1233,11 +1243,10 @@ int do_mmu_update(mmu_update_t *ureqs, i 8.66 } 8.67 8.68 page = &frame_table[pfn]; 8.69 - switch ( (type_info = page->u.inuse.type_info) & PGT_type_mask ) 8.70 + switch ( (page->u.inuse.type_info & PGT_type_mask) ) 8.71 { 8.72 case PGT_l1_page_table: 8.73 - if ( likely(get_page_type( 8.74 - page, type_info & (PGT_type_mask|PGT_va_mask))) ) 8.75 + if ( likely(passive_get_page_type(page, PGT_l1_page_table)) ) 8.76 { 8.77 okay = mod_l1_entry((l1_pgentry_t *)va, 8.78 mk_l1_pgentry(req.val)); 8.79 @@ -1487,11 +1496,11 @@ void ptwr_reconnect_disconnected(unsigne 8.80 [ptwr_info[cpu].writable_l1>>PAGE_SHIFT]; 8.81 8.82 #ifdef PTWR_TRACK_DOMAIN 8.83 - if (ptwr_domain[cpu] != current->domain) 8.84 + if (ptwr_domain[cpu] != get_current()->domain) 8.85 printk("ptwr_reconnect_disconnected domain mismatch %d != %d\n", 8.86 - ptwr_domain[cpu], current->domain); 8.87 + ptwr_domain[cpu], get_current()->domain); 8.88 #endif 8.89 - PTWR_PRINTK(("[A] page fault in disconn space: addr %08lx space %08lx\n", 8.90 + PTWR_PRINTK(("[A] page fault in disconnected space: addr %08lx space %08lx\n", 8.91 addr, ptwr_info[cpu].disconnected << L2_PAGETABLE_SHIFT)); 8.92 pl2e = &linear_l2_table[ptwr_info[cpu].disconnected]; 8.93 8.94 @@ -1563,9 +1572,9 @@ void ptwr_flush_inactive(void) 8.95 int i, idx; 8.96 8.97 #ifdef PTWR_TRACK_DOMAIN 8.98 - if (ptwr_info[cpu].domain != current->domain) 8.99 + if (ptwr_info[cpu].domain != get_current()->domain) 8.100 printk("ptwr_flush_inactive domain mismatch %d != %d\n", 8.101 - ptwr_info[cpu].domain, current->domain); 8.102 + 
ptwr_info[cpu].domain, get_current()->domain); 8.103 #endif 8.104 #if 0 8.105 { 8.106 @@ -1646,9 +1655,9 @@ int ptwr_do_page_fault(unsigned long add 8.107 if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table ) 8.108 { 8.109 #ifdef PTWR_TRACK_DOMAIN 8.110 - if ( ptwr_info[cpu].domain != current->domain ) 8.111 + if ( ptwr_info[cpu].domain != get_current()->domain ) 8.112 printk("ptwr_do_page_fault domain mismatch %d != %d\n", 8.113 - ptwr_info[cpu].domain, current->domain); 8.114 + ptwr_info[cpu].domain, get_current()->domain); 8.115 #endif 8.116 pl2e = &linear_l2_table[(page->u.inuse.type_info & 8.117 PGT_va_mask) >> PGT_va_shift];
9.1 --- a/xen/common/schedule.c Sat Sep 04 06:28:25 2004 +0000 9.2 +++ b/xen/common/schedule.c Sat Sep 04 18:32:45 2004 +0000 9.3 @@ -374,6 +374,20 @@ void __enter_scheduler(void) 9.4 cleanup_writable_pagetable( 9.5 prev, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE); 9.6 9.7 +#ifdef PTWR_TRACK_DOMAIN 9.8 + { 9.9 + extern domid_t ptwr_domain[]; 9.10 + int cpu = smp_processor_id(); 9.11 + if (ptwr_domain[cpu] != prev->domain) 9.12 + printk("switch_to domain mismatch %d != %d\n", 9.13 + ptwr_domain[cpu], prev->domain); 9.14 + ptwr_domain[cpu] = next->domain; 9.15 + if (ptwr_disconnected[cpu] != ENTRIES_PER_L2_PAGETABLE || 9.16 + ptwr_writable_idx[cpu]) 9.17 + printk("switch_to ptwr dirty!!!\n"); 9.18 + } 9.19 +#endif 9.20 + 9.21 perfc_incrc(sched_ctx); 9.22 9.23 #if defined(WAKE_HISTO)
10.1 --- a/xen/include/asm-x86/mm.h Sat Sep 04 06:28:25 2004 +0000 10.2 +++ b/xen/include/asm-x86/mm.h Sat Sep 04 18:32:45 2004 +0000 10.3 @@ -71,9 +71,10 @@ struct pfn_info 10.4 /* Has this page been validated for use as its current type? */ 10.5 #define _PGT_validated 28 10.6 #define PGT_validated (1<<_PGT_validated) 10.7 - /* The 10 most significant bits of virt address if this is a L1 page table. */ 10.8 + /* 10-bit most significant bits of va address if used as l1 page table */ 10.9 #define PGT_va_shift 18 10.10 #define PGT_va_mask (((1<<10)-1)<<PGT_va_shift) 10.11 +#define PGT_va_mutable PGT_va_mask /* va backpointer is still mutable */ 10.12 /* 18-bit count of uses of this frame as its current type. */ 10.13 #define PGT_count_mask ((1<<18)-1) 10.14 10.15 @@ -198,6 +199,13 @@ static inline void put_page_type(struct 10.16 nx &= ~PGT_validated; 10.17 } 10.18 } 10.19 + else if ( unlikely( ((nx & PGT_count_mask) == 1) && 10.20 + test_bit(_PGC_guest_pinned, &page->count_info)) ) 10.21 + { 10.22 + /* if the page is pinned, but we're dropping the last reference 10.23 + then make the va backpointer mutable again */ 10.24 + nx |= PGT_va_mutable; 10.25 + } 10.26 } 10.27 while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) ); 10.28 } 10.29 @@ -222,15 +230,27 @@ static inline int get_page_type(struct p 10.30 nx &= ~(PGT_type_mask | PGT_va_mask | PGT_validated); 10.31 nx |= type; 10.32 /* No extra validation needed for writable pages. 
*/ 10.33 - if ( type == PGT_writable_page ) 10.34 + if ( (type & PGT_type_mask) == PGT_writable_page ) 10.35 nx |= PGT_validated; 10.36 } 10.37 } 10.38 - else if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != type) ) 10.39 + else if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) ) 10.40 { 10.41 - DPRINTK("Unexpected type or va backptr (saw %08x != exp %08x) " 10.42 - "for pfn %08lx\n", 10.43 - x & (PGT_type_mask|PGT_va_mask), type, page_to_pfn(page)); 10.44 + DPRINTK("Unexpected type (saw %08x != exp %08x) for pfn %08lx\n", 10.45 + x & PGT_type_mask, type, page_to_pfn(page)); 10.46 + return 0; 10.47 + } 10.48 + else if ( (x & PGT_va_mask) == PGT_va_mutable ) 10.49 + { 10.50 + /* The va_backpointer is currently mutable, hence we update it. */ 10.51 + nx &= ~PGT_va_mask; 10.52 + nx |= type; /* we know the actual type is correct */ 10.53 + } 10.54 + else if ( unlikely((x & PGT_va_mask) != (type & PGT_va_mask) ) ) 10.55 + { 10.56 + /* The va backpointer wasn't mutable, and is different :-( */ 10.57 + DPRINTK("Unexpected va backpointer (saw %08x != exp %08x) for pfn %08lx\n", 10.58 + x, type, page_to_pfn(page)); 10.59 return 0; 10.60 } 10.61 else if ( unlikely(!(x & PGT_validated)) ) 10.62 @@ -266,6 +286,55 @@ static inline int get_page_type(struct p 10.63 return 1; 10.64 } 10.65 10.66 +/* This 'passive' version of get_page_type doesn't attempt to validate 10.67 +the page, but just checks the type and increments the type count. The 10.68 +function is called while doing a NORMAL_PT_UPDATE of an entry in an L1 10.69 +page table: We want to 'lock' the page for the brief beriod while 10.70 +we're doing the update, but we're not actually linking it in to a 10.71 +pagetable. 
*/ 10.72 + 10.73 +static inline int passive_get_page_type(struct pfn_info *page, u32 type) 10.74 +{ 10.75 + u32 nx, x, y = page->u.inuse.type_info; 10.76 + again: 10.77 + do { 10.78 + x = y; 10.79 + nx = x + 1; 10.80 + if ( unlikely((nx & PGT_count_mask) == 0) ) 10.81 + { 10.82 + DPRINTK("Type count overflow on pfn %08lx\n", page_to_pfn(page)); 10.83 + return 0; 10.84 + } 10.85 + else if ( unlikely((x & PGT_count_mask) == 0) ) 10.86 + { 10.87 + if ( (x & (PGT_type_mask|PGT_va_mask)) != type ) 10.88 + { 10.89 + nx &= ~(PGT_type_mask | PGT_va_mask | PGT_validated); 10.90 + nx |= type; 10.91 + } 10.92 + } 10.93 + else if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) ) 10.94 + { 10.95 + DPRINTK("Unexpected type (saw %08x != exp %08x) for pfn %08lx\n", 10.96 + x & PGT_type_mask, type, page_to_pfn(page)); 10.97 + return 0; 10.98 + } 10.99 + else if ( unlikely(!(x & PGT_validated)) ) 10.100 + { 10.101 + /* Someone else is updating validation of this page. Wait... */ 10.102 + while ( (y = page->u.inuse.type_info) != x ) 10.103 + { 10.104 + rep_nop(); 10.105 + barrier(); 10.106 + } 10.107 + goto again; 10.108 + } 10.109 + } 10.110 + while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) ); 10.111 + 10.112 + return 1; 10.113 +} 10.114 + 10.115 10.116 static inline void put_page_and_type(struct pfn_info *page) 10.117 {
11.1 --- a/xen/include/hypervisor-ifs/hypervisor-if.h Sat Sep 04 06:28:25 2004 +0000 11.2 +++ b/xen/include/hypervisor-ifs/hypervisor-if.h Sat Sep 04 18:32:45 2004 +0000 11.3 @@ -104,9 +104,9 @@ 11.4 * ptr[1:0] == MMU_EXTENDED_COMMAND: 11.5 * val[7:0] -- MMUEXT_* command. 11.6 * 11.7 - * val[7:0] == MMUEXT_[UN]PIN_TABLE: 11.8 - * ptr[:2] -- Machine address of frame to be (un)pinned as a top-level p.t. 11.9 - * page. The frame must belong to the FD, if one is specified. 11.10 + * val[7:0] == MMUEXT_(UN)PIN_*_TABLE: 11.11 + * ptr[:2] -- Machine address of frame to be (un)pinned as a p.t. page. 11.12 + * The frame must belong to the FD, if one is specified. 11.13 * 11.14 * val[7:0] == MMUEXT_NEW_BASEPTR: 11.15 * ptr[:2] -- Machine address of new page-table base to install in MMU. 11.16 @@ -145,7 +145,10 @@ 11.17 #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ 11.18 #define MMU_MACHPHYS_UPDATE 2 /* ptr = MA of frame to modify entry for */ 11.19 #define MMU_EXTENDED_COMMAND 3 /* least 8 bits of val demux further */ 11.20 -#define MMUEXT_PIN_TABLE 0 /* ptr = MA of frame to pin */ 11.21 +#define MMUEXT_PIN_L1_TABLE 0 /* ptr = MA of frame to pin */ 11.22 +#define MMUEXT_PIN_L2_TABLE 1 /* ptr = MA of frame to pin */ 11.23 +#define MMUEXT_PIN_L3_TABLE 2 /* ptr = MA of frame to pin */ 11.24 +#define MMUEXT_PIN_L4_TABLE 3 /* ptr = MA of frame to pin */ 11.25 #define MMUEXT_UNPIN_TABLE 1 /* ptr = MA of frame to unpin */ 11.26 #define MMUEXT_NEW_BASEPTR 2 /* ptr = MA of new pagetable base */ 11.27 #define MMUEXT_TLB_FLUSH 3 /* ptr = NULL */