ia64/xen-unstable
changeset 6073:4125b9fea242
merge?
| field    | value |
|----------|-------|
| author   | cl349@firebug.cl.cam.ac.uk |
| date     | Tue Aug 09 12:51:06 2005 +0000 (2005-08-09) |
| parents  | 3cd4f2c38aaf a1f7e01b0990 |
| children | 356b14d5ffc8 |
| files    | tools/python/xen/xm/main.py xen/arch/x86/mm.c xen/include/asm-x86/page.h |
line diff
```diff
--- a/xen/arch/x86/mm.c	Tue Aug 09 12:50:22 2005 +0000
+++ b/xen/arch/x86/mm.c	Tue Aug 09 12:51:06 2005 +0000
@@ -138,7 +138,7 @@ static struct {
  * Returns the current foreign domain; defaults to the currently-executing
  * domain if a foreign override hasn't been specified.
  */
-#define FOREIGNDOM (percpu_info[smp_processor_id()].foreign ? : current->domain)
+#define FOREIGNDOM (percpu_info[smp_processor_id()].foreign ?: current->domain)
 
 /* Private domain structs for DOMID_XEN and DOMID_IO. */
 static struct domain *dom_xen, *dom_io;
@@ -903,7 +903,8 @@ static int alloc_l4_table(struct pfn_inf
         return 1;
     ASSERT(!shadow_mode_refcounts(d));
 
-    for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ ) {
+    for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
+    {
         if ( !l3_backptr(&vaddr, i, type) )
             goto fail;
 
@@ -1122,10 +1123,9 @@ static int mod_l2_entry(l2_pgentry_t *pl
             return 0;
         }
     }
-    else
+    else if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e)) )
     {
-        if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e)) )
-            return 0;
+        return 0;
     }
 
     put_page_from_l2e(ol2e, pfn);
@@ -1188,23 +1188,16 @@ static int mod_l3_entry(l3_pgentry_t *pl
 
         if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e)) )
         {
-            BUG_ON(!create_pae_xen_mappings(pl3e));
             put_page_from_l3e(nl3e, pfn);
             return 0;
         }
-
-        put_page_from_l3e(ol3e, pfn);
-        return 1;
     }
-    else
-    {
-        if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e)) )
-        {
-            BUG_ON(!create_pae_xen_mappings(pl3e));
-            return 0;
-        }
-    }
-
+    else if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e)) )
+    {
+        return 0;
+    }
+
+    BUG_ON(!create_pae_xen_mappings(pl3e));
     put_page_from_l3e(ol3e, pfn);
     return 1;
 }
@@ -1254,11 +1247,10 @@ static int mod_l4_entry(l4_pgentry_t *pl
             return 0;
         }
     }
-    else
+    else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e)) )
     {
-        if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e)) )
-            return 0;
-    }
+        return 0;
+    }
 
     put_page_from_l4e(ol4e, pfn);
     return 1;
@@ -1409,11 +1401,7 @@ int get_page_type(struct pfn_info *page,
         }
         else if ( unlikely((x & PGT_count_mask) == 0) )
         {
-#ifdef CONFIG_X86_64
-            if ( (x & (PGT_type_mask|PGT_va_mask)) != (type & ~PGT_va_mask))
-#else
             if ( (x & (PGT_type_mask|PGT_va_mask)) != type )
-#endif
             {
                 if ( (x & PGT_type_mask) != (type & PGT_type_mask) )
                 {
@@ -1445,17 +1433,14 @@ int get_page_type(struct pfn_info *page,
         }
         else
         {
-#ifdef CONFIG_X86_64
-            if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != (type & ~PGT_va_mask)) )
-#else
             if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != type) )
-#endif
             {
                 if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
                 {
                     if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
                          ((type & PGT_type_mask) != PGT_l1_page_table) )
-                        MEM_LOG("Bad type (saw %" PRtype_info "!= exp %" PRtype_info ") for pfn %lx",
+                        MEM_LOG("Bad type (saw %" PRtype_info
+                                "!= exp %" PRtype_info ") for pfn %lx",
                                 x, type, page_to_pfn(page));
                     return 0;
                 }
@@ -1718,9 +1703,6 @@ int do_mmuext_op(
             type = PGT_l1_page_table | PGT_va_mutable;
 
         pin_page:
-#if CONFIG_PAGING_LEVELS >= 4
-            type |= PGT_va_mutable;
-#endif
            if ( shadow_mode_refcounts(FOREIGNDOM) )
                 type = PGT_writable_page;
 
```
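The FOREIGNDOM hunk above is whitespace-only, but the construct it touches is easy to misread: `a ?: b` is GCC's conditional with an omitted middle operand, which yields `a` when `a` evaluates nonzero (evaluating it only once) and `b` otherwise; `? :` with interior whitespace is the same extension. A minimal standalone sketch of the semantics the macro relies on — plain GCC C, with `foreign_override()` as a hypothetical stand-in for the per-CPU foreign field, not Xen code:

```c
#include <stdio.h>

static int probes;

/* Hypothetical stand-in for percpu_info[...].foreign: returns NULL when
 * no foreign-domain override has been installed. */
static const char *foreign_override(void)
{
    probes++;
    return NULL;
}

int main(void)
{
    /* GCC extension: `x ?: y` behaves like `x ? x : y`, except that x
     * is evaluated exactly once -- which matters when x has side effects. */
    const char *dom = foreign_override() ?: "current->domain";

    printf("%s (override probed %d time(s))\n", dom, probes);
    return 0;
}
```

With an override installed, the same expression returns it untouched; the hunk's only change is dropping the space inside `? :`.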
```diff
@@ -1744,16 +1726,16 @@ int do_mmuext_op(
 
 #ifndef CONFIG_X86_PAE /* Unsafe on PAE because of Xen-private mappings. */
         case MMUEXT_PIN_L2_TABLE:
-            type = PGT_l2_page_table;
+            type = PGT_l2_page_table | PGT_va_mutable;
             goto pin_page;
 #endif
 
         case MMUEXT_PIN_L3_TABLE:
-            type = PGT_l3_page_table;
+            type = PGT_l3_page_table | PGT_va_mutable;
             goto pin_page;
 
         case MMUEXT_PIN_L4_TABLE:
-            type = PGT_l4_page_table;
+            type = PGT_l4_page_table | PGT_va_mutable;
             goto pin_page;
 
         case MMUEXT_UNPIN_TABLE:
@@ -1946,9 +1928,9 @@ int do_mmuext_op(
              unlikely(_nd != _d) )
         {
             MEM_LOG("Bad page values %lx: ed=%p(%u), sd=%p,"
-                    " caf=%08x, taf=%" PRtype_info "\n", page_to_pfn(page),
-                    d, d->domain_id, unpickle_domptr(_nd), x,
-                    page->u.inuse.type_info);
+                    " caf=%08x, taf=%" PRtype_info "\n",
+                    page_to_pfn(page), d, d->domain_id,
+                    unpickle_domptr(_nd), x, page->u.inuse.type_info);
             okay = 0;
             goto reassign_fail;
         }
@@ -2111,7 +2093,8 @@ int do_mmu_update(
                     l1e = l1e_from_intpte(req.val);
                     okay = mod_l1_entry(va, l1e);
                     if ( okay && unlikely(shadow_mode_enabled(d)) )
-                        shadow_l1_normal_pt_update(d, req.ptr, l1e, &sh_mapcache);
+                        shadow_l1_normal_pt_update(
+                            d, req.ptr, l1e, &sh_mapcache);
                     put_page_type(page);
                 }
                 break;
@@ -2124,9 +2107,11 @@ int do_mmu_update(
 
                     /* FIXME: doesn't work with PAE */
                     l2e = l2e_from_intpte(req.val);
-                    okay = mod_l2_entry((l2_pgentry_t *)va, l2e, mfn, type_info);
+                    okay = mod_l2_entry(
+                        (l2_pgentry_t *)va, l2e, mfn, type_info);
                     if ( okay && unlikely(shadow_mode_enabled(d)) )
-                        shadow_l2_normal_pt_update(d, req.ptr, l2e, &sh_mapcache);
+                        shadow_l2_normal_pt_update(
+                            d, req.ptr, l2e, &sh_mapcache);
                     put_page_type(page);
                 }
                 break;
@@ -2142,7 +2127,8 @@ int do_mmu_update(
                     l3e = l3e_from_intpte(req.val);
                     okay = mod_l3_entry(va, l3e, mfn, type_info);
                     if ( okay && unlikely(shadow_mode_enabled(d)) )
-                        shadow_l3_normal_pt_update(d, req.ptr, l3e, &sh_mapcache);
+                        shadow_l3_normal_pt_update(
+                            d, req.ptr, l3e, &sh_mapcache);
                     put_page_type(page);
                 }
                 break;
@@ -2158,7 +2144,8 @@ int do_mmu_update(
                     l4e = l4e_from_intpte(req.val);
                     okay = mod_l4_entry(va, l4e, mfn, type_info);
                     if ( okay && unlikely(shadow_mode_enabled(d)) )
-                        shadow_l4_normal_pt_update(d, req.ptr, l4e, &sh_mapcache);
+                        shadow_l4_normal_pt_update(
+                            d, req.ptr, l4e, &sh_mapcache);
                     put_page_type(page);
                 }
                 break;
@@ -2205,7 +2192,8 @@ int do_mmu_update(
             if ( unlikely(shadow_mode_translate(FOREIGNDOM) && IS_PRIV(d)) )
             {
                 shadow_lock(FOREIGNDOM);
-                printk("privileged guest dom%d requests pfn=%lx to map mfn=%lx for dom%d\n",
+                printk("privileged guest dom%d requests pfn=%lx to "
+                       "map mfn=%lx for dom%d\n",
                        d->domain_id, gpfn, mfn, FOREIGNDOM->domain_id);
                 set_machinetophys(mfn, gpfn);
                 set_p2m_entry(FOREIGNDOM, gpfn, mfn, &sh_mapcache, &mapcache);
@@ -2629,18 +2617,12 @@ int ptwr_debug = 0x0;
 #endif
 
 /* Re-validate a given p.t. page, given its prior snapshot */
-int revalidate_l1(struct domain *d, l1_pgentry_t *l1page, l1_pgentry_t *snapshot)
+int revalidate_l1(
+    struct domain *d, l1_pgentry_t *l1page, l1_pgentry_t *snapshot)
 {
     l1_pgentry_t ol1e, nl1e;
     int modified = 0, i;
 
-#if 0
-    if ( d->domain_id )
-        printk("%s: l1page mfn=%lx snapshot mfn=%lx\n", __func__,
-               l1e_get_pfn(linear_pg_table[l1_linear_offset((unsigned long)l1page)]),
-               l1e_get_pfn(linear_pg_table[l1_linear_offset((unsigned long)snapshot)]));
-#endif
-
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
     {
         ol1e = snapshot[i];
@@ -2690,9 +2672,9 @@ void ptwr_flush(struct domain *d, const
     l1_pgentry_t  *pl1e;
     l2_pgentry_t  *pl2e;
     unsigned int   modified;
-#if defined(__x86_64__)
+
+#ifdef CONFIG_X86_64
     struct vcpu *v = current;
-    /* If in user mode, switch to kernel mode just to read LDT mapping. */
     extern void toggle_guest_mode(struct vcpu *);
     int user_mode = !(v->arch.flags & TF_kernel_mode);
 #endif
@@ -2700,8 +2682,10 @@ void ptwr_flush(struct domain *d, const
     ASSERT(!shadow_mode_enabled(d));
 
     if ( unlikely(d->arch.ptwr[which].vcpu != current) )
-        write_ptbase(d->arch.ptwr[which].vcpu);
-    else
+        /* Don't use write_ptbase: it may switch to guest_user on x86/64! */
+        write_cr3(pagetable_get_paddr(
+            d->arch.ptwr[which].vcpu->arch.guest_table));
+    else
         TOGGLE_MODE();
 
     l1va = d->arch.ptwr[which].l1va;
@@ -2803,7 +2787,7 @@ static int ptwr_emulated_update(
     /* Align address; read full word. */
     addr &= ~(sizeof(physaddr_t)-1);
     if ( (rc = x86_emulate_read_std(addr, (unsigned long *)&full,
-                                     sizeof(physaddr_t))) )
+                                    sizeof(physaddr_t))) )
         return rc;
     /* Mask out bits provided by caller. */
     full &= ~((((physaddr_t)1 << (bytes*8)) - 1) << (offset*8));
@@ -2829,7 +2813,8 @@ static int ptwr_emulated_update(
          ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
          (page_get_owner(page) != d) )
     {
-        MEM_LOG("ptwr_emulate: Page is mistyped or bad pte (%lx, %" PRtype_info ")\n",
+        MEM_LOG("ptwr_emulate: Page is mistyped or bad pte "
+                "(%lx, %" PRtype_info ")\n",
                 l1e_get_pfn(pte), page->u.inuse.type_info);
         return X86EMUL_UNHANDLEABLE;
     }
@@ -2902,42 +2887,13 @@ static struct x86_mem_emulator ptwr_mem_
     .cmpxchg8b_emulated = ptwr_emulated_cmpxchg8b
 };
 
-#if defined(__x86_64__)
-/*
- * Returns zero on if mapped, or -1 otherwise
- */
-static int __not_mapped(l2_pgentry_t *pl2e)
-{
-    unsigned long page = read_cr3();
-
-    page &= PAGE_MASK;
-    page = ((unsigned long *) __va(page))[l4_table_offset((unsigned long)pl2e)];
-    if ( !(page & _PAGE_PRESENT) )
-        return -1;
-
-    page &= PAGE_MASK;
-    page = ((unsigned long *) __va(page))[l3_table_offset((unsigned long)pl2e)];
-    if ( !(page & _PAGE_PRESENT) )
-        return -1;
-
-    page &= PAGE_MASK;
-    page = ((unsigned long *) __va(page))[l2_table_offset((unsigned long)pl2e)];
-    if ( !(page & _PAGE_PRESENT) )
-        return -1;
-
-    return 0;
-}
-#else
-#define __not_mapped(p) (0)
-#endif
-
 /* Write page fault handler: check if guest is trying to modify a PTE. */
 int ptwr_do_page_fault(struct domain *d, unsigned long addr)
 {
     unsigned long    pfn;
     struct pfn_info *page;
     l1_pgentry_t     pte;
-    l2_pgentry_t    *pl2e;
+    l2_pgentry_t    *pl2e, l2e;
     int              which;
     unsigned long    l2_idx;
 
@@ -2984,10 +2940,7 @@ int ptwr_do_page_fault(struct domain *d,
     pl2e = &__linear_l2_table[l2_idx];
     which = PTWR_PT_INACTIVE;
 
-    if ( unlikely(__not_mapped(pl2e)) )
-        goto inactive;
-
-    if ( (l2e_get_pfn(*pl2e)) == pfn )
+    if ( (__get_user(l2e.l2, &pl2e->l2) == 0) && (l2e_get_pfn(l2e) == pfn) )
     {
         /*
          * Check the PRESENT bit to set ACTIVE mode.
@@ -2995,14 +2948,12 @@ int ptwr_do_page_fault(struct domain *d,
          * ACTIVE p.t. (it may be the same p.t. mapped at another virt addr).
          * The ptwr_flush call below will restore the PRESENT bit.
          */
-        if ( likely(l2e_get_flags(*pl2e) & _PAGE_PRESENT) ||
+        if ( likely(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
              (d->arch.ptwr[PTWR_PT_ACTIVE].l1va &&
               (l2_idx == d->arch.ptwr[PTWR_PT_ACTIVE].l2_idx)) )
             which = PTWR_PT_ACTIVE;
     }
 
-  inactive:
-
     /*
      * If this is a multi-processor guest then ensure that the page is hooked
     * into at most one L2 table, which must be the one running on this VCPU.
```
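The last two hunks are the substantive ptwr change: instead of proving that a linear-page-table slot is mapped by hand-walking L4/L3/L2 (`__not_mapped()`), the fault handler now simply attempts the read with `__get_user()`, which returns nonzero instead of faulting when the slot is unmapped, and the 64-bit-only `inactive:` escape path disappears. Below is a userspace analogue of such a fault-tolerant read — illustrative only, and signal-based, whereas Xen's `__get_user()` relies on exception-fixup tables:

```c
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>

static sigjmp_buf probe_env;

static void on_segv(int sig)
{
    (void)sig;
    siglongjmp(probe_env, 1);          /* unwind out of the faulting read */
}

/* Same contract as __get_user(): returns 0 and fills *val on success,
 * nonzero if the source address turns out not to be mapped. */
static int get_user_analogue(unsigned long *val, const void *addr)
{
    if ( sigsetjmp(probe_env, 1) )
        return -1;                     /* arrived here via on_segv() */
    *val = *(const volatile unsigned long *)addr;   /* may fault */
    return 0;
}

int main(void)
{
    struct sigaction sa = { .sa_handler = on_segv };
    sigemptyset(&sa.sa_mask);
    sigaction(SIGSEGV, &sa, NULL);

    unsigned long present = 42, out;
    int rc;

    /* A PROT_NONE page stands in for an unmapped page-table slot. */
    void *hole = mmap(NULL, 4096, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if ( hole == MAP_FAILED )
        return 1;

    rc = get_user_analogue(&out, &present);
    printf("mapped:   rc=%d val=%lu\n", rc, out);
    rc = get_user_analogue(&out, hole);
    printf("unmapped: rc=%d\n", rc);
    return 0;
}
```

The trade in `ptwr_do_page_fault()` is the same as here: one guarded access, `__get_user(l2e.l2, &pl2e->l2)`, replaces a three-level walk plus a goto.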
```diff
--- a/xen/include/asm-x86/page.h	Tue Aug 09 12:50:22 2005 +0000
+++ b/xen/include/asm-x86/page.h	Tue Aug 09 12:51:06 2005 +0000
@@ -208,21 +208,21 @@ typedef struct { u64 pfn; } pagetable_t;
            + DOMAIN_ENTRIES_PER_L4_PAGETABLE)
 #endif
 
-#define VA_LINEAR_PT_VIRT_START (LINEAR_PT_VIRT_START & VADDR_MASK)
-#define linear_l1_table \
+#define LINEAR_PT_OFFSET (LINEAR_PT_VIRT_START & VADDR_MASK)
+#define linear_l1_table                                             \
     ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
-#define __linear_l2_table \
-    ((l2_pgentry_t *)(LINEAR_PT_VIRT_START + \
-     (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0))))
-#define __linear_l3_table \
-    ((l3_pgentry_t *)(LINEAR_PT_VIRT_START + \
-     (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) + \
-     (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1))))
-#define __linear_l4_table \
-    ((l4_pgentry_t *)(LINEAR_PT_VIRT_START + \
-     (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) + \
-     (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1)) + \
-     (VA_LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<2))))
+#define __linear_l2_table                                           \
+    ((l2_pgentry_t *)(LINEAR_PT_VIRT_START +                        \
+     (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<0))))
+#define __linear_l3_table                                           \
+    ((l3_pgentry_t *)(LINEAR_PT_VIRT_START +                        \
+     (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<0)) +                   \
+     (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<1))))
+#define __linear_l4_table                                           \
+    ((l4_pgentry_t *)(LINEAR_PT_VIRT_START +                        \
+     (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<0)) +                   \
+     (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<1)) +                   \
+     (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<2))))
 
 #define linear_pg_table linear_l1_table
 #define linear_l2_table(_ed) ((_ed)->arch.guest_vtable)
```
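The page.h hunk renames `VA_LINEAR_PT_VIRT_START` to `LINEAR_PT_OFFSET` and realigns the continuation backslashes; the recursive-mapping arithmetic itself is untouched. The idea behind it: with the page tables self-mapped at `LINEAR_PT_VIRT_START`, each deeper view (L2, then L3, then L4 entries) sits at the previous view plus the self-map offset shifted right by a further `PAGETABLE_ORDER` bits. A standalone check of that arithmetic — the constants below (4-level paging with 512-entry tables, a self-map in PML4 slot 257, 48 virtual-address bits) are illustrative assumptions, not values quoted from this tree:

```c
#include <stdio.h>

/* Illustrative constants only, not taken from the Xen source: assume
 * x86/64 4-level paging with 512-entry tables (PAGETABLE_ORDER == 9)
 * and a linear-page-table self-map occupying PML4 slot 257. */
#define PAGETABLE_ORDER       9
#define SELF_SLOT             257UL
#define LINEAR_PT_VIRT_START  (0xffff000000000000UL | (SELF_SLOT << 39))
#define VADDR_MASK            ((1UL << 48) - 1)   /* canonical bits */

/* As in the patched page.h: the self-map's non-sign-extended offset. */
#define LINEAR_PT_OFFSET      (LINEAR_PT_VIRT_START & VADDR_MASK)

int main(void)
{
    /* L1 window: all PTEs appear here, indexed by (va >> PAGE_SHIFT). */
    unsigned long l1 = LINEAR_PT_VIRT_START;

    /* Each deeper view adds the offset shifted by one more level:
     * shifts of 9, 18, 27 bits with 512-entry tables. */
    unsigned long l2 = l1 + (LINEAR_PT_OFFSET >> (1 * PAGETABLE_ORDER));
    unsigned long l3 = l2 + (LINEAR_PT_OFFSET >> (2 * PAGETABLE_ORDER));
    unsigned long l4 = l3 + (LINEAR_PT_OFFSET >> (3 * PAGETABLE_ORDER));

    /* Cross-check against slot composition: going one level deeper
     * re-applies the self-map slot at the next-lower index position. */
    unsigned long l4_direct = LINEAR_PT_VIRT_START
        + (SELF_SLOT << 30) + (SELF_SLOT << 21) + (SELF_SLOT << 12);

    printf("l1 %#lx\nl2 %#lx\nl3 %#lx\nl4 %#lx\n", l1, l2, l3, l4);
    printf("l4 via slot composition: %#lx (%s)\n", l4_direct,
           l4 == l4_direct ? "match" : "MISMATCH");
    return 0;
}
```

Note that the nesting relationship is per level, i.e. successive *multiples* of `PAGETABLE_ORDER` (9, 18, 27); the macros spell the third shift as `PAGETABLE_ORDER<<2`, which coincides with this only for the first two terms.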