ia64/xen-unstable
changeset 18751:85ba96069dfb
x86: Use the right error code when testing for spurious pagefaults
Shadowed PV domains may take page faults with the PFEC_reserved_bit flag
set, which are then turned into page-not-present faults by the shadow
code. Since that changes the error code in the regs structure, we need
to remember the old error code when we later check for spurious page
faults, or we'll get false positives.
Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
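
To illustrate the false positive the patch avoids, here is a minimal, self-contained
C sketch (not Xen code; the structure, PFEC_* values and function names are
simplified stand-ins): if the spuriousness test reads regs->error_code after the
shadow/fixup path has rewritten it, a reserved-bit fault looks spurious, whereas a
copy cached on entry gives the right answer.

#include <stdio.h>

/* Simplified stand-ins for the structures and PFEC_* constants involved;
 * illustration only, not Xen code. */
#define PFEC_page_present  (1U << 0)
#define PFEC_reserved_bit  (1U << 3)

struct cpu_user_regs {
    unsigned int error_code;
};

/* Plays the role of the shadow/fixup path described in the commit message:
 * while handling the fault it rewrites regs->error_code, turning a
 * reserved-bit fault into a plain not-present fault. */
static void handle_shadow_fault(struct cpu_user_regs *regs)
{
    regs->error_code &= ~(PFEC_reserved_bit | PFEC_page_present);
}

/* Reserved-bit violations are never spurious, so the test must see the
 * error code the hardware originally reported. */
static int spurious_page_fault(unsigned int error_code)
{
    if ( error_code & PFEC_reserved_bit )
        return 0;
    return 1;   /* toy: treat everything else as possibly spurious */
}

int main(void)
{
    struct cpu_user_regs regs = {
        .error_code = PFEC_page_present | PFEC_reserved_bit
    };

    /* Cache the error code on entry, before anything can rewrite it... */
    unsigned int error_code = regs.error_code;

    handle_shadow_fault(&regs);

    /* ...so this check is not fooled by the rewritten regs->error_code. */
    printf("cached copy:       spurious=%d\n", spurious_page_fault(error_code));
    printf("regs->error_code:  spurious=%d\n", spurious_page_fault(regs.error_code));
    return 0;
}

Compiled and run, the sketch reports spurious=0 for the cached copy (correct: a
reserved-bit fault is never spurious) and spurious=1 for the rewritten value, which
is exactly the false positive the patch guards against.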
author    Keir Fraser <keir.fraser@citrix.com>
date      Thu Oct 30 15:04:27 2008 +0000 (2008-10-30)
parents   ed30f4efb728
children  f12d9595d07c
files     xen/arch/x86/traps.c
line diff
--- a/xen/arch/x86/traps.c	Thu Oct 30 14:53:24 2008 +0000
+++ b/xen/arch/x86/traps.c	Thu Oct 30 15:04:27 2008 +0000
@@ -1030,7 +1030,7 @@ static int handle_gdt_ldt_mapping_fault(
 #endif
 
 static int __spurious_page_fault(
-    unsigned long addr, struct cpu_user_regs *regs)
+    unsigned long addr, unsigned int error_code)
 {
     unsigned long mfn, cr3 = read_cr3();
 #if CONFIG_PAGING_LEVELS >= 4
@@ -1052,17 +1052,17 @@ static int __spurious_page_fault(
         return 0;
 
     /* Reserved bit violations are never spurious faults. */
-    if ( regs->error_code & PFEC_reserved_bit )
+    if ( error_code & PFEC_reserved_bit )
         return 0;
 
     required_flags  = _PAGE_PRESENT;
-    if ( regs->error_code & PFEC_write_access )
+    if ( error_code & PFEC_write_access )
         required_flags |= _PAGE_RW;
-    if ( regs->error_code & PFEC_user_mode )
+    if ( error_code & PFEC_user_mode )
         required_flags |= _PAGE_USER;
 
     disallowed_flags = 0;
-    if ( regs->error_code & PFEC_insn_fetch )
+    if ( error_code & PFEC_insn_fetch )
         disallowed_flags |= _PAGE_NX;
 
     mfn = cr3 >> PAGE_SHIFT;
@@ -1120,7 +1120,7 @@ static int __spurious_page_fault(
     dprintk(XENLOG_WARNING, "Spurious fault in domain %u:%u "
             "at addr %lx, e/c %04x\n",
             current->domain->domain_id, current->vcpu_id,
-            addr, regs->error_code);
+            addr, error_code);
 #if CONFIG_PAGING_LEVELS >= 4
     dprintk(XENLOG_WARNING, " l4e = %"PRIpte"\n", l4e_get_intpte(l4e));
 #endif
@@ -1129,14 +1129,11 @@ static int __spurious_page_fault(
 #endif
     dprintk(XENLOG_WARNING, " l2e = %"PRIpte"\n", l2e_get_intpte(l2e));
     dprintk(XENLOG_WARNING, " l1e = %"PRIpte"\n", l1e_get_intpte(l1e));
-#ifndef NDEBUG
-    show_registers(regs);
-#endif
     return 1;
 }
 
 static int spurious_page_fault(
-    unsigned long addr, struct cpu_user_regs *regs)
+    unsigned long addr, unsigned int error_code)
 {
     unsigned long flags;
     int           is_spurious;
@@ -1146,7 +1143,7 @@ static int spurious_page_fault(
      * page tables from becoming invalid under our feet during the walk.
      */
     local_irq_save(flags);
-    is_spurious = __spurious_page_fault(addr, regs);
+    is_spurious = __spurious_page_fault(addr, error_code);
     local_irq_restore(flags);
 
     return is_spurious;
@@ -1208,9 +1205,13 @@ static int fixup_page_fault(unsigned lon
 asmlinkage void do_page_fault(struct cpu_user_regs *regs)
 {
     unsigned long addr, fixup;
+    unsigned int error_code;
 
     addr = read_cr2();
 
+    /* fixup_page_fault() might change regs->error_code, so cache it here. */
+    error_code = regs->error_code;
+
     DEBUGGER_trap_entry(TRAP_page_fault, regs);
 
     perfc_incr(page_faults);
@@ -1220,7 +1221,7 @@ asmlinkage void do_page_fault(struct cpu
 
     if ( unlikely(!guest_mode(regs)) )
     {
-        if ( spurious_page_fault(addr, regs) )
+        if ( spurious_page_fault(addr, error_code) )
             return;
 
         if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
@@ -1239,11 +1240,11 @@ asmlinkage void do_page_fault(struct cpu
         panic("FATAL PAGE FAULT\n"
               "[error_code=%04x]\n"
               "Faulting linear address: %p\n",
-              regs->error_code, _p(addr));
+              error_code, _p(addr));
     }
 
     if ( unlikely(current->domain->arch.suppress_spurious_page_faults
-                  && spurious_page_fault(addr, regs)) )
+                  && spurious_page_fault(addr, error_code)) )
         return;
 
     propagate_page_fault(addr, regs->error_code);