ia64/xen-unstable
changeset 7450:7d50f64aada7
Fix NMI race with context switch.
The machine used to auto-reboot if an NMI was received in a critical
time window while context-switching domains. There is a small window
during a domain context switch in which the GDT may become unmapped
(after CR3 is updated and before GDTR is loaded with the new GDT).
If an NMI is received during this window, a triple fault is
triggered, causing the machine to auto-reboot.
Bug found and original patch proposed by Jose Renato Santos
<jsantos@hpl.hp.com>.
Signed-off-by: Keir Fraser <keir@xensource.com>
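For context, the window described above corresponds roughly to the ordering below. This is a minimal sketch, not Xen's actual context-switch path; the function, the struct gdt_desc type, and the inline assembly are illustrative assumptions.

    /* Minimal sketch of the racy ordering (illustrative only, not Xen code). */
    struct gdt_desc {
        unsigned short limit;
        unsigned long  base;
    } __attribute__((packed));

    static void switch_tables_sketch(unsigned long next_cr3,
                                     const struct gdt_desc *next_gdt)
    {
        /* Switch to the next domain's page tables. From this point the
         * virtual address currently held in GDTR may no longer be mapped. */
        asm volatile ( "mov %0, %%cr3" : : "r" (next_cr3) : "memory" );

        /*
         * Window: an NMI delivered here must reference the GDT to load the
         * handler's code segment. With the GDT page unmapped, the CPU faults
         * while handling the fault and escalates to a triple fault, which
         * resets the machine.
         */

        /* Load the new GDT; from here on the descriptor table is valid. */
        asm volatile ( "lgdt %0" : : "m" (*next_gdt) );
    }

The patch below does not shrink this window; instead it guarantees that whatever per-VCPU GDT address GDTR still holds is mapped in every domain's page tables, so an NMI in the window always finds a valid table.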
author    kaf24@firebug.cl.cam.ac.uk
date      Thu Oct 20 11:25:55 2005 +0100 (2005-10-20)
parents   5d746b39e873
children  6450e40ad21a
files     xen/arch/x86/domain.c xen/arch/x86/setup.c
line diff
--- a/xen/arch/x86/domain.c  Thu Oct 20 06:01:11 2005 -0400
+++ b/xen/arch/x86/domain.c  Thu Oct 20 11:25:55 2005 +0100
@@ -226,11 +226,9 @@ struct vcpu *alloc_vcpu_struct(struct do
 
     if ( (v->vcpu_id = vcpu_id) != 0 )
     {
-        v->arch.schedule_tail = d->vcpu[0]->arch.schedule_tail;
+        v->arch.schedule_tail = d->vcpu[0]->arch.schedule_tail;
         v->arch.perdomain_ptes =
             d->arch.mm_perdomain_pt + (vcpu_id << PDPT_VCPU_SHIFT);
-        v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
-            l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
     }
 
     return v;
@@ -256,6 +254,7 @@ void free_perdomain_pt(struct domain *d)
 void arch_do_createdomain(struct vcpu *v)
 {
     struct domain *d = v->domain;
+    int vcpuid;
 
     if ( is_idle_task(d) )
         return;
@@ -275,8 +274,20 @@ void arch_do_createdomain(struct vcpu *v
     set_pfn_from_mfn(virt_to_phys(d->arch.mm_perdomain_pt) >> PAGE_SHIFT,
                      INVALID_M2P_ENTRY);
     v->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
-    v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
-        l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
+
+    /*
+     * Map Xen segments into every VCPU's GDT, irrespective of whether every
+     * VCPU will actually be used. This avoids an NMI race during context
+     * switch: if we take an interrupt after switching CR3 but before switching
+     * GDT, and the old VCPU# is invalid in the new domain, we would otherwise
+     * try to load CS from an invalid table.
+     */
+    for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
+    {
+        d->arch.mm_perdomain_pt[
+            (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE] =
+            l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
+    }
 
     v->arch.guest_vtable = __linear_l2_table;
     v->arch.shadow_vtable = __shadow_linear_l2_table;
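The loop added above fills the reserved GDT slot for every possible VCPU in the per-domain page table, not only for VCPUs that have been allocated. A rough illustration of the indexing it relies on follows; the constant values are placeholders, not Xen's real PDPT_VCPU_SHIFT and FIRST_RESERVED_GDT_PAGE.

    /*
     * Illustration of the mm_perdomain_pt indexing used by the loop above.
     * The constant values below are placeholders, not Xen's real ones.
     */
    #define PDPT_VCPU_SHIFT          5   /* placeholder: PTE slots per VCPU      */
    #define FIRST_RESERVED_GDT_PAGE  14  /* placeholder: GDT page within the run */

    /* Each VCPU owns a contiguous run of PTEs in mm_perdomain_pt; the Xen GDT
     * page sits at a fixed offset inside that run. */
    static inline unsigned int gdt_pte_slot(unsigned int vcpuid)
    {
        return (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE;
    }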
--- a/xen/arch/x86/setup.c  Thu Oct 20 06:01:11 2005 -0400
+++ b/xen/arch/x86/setup.c  Thu Oct 20 11:25:55 2005 +0100
@@ -141,6 +141,7 @@ static void __init do_initcalls(void)
 static void __init start_of_day(void)
 {
     int i;
+    unsigned long vgdt;
 
     early_cpu_init();
 
@@ -158,10 +159,17 @@ static void __init start_of_day(void)
 
     arch_do_createdomain(current);
 
-    /* Map default GDT into their final position in the idle page table. */
-    map_pages_to_xen(
-        GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE,
-        virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR);
+    /*
+     * Map default GDT into its final positions in the idle page table. As
+     * noted in arch_do_createdomain(), we must map for every possible VCPU#.
+     */
+    vgdt = GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE;
+    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+    {
+        map_pages_to_xen(
+            vgdt, virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR);
+        vgdt += 1 << PDPT_VCPU_VA_SHIFT;
+    }
 
     find_smp_config();
 
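In the idle page table the same mapping is established one per-VCPU virtual slot at a time, stepping the virtual address by one per-VCPU region per iteration. A rough sketch of that address arithmetic is below; the shift value is an assumed placeholder for Xen's PDPT_VCPU_VA_SHIFT, and gdt_virt_base stands in for GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE. Only the single physical gdt_table page backs all of these aliases.

    /*
     * Sketch of the virtual-address stride used by the setup loop above.
     * The shift value is a placeholder, not Xen's real PDPT_VCPU_VA_SHIFT.
     */
    #define PDPT_VCPU_VA_SHIFT  17  /* placeholder: bytes of VA per VCPU region */

    /* Alias for VCPU n: its reserved GDT page, one per-VCPU region above the
     * previous one; every alias maps the same physical gdt_table page. */
    static unsigned long vcpu_gdt_va(unsigned long gdt_virt_base,
                                     unsigned int vcpuid)
    {
        return gdt_virt_base + ((unsigned long)vcpuid << PDPT_VCPU_VA_SHIFT);
    }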