ia64/xen-unstable
changeset 6128:57b3fdca5dae
Support VCPU migration for VMX guests.
Add a hook to support CPU migration for VMX domains.
Reorganize the low-level asm code to support relaunching a VMCS on a
different logical CPU.
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
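In outline, the new hook works as follows. This is a condensed restatement of the x86 vcpu_migrate_cpu() hunk below, not a drop-in implementation:

    /* Condensed from xen/arch/x86/domain.c below.  A VMCS must be
     * cleared with VMCLEAR before it can be made active on a different
     * logical CPU, so migration clears it now and routes the vcpu's
     * next wakeup through the relaunch path, which reloads the VMCS on
     * the destination CPU. */
    void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
    {
        if ( v->processor == newcpu )
            return;

        set_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
        v->processor = newcpu;

        if ( VMX_DOMAIN(v) )
        {
            __vmpclear(virt_to_phys(v->arch.arch_vmx.vmcs));
            v->arch.schedule_tail = arch_vmx_do_relaunch;
        }
    }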
author    kaf24@firebug.cl.cam.ac.uk
date      Thu Aug 11 21:38:58 2005 +0000 (2005-08-11)
parents   f972da9a2dda
children  5ec5f6960507
files     xen/arch/ia64/domain.c xen/arch/x86/domain.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_64/entry.S xen/common/dom0_ops.c xen/include/asm-x86/vmx.h xen/include/asm-x86/vmx_vmcs.h xen/include/xen/domain.h
line diff
--- a/xen/arch/ia64/domain.c	Thu Aug 11 21:19:45 2005 +0000
+++ b/xen/arch/ia64/domain.c	Thu Aug 11 21:38:58 2005 +0000
@@ -1398,3 +1398,12 @@ void domain_pend_keyboard_interrupt(int
 {
     vcpu_pend_interrupt(dom0->vcpu[0],irq);
 }
+
+void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
+{
+    if ( v->processor == newcpu )
+        return;
+
+    set_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
+    v->processor = newcpu;
+}
--- a/xen/arch/x86/domain.c	Thu Aug 11 21:19:45 2005 +0000
+++ b/xen/arch/x86/domain.c	Thu Aug 11 21:38:58 2005 +0000
@@ -295,27 +295,24 @@ void arch_do_boot_vcpu(struct vcpu *v)
         l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
 }
 
+void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
+{
+    if ( v->processor == newcpu )
+        return;
+
+    set_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
+    v->processor = newcpu;
+
+    if ( VMX_DOMAIN(v) )
+    {
+        __vmpclear(virt_to_phys(v->arch.arch_vmx.vmcs));
+        v->arch.schedule_tail = arch_vmx_do_relaunch;
+    }
+}
+
 #ifdef CONFIG_VMX
 static int vmx_switch_on;
 
-void arch_vmx_do_resume(struct vcpu *v)
-{
-    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
-
-    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
-    vmx_do_resume(v);
-    reset_stack_and_jump(vmx_asm_do_resume);
-}
-
-void arch_vmx_do_launch(struct vcpu *v)
-{
-    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
-
-    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
-    vmx_do_launch(v);
-    reset_stack_and_jump(vmx_asm_do_launch);
-}
-
 static int vmx_final_setup_guest(
     struct vcpu *v, struct vcpu_guest_context *ctxt)
 {
@@ -346,7 +343,7 @@ static int vmx_final_setup_guest(
 
     v->arch.schedule_tail = arch_vmx_do_launch;
 
-#if defined (__i386)
+#if defined (__i386__)
     v->domain->arch.vmx_platform.real_mode_data =
         (unsigned long *) regs->esi;
 #endif
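For orientation: the schedule_tail hook installed above fires the next time the vcpu is scheduled, i.e. on the destination CPU, which is what lets arch_vmx_do_relaunch reload the cleared VMCS there. The sketch below is hypothetical (the scheduler itself is not touched by this changeset); finish_switch and its call site are illustrative names only:

    /* Hypothetical illustration -- not part of this changeset.  The
     * key property is that the tail hook runs on the CPU the vcpu
     * migrated to, after the context switch completes. */
    static void finish_switch(struct vcpu *next)
    {
        /* arch_vmx_do_relaunch after a migration, otherwise the usual
         * arch_vmx_do_launch / arch_vmx_do_resume. */
        next->arch.schedule_tail(next);
    }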
--- a/xen/arch/x86/vmx_vmcs.c	Thu Aug 11 21:19:45 2005 +0000
+++ b/xen/arch/x86/vmx_vmcs.c	Thu Aug 11 21:38:58 2005 +0000
@@ -198,7 +198,7 @@ void vmx_set_host_env(struct vcpu *v)
     host_env.idtr_limit = desc.size;
     host_env.idtr_base = desc.address;
     error |= __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);
- 
+
     __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
     host_env.gdtr_limit = desc.size;
     host_env.gdtr_base = desc.address;
@@ -210,7 +210,6 @@ void vmx_set_host_env(struct vcpu *v)
     host_env.tr_base = (unsigned long) &init_tss[cpu];
     error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
     error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
-
 }
 
 void vmx_do_launch(struct vcpu *v)
@@ -544,6 +543,36 @@ void vm_resume_fail(unsigned long eflags
     __vmx_bug(guest_cpu_user_regs());
 }
 
+void arch_vmx_do_resume(struct vcpu *v)
+{
+    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
+
+    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
+    vmx_do_resume(v);
+    reset_stack_and_jump(vmx_asm_do_resume);
+}
+
+void arch_vmx_do_launch(struct vcpu *v)
+{
+    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
+
+    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
+    vmx_do_launch(v);
+    reset_stack_and_jump(vmx_asm_do_launch);
+}
+
+void arch_vmx_do_relaunch(struct vcpu *v)
+{
+    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
+
+    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
+    vmx_do_resume(v);
+    vmx_set_host_env(v);
+    v->arch.schedule_tail = arch_vmx_do_resume;
+
+    reset_stack_and_jump(vmx_asm_do_relaunch);
+}
+
 #endif /* CONFIG_VMX */
 
 /*
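A note on why arch_vmx_do_relaunch() also calls vmx_set_host_env(): the VMCS host-state area caches per-CPU values (the IDTR/GDTR bases and the TR base pointing at init_tss[cpu]), which go stale when the VMCS moves to another logical CPU. A rough sketch of that refresh follows; the descriptor type and the dropped error accumulation are simplifications relative to the real vmx_set_host_env() above:

    /* Sketch only: rewrite the per-CPU host state cached in the VMCS
     * after a move.  'struct desc_ptr' is an illustrative stand-in for
     * the descriptor-table struct used by the real code. */
    struct desc_ptr { unsigned short size; unsigned long address; };

    static void refresh_vmcs_host_state(int cpu)
    {
        struct desc_ptr desc;

        __asm__ __volatile__ ("sidt (%0)" :: "a"(&desc) : "memory");
        __vmwrite(HOST_IDTR_BASE, desc.address);            /* per-CPU IDT */

        __asm__ __volatile__ ("sgdt (%0)" :: "a"(&desc) : "memory");
        __vmwrite(HOST_GDTR_BASE, desc.address);            /* per-CPU GDT */

        __vmwrite(HOST_TR_BASE, (unsigned long)&init_tss[cpu]); /* per-CPU TSS */
    }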
--- a/xen/arch/x86/x86_32/entry.S	Thu Aug 11 21:19:45 2005 +0000
+++ b/xen/arch/x86/x86_32/entry.S	Thu Aug 11 21:38:58 2005 +0000
@@ -108,31 +108,26 @@
         pushl %ecx; \
         pushl %ebx;
 
+#define VMX_RESTORE_ALL_NOSEGREGS \
+        popl %ebx;  \
+        popl %ecx;  \
+        popl %edx;  \
+        popl %esi;  \
+        popl %edi;  \
+        popl %ebp;  \
+        popl %eax;  \
+        addl $(NR_SKIPPED_REGS*4), %esp
+
 ENTRY(vmx_asm_vmexit_handler)
         /* selectors are restored/saved by VMX */
         VMX_SAVE_ALL_NOSEGREGS
         call vmx_vmexit_handler
         jmp vmx_asm_do_resume
 
-ENTRY(vmx_asm_do_launch)
-        popl %ebx
-        popl %ecx
-        popl %edx
-        popl %esi
-        popl %edi
-        popl %ebp
-        popl %eax
-        addl $(NR_SKIPPED_REGS*4), %esp
-        /* VMLUANCH */
-        .byte 0x0f,0x01,0xc2
-        pushf
-        call vm_launch_fail
-        hlt
-
-        ALIGN
-
-ENTRY(vmx_asm_do_resume)
-vmx_test_all_events:
+.macro vmx_asm_common launch initialized
+1:
+/* vmx_test_all_events */
+        .if \initialized
         GET_CURRENT(%ebx)
 /*test_all_events:*/
         xorl %ecx,%ecx
@@ -142,34 +137,50 @@ vmx_test_all_events:
         movl VCPU_processor(%ebx),%eax
         shl  $IRQSTAT_shift,%eax
         test %ecx,irq_stat(%eax,1)
-        jnz  vmx_process_softirqs
+        jnz  2f
 
-vmx_restore_all_guest:
+/* vmx_restore_all_guest */
         call load_cr2
+        .endif
+        VMX_RESTORE_ALL_NOSEGREGS
         /*
          * Check if we are going back to VMX-based VM
          * By this time, all the setups in the VMCS must be complete.
          */
-        popl %ebx
-        popl %ecx
-        popl %edx
-        popl %esi
-        popl %edi
-        popl %ebp
-        popl %eax
-        addl $(NR_SKIPPED_REGS*4), %esp
+        .if \launch
+        /* VMLUANCH */
+        .byte 0x0f,0x01,0xc2
+        pushf
+        call vm_launch_fail
+        .else
         /* VMRESUME */
         .byte 0x0f,0x01,0xc3
         pushf
        call vm_resume_fail
+        .endif
         /* Should never reach here */
         hlt
 
         ALIGN
-vmx_process_softirqs:
+        .if \initialized
+2:
+/* vmx_process_softirqs */
         sti
        call do_softirq
-        jmp  vmx_test_all_events
+        jmp  1b
+        ALIGN
+        .endif
+.endm
+
+ENTRY(vmx_asm_do_launch)
+        vmx_asm_common 1 0
+
+ENTRY(vmx_asm_do_resume)
+        vmx_asm_common 0 1
+
+ENTRY(vmx_asm_do_relaunch)
+        vmx_asm_common 1 1
+
 #endif
 
         ALIGN
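The rewritten entry.S collapses the separate launch and resume paths into one vmx_asm_common macro taking two flags: launch selects VMLAUNCH over VMRESUME for entering the guest, and initialized gates the event-check/softirq preamble, which must be skipped on the very first launch before the VMCS is live. Hence the three instantiations: vmx_asm_do_launch is 1 0, vmx_asm_do_resume is 0 1, and vmx_asm_do_relaunch is 1 1. The x86_64 file below gets the same treatment. As a reading aid, here is the macro's control flow rendered in C; the helper names are hypothetical stand-ins for the assembler sequences:

    extern int  softirq_pending(void);   /* stand-in for the irq_stat test */
    extern void do_softirq(void);
    extern void load_cr2(void);
    extern void enter_guest(int launch); /* VMLAUNCH if launch, else VMRESUME;
                                          * does not return on success */

    static void vmx_asm_common(int launch, int initialized)
    {
        for ( ; ; )                  /* label 1: */
        {
            if ( initialized )
            {
                if ( softirq_pending() )
                {                    /* label 2: vmx_process_softirqs */
                    do_softirq();
                    continue;        /* jmp 1b: re-run the event checks */
                }
                load_cr2();          /* vmx_restore_all_guest */
            }
            /* VMX_RESTORE_ALL_NOSEGREGS, then VMLAUNCH or VMRESUME */
            enter_guest(launch);
        }
    }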
--- a/xen/arch/x86/x86_64/entry.S	Thu Aug 11 21:19:45 2005 +0000
+++ b/xen/arch/x86/x86_64/entry.S	Thu Aug 11 21:38:58 2005 +0000
@@ -194,39 +194,34 @@ test_all_events:
         pushq %r14; \
         pushq %r15; \
 
+#define VMX_RESTORE_ALL_NOSEGREGS \
+        popq %r15; \
+        popq %r14; \
+        popq %r13; \
+        popq %r12; \
+        popq %rbp; \
+        popq %rbx; \
+        popq %r11; \
+        popq %r10; \
+        popq %r9;  \
+        popq %r8;  \
+        popq %rax; \
+        popq %rcx; \
+        popq %rdx; \
+        popq %rsi; \
+        popq %rdi; \
+        addq $(NR_SKIPPED_REGS*8), %rsp; \
+
 ENTRY(vmx_asm_vmexit_handler)
         /* selectors are restored/saved by VMX */
         VMX_SAVE_ALL_NOSEGREGS
         call vmx_vmexit_handler
         jmp vmx_asm_do_resume
 
-ENTRY(vmx_asm_do_launch)
-        popq %r15
-        popq %r14
-        popq %r13
-        popq %r12
-        popq %rbp
-        popq %rbx
-        popq %r11
-        popq %r10
-        popq %r9
-        popq %r8
-        popq %rax
-        popq %rcx
-        popq %rdx
-        popq %rsi
-        popq %rdi
-        addq $(NR_SKIPPED_REGS*8), %rsp
-        /* VMLUANCH */
-        .byte 0x0f,0x01,0xc2
-        pushfq
-        call vm_launch_fail
-        hlt
-
-        ALIGN
-
-ENTRY(vmx_asm_do_resume)
-vmx_test_all_events:
+.macro vmx_asm_common launch initialized
+1:
+        .if \initialized
+/* vmx_test_all_events */
         GET_CURRENT(%rbx)
 /* test_all_events: */
         cli                             # tests must not race interrupts
@@ -235,42 +230,51 @@ vmx_test_all_events:
         shl   $IRQSTAT_shift,%rax
         leaq  irq_stat(%rip), %rdx
         testl $~0,(%rdx,%rax,1)
-        jnz   vmx_process_softirqs
+        jnz   2f
 
-vmx_restore_all_guest:
+/* vmx_restore_all_guest */
         call load_cr2
+        .endif
         /*
          * Check if we are going back to VMX-based VM
          * By this time, all the setups in the VMCS must be complete.
          */
-        popq %r15
-        popq %r14
-        popq %r13
-        popq %r12
-        popq %rbp
-        popq %rbx
-        popq %r11
-        popq %r10
-        popq %r9
-        popq %r8
-        popq %rax
-        popq %rcx
-        popq %rdx
-        popq %rsi
-        popq %rdi
-        addq $(NR_SKIPPED_REGS*8), %rsp
+        VMX_RESTORE_ALL_NOSEGREGS
+        .if \launch
+        /* VMLUANCH */
+        .byte 0x0f,0x01,0xc2
+        pushfq
+        call vm_launch_fail
+        .else
         /* VMRESUME */
         .byte 0x0f,0x01,0xc3
         pushfq
         call vm_resume_fail
+        .endif
         /* Should never reach here */
         hlt
 
         ALIGN
-vmx_process_softirqs:
+
+        .if \initialized
+2:
+/* vmx_process_softirqs */
         sti
         call do_softirq
-        jmp   vmx_test_all_events
+        jmp   1b
+        ALIGN
+        .endif
+.endm
+
+ENTRY(vmx_asm_do_launch)
+        vmx_asm_common 1 0
+
+ENTRY(vmx_asm_do_resume)
+        vmx_asm_common 0 1
+
+ENTRY(vmx_asm_do_relaunch)
+        vmx_asm_common 1 1
+
 #endif
 
         ALIGN
--- a/xen/common/dom0_ops.c	Thu Aug 11 21:19:45 2005 +0000
+++ b/xen/common/dom0_ops.c	Thu Aug 11 21:38:58 2005 +0000
@@ -293,17 +293,17 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
         v->cpumap = cpumap;
 
         if ( cpumap == CPUMAP_RUNANYWHERE )
+        {
             clear_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
+        }
         else
         {
             /* pick a new cpu from the usable map */
             int new_cpu = (int)find_first_set_bit(cpumap) % num_online_cpus();
 
             vcpu_pause(v);
-            if ( v->processor != new_cpu )
-                set_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
+            vcpu_migrate_cpu(v, new_cpu);
             set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
-            v->processor = new_cpu;
             vcpu_unpause(v);
         }
 
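With the hook in place, the pincpu path reduces to pause/migrate/pin/unpause; the pause guarantees the vcpu is not running while its VMCS is cleared. Condensed from the hunk above (the function name and its framing are illustrative):

    /* Condensed, illustrative version of the pincpu handling above. */
    static void pin_vcpu_to_map(struct vcpu *v, unsigned long cpumap)
    {
        /* pick a new cpu from the usable map */
        int new_cpu = (int)find_first_set_bit(cpumap) % num_online_cpus();

        vcpu_pause(v);
        vcpu_migrate_cpu(v, new_cpu);               /* VMCLEARs a VMX VMCS */
        set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
        vcpu_unpause(v);
    }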
--- a/xen/include/asm-x86/vmx.h	Thu Aug 11 21:19:45 2005 +0000
+++ b/xen/include/asm-x86/vmx.h	Thu Aug 11 21:38:58 2005 +0000
@@ -35,6 +35,7 @@ extern void vmx_intr_assist(struct vcpu
 
 extern void arch_vmx_do_launch(struct vcpu *);
 extern void arch_vmx_do_resume(struct vcpu *);
+extern void arch_vmx_do_relaunch(struct vcpu *);
 
 extern int vmcs_size;
 extern unsigned int cpu_rev;
--- a/xen/include/asm-x86/vmx_vmcs.h	Thu Aug 11 21:19:45 2005 +0000
+++ b/xen/include/asm-x86/vmx_vmcs.h	Thu Aug 11 21:38:58 2005 +0000
@@ -93,6 +93,7 @@ struct arch_vmx_struct {
 
 void vmx_do_launch(struct vcpu *);
 void vmx_do_resume(struct vcpu *);
+void vmx_set_host_env(struct vcpu *);
 
 struct vmcs_struct *alloc_vmcs(void);
 void free_vmcs(struct vmcs_struct *);
--- a/xen/include/xen/domain.h	Thu Aug 11 21:19:45 2005 +0000
+++ b/xen/include/xen/domain.h	Thu Aug 11 21:38:58 2005 +0000
@@ -15,7 +15,9 @@ extern void arch_do_createdomain(struct
 extern void arch_do_boot_vcpu(struct vcpu *v);
 
 extern int arch_set_info_guest(
-    struct vcpu *d, struct vcpu_guest_context *c);
+    struct vcpu *v, struct vcpu_guest_context *c);
+
+extern void vcpu_migrate_cpu(struct vcpu *v, int newcpu);
 
 extern void free_perdomain_pt(struct domain *d);