From: Keir Fraser
Date: Mon, 21 Apr 2008 11:19:15 +0000 (+0100)
Subject: x86, hvm: Assembly stub cleanups.
X-Git-Tag: 3.3.0-rc1~240^2~36
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=9fcaa32c645087992ccd21ae9a0a394972359e3c;p=xen.git

x86, hvm: Assembly stub cleanups.

Signed-off-by: Keir Fraser
---

diff --git a/xen/arch/x86/hvm/svm/x86_32/exits.S b/xen/arch/x86/hvm/svm/x86_32/exits.S
index 83d893a90d..b407558d7f 100644
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S
@@ -42,7 +42,7 @@ ENTRY(svm_asm_do_resume)
         movl VCPU_processor(%ebx),%eax
         shl $IRQSTAT_shift,%eax
         testl $~0,irq_stat(%eax,1)
-        jnz svm_process_softirqs
+        jnz .Lsvm_process_softirqs
 
         call svm_asid_handle_vmrun
         call svm_intr_assist
@@ -52,8 +52,8 @@ ENTRY(svm_asm_do_resume)
          * instead of having a mostly taken branch over the unlikely code.
          */
         cmpb $0,tb_init_done
-        jnz svm_trace
-svm_trace_done:
+        jnz .Lsvm_trace
+.Lsvm_trace_done:
 
         movl VCPU_svm_vmcb(%ebx),%ecx
         movl UREGS_eax(%esp),%eax
@@ -108,7 +108,7 @@ svm_trace_done:
 #endif
 
         STGI
-.globl svm_stgi_label;
+.globl svm_stgi_label
 svm_stgi_label:
         movl %esp,%eax
         push %eax
@@ -116,16 +116,11 @@ svm_stgi_label:
         addl $4,%esp
         jmp svm_asm_do_resume
 
-        ALIGN
-svm_process_softirqs:
+.Lsvm_process_softirqs:
         STGI
         call do_softirq
         jmp svm_asm_do_resume
 
-svm_trace:
-    /* Call out to C, as this is not speed critical path
-     * Note: svm_trace_vmentry will recheck the tb_init_done,
-     * but this is on the slow path, so who cares
-     */
+.Lsvm_trace:
         call svm_trace_vmentry
-        jmp svm_trace_done
+        jmp .Lsvm_trace_done
diff --git a/xen/arch/x86/hvm/svm/x86_64/exits.S b/xen/arch/x86/hvm/svm/x86_64/exits.S
index 8c2f70a6bd..dccab2379a 100644
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S
+++ b/xen/arch/x86/hvm/svm/x86_64/exits.S
@@ -43,7 +43,7 @@ ENTRY(svm_asm_do_resume)
         shl $IRQSTAT_shift,%rax
         leaq irq_stat(%rip),%rdx
         testl $~0,(%rdx,%rax,1)
-        jnz svm_process_softirqs
+        jnz .Lsvm_process_softirqs
 
         call svm_asid_handle_vmrun
         call svm_intr_assist
@@ -53,8 +53,8 @@ ENTRY(svm_asm_do_resume)
          * instead of having a mostly taken branch over the unlikely code.
          */
         cmpb $0,tb_init_done(%rip)
-        jnz svm_trace
-svm_trace_done:
+        jnz .Lsvm_trace
+.Lsvm_trace_done:
 
         movq VCPU_svm_vmcb(%rbx),%rcx
         movq UREGS_rax(%rsp),%rax
@@ -127,22 +127,17 @@ svm_trace_done:
 #endif
 
         STGI
-.globl svm_stgi_label;
+.globl svm_stgi_label
 svm_stgi_label:
         movq %rsp,%rdi
         call svm_vmexit_handler
         jmp svm_asm_do_resume
 
-        ALIGN
-svm_process_softirqs:
+.Lsvm_process_softirqs:
         STGI
         call do_softirq
         jmp svm_asm_do_resume
 
-svm_trace:
-    /* Call out to C, as this is not speed critical path
-     * Note: svm_trace_vmentry will recheck the tb_init_done,
-     * but this is on the slow path, so who cares
-     */
+.Lsvm_trace:
         call svm_trace_vmentry
-        jmp svm_trace_done
+        jmp .Lsvm_trace_done
diff --git a/xen/arch/x86/hvm/vmx/x86_32/exits.S b/xen/arch/x86/hvm/vmx/x86_32/exits.S
index eff089a112..fed13f4412 100644
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S
@@ -58,10 +58,13 @@
         popl %eax
 
         ALIGN
-ENTRY(vmx_asm_vmexit_handler)
+.globl vmx_asm_vmexit_handler
+vmx_asm_vmexit_handler:
         HVM_SAVE_ALL_NOSEGREGS
         GET_CURRENT(%ebx)
 
+        movb $1,VCPU_vmx_launched(%ebx)
+
         movl $GUEST_RIP,%eax
         VMREAD(UREGS_eip)
         movl $GUEST_RSP,%eax
@@ -89,28 +92,21 @@ ENTRY(vmx_asm_vmexit_handler)
         push %eax
         call vmx_vmexit_handler
         addl $4,%esp
-        jmp vmx_asm_do_vmentry
 
-        ALIGN
-vmx_process_softirqs:
-        sti
-        call do_softirq
-        jmp vmx_asm_do_vmentry
-
-        ALIGN
-ENTRY(vmx_asm_do_vmentry)
+.globl vmx_asm_do_vmentry
+vmx_asm_do_vmentry:
         GET_CURRENT(%ebx)
         cli                             # tests must not race interrupts
 
         movl VCPU_processor(%ebx),%eax
         shl $IRQSTAT_shift,%eax
         cmpl $0,irq_stat(%eax,1)
-        jnz vmx_process_softirqs
+        jnz .Lvmx_process_softirqs
 
         call vmx_intr_assist
 
         testb $0xff,VCPU_vmx_emul(%ebx)
-        jnz vmx_goto_realmode
+        jnz .Lvmx_goto_realmode
 
         movl VCPU_hvm_guest_cr2(%ebx),%eax
         movl %eax,%cr2
@@ -124,25 +120,28 @@ ENTRY(vmx_asm_do_vmentry)
         VMWRITE(UREGS_eflags)
 
         cmpb $0,VCPU_vmx_launched(%ebx)
-        je vmx_launch
-
-/*vmx_resume:*/
         HVM_RESTORE_ALL_NOSEGREGS
+        je .Lvmx_launch
+
+/*.Lvmx_resume:*/
         VMRESUME
         call vm_resume_fail
         ud2
 
-vmx_launch:
-        movb $1,VCPU_vmx_launched(%ebx)
-        HVM_RESTORE_ALL_NOSEGREGS
+.Lvmx_launch:
         VMLAUNCH
         call vm_launch_fail
         ud2
 
-vmx_goto_realmode:
+.Lvmx_goto_realmode:
         sti
         movl %esp,%eax
         push %eax
         call vmx_realmode
         addl $4,%esp
         jmp vmx_asm_do_vmentry
+
+.Lvmx_process_softirqs:
+        sti
+        call do_softirq
+        jmp vmx_asm_do_vmentry
diff --git a/xen/arch/x86/hvm/vmx/x86_64/exits.S b/xen/arch/x86/hvm/vmx/x86_64/exits.S
index 56fdb8ad54..b701dffaa9 100644
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S
@@ -74,10 +74,13 @@
         popq %rdi
 
         ALIGN
-ENTRY(vmx_asm_vmexit_handler)
+.globl vmx_asm_vmexit_handler
+vmx_asm_vmexit_handler:
         HVM_SAVE_ALL_NOSEGREGS
         GET_CURRENT(%rbx)
 
+        movb $1,VCPU_vmx_launched(%rbx)
+
         leaq UREGS_rip(%rsp),%rdi
         movl $GUEST_RIP,%eax
         /*VMREAD(UREGS_rip)*/
@@ -105,16 +108,9 @@ ENTRY(vmx_asm_vmexit_handler)
 
         movq %rsp,%rdi
         call vmx_vmexit_handler
-        jmp vmx_asm_do_vmentry
 
-        ALIGN
-vmx_process_softirqs:
-        sti
-        call do_softirq
-        jmp vmx_asm_do_vmentry
-
-        ALIGN
-ENTRY(vmx_asm_do_vmentry)
+.globl vmx_asm_do_vmentry
+vmx_asm_do_vmentry:
         GET_CURRENT(%rbx)
         cli                             # tests must not race interrupts
 
@@ -122,12 +118,12 @@
         shl $IRQSTAT_shift,%rax
         leaq irq_stat(%rip),%rdx
         cmpl $0,(%rdx,%rax,1)
-        jnz vmx_process_softirqs
+        jnz .Lvmx_process_softirqs
 
         call vmx_intr_assist
 
         testb $0xff,VCPU_vmx_emul(%rbx)
-        jnz vmx_goto_realmode
+        jnz .Lvmx_goto_realmode
 
         movq VCPU_hvm_guest_cr2(%rbx),%rax
         movq %rax,%cr2
@@ -143,23 +139,27 @@ ENTRY(vmx_asm_do_vmentry)
         VMWRITE(UREGS_eflags)
 
         cmpb $0,VCPU_vmx_launched(%rbx)
-        je vmx_launch
-
-/*vmx_resume:*/
         HVM_RESTORE_ALL_NOSEGREGS
+        je .Lvmx_launch
+
+/*.Lvmx_resume:*/
         VMRESUME
         call vm_resume_fail
         ud2
 
-vmx_launch:
-        movb $1,VCPU_vmx_launched(%rbx)
-        HVM_RESTORE_ALL_NOSEGREGS
+.Lvmx_launch:
         VMLAUNCH
         call vm_launch_fail
         ud2
 
-vmx_goto_realmode:
+.Lvmx_goto_realmode:
         sti
         movq %rsp,%rdi
         call vmx_realmode
         jmp vmx_asm_do_vmentry
+        jmp vmx_asm_do_vmentry
+
+.Lvmx_process_softirqs:
+        sti
+        call do_softirq
+        jmp vmx_asm_do_vmentry
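
Note on the label renames above: GNU as treats labels beginning with .L as assembler-local, so they are not emitted into the object file's symbol table, whereas bare labels and .globl/ENTRY() labels are. Since these jump targets are only referenced from within the same stub, keeping them .L-prefixed is presumably the point of the cleanup. A minimal sketch of the distinction, using hypothetical label names not taken from this patch:

        .text
        .globl public_entry            # exported: appears in the symbol table
public_entry:
        testl $1,%eax
        jz    .Lslow_path              # .L prefix: file-local, no symbol emitted
        ret
.Lslow_path:                           # invisible to nm/backtraces/disassembly
        xorl  %eax,%eax
        ret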