movl VCPU_processor(%ebx),%eax
shl $IRQSTAT_shift,%eax
testl $~0,irq_stat(%eax,1)
- jnz svm_process_softirqs
+ jnz .Lsvm_process_softirqs
call svm_asid_handle_vmrun
call svm_intr_assist
* instead of having a mostly taken branch over the unlikely code.
*/
cmpb $0,tb_init_done
- jnz svm_trace
-svm_trace_done:
+ jnz .Lsvm_trace
+.Lsvm_trace_done:
movl VCPU_svm_vmcb(%ebx),%ecx
movl UREGS_eax(%esp),%eax
#endif
STGI
-.globl svm_stgi_label;
+.globl svm_stgi_label
svm_stgi_label:
movl %esp,%eax
push %eax
addl $4,%esp
jmp svm_asm_do_resume
- ALIGN
-svm_process_softirqs:
+.Lsvm_process_softirqs:
STGI
call do_softirq
jmp svm_asm_do_resume
-svm_trace:
- /* Call out to C, as this is not speed critical path
- * Note: svm_trace_vmentry will recheck the tb_init_done,
- * but this is on the slow path, so who cares
- */
+.Lsvm_trace:
call svm_trace_vmentry
- jmp svm_trace_done
+ jmp .Lsvm_trace_done
shl $IRQSTAT_shift,%rax
leaq irq_stat(%rip),%rdx
testl $~0,(%rdx,%rax,1)
- jnz svm_process_softirqs
+ jnz .Lsvm_process_softirqs
call svm_asid_handle_vmrun
call svm_intr_assist
* instead of having a mostly taken branch over the unlikely code.
*/
cmpb $0,tb_init_done(%rip)
- jnz svm_trace
-svm_trace_done:
+ jnz .Lsvm_trace
+.Lsvm_trace_done:
movq VCPU_svm_vmcb(%rbx),%rcx
movq UREGS_rax(%rsp),%rax
#endif
STGI
-.globl svm_stgi_label;
+.globl svm_stgi_label
svm_stgi_label:
movq %rsp,%rdi
call svm_vmexit_handler
jmp svm_asm_do_resume
- ALIGN
-svm_process_softirqs:
+.Lsvm_process_softirqs:
STGI
call do_softirq
jmp svm_asm_do_resume
-svm_trace:
- /* Call out to C, as this is not speed critical path
- * Note: svm_trace_vmentry will recheck the tb_init_done,
- * but this is on the slow path, so who cares
- */
+.Lsvm_trace:
call svm_trace_vmentry
- jmp svm_trace_done
+ jmp .Lsvm_trace_done
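
The tb_init_done check above follows the pattern described in the retained comment: call out of line to the unlikely trace path instead of branching over it, and (as the deleted comment noted) svm_trace_vmentry() rechecks the flag itself on the slow path. For orientation only, the equivalent C shape; unlikely() and the svm_trace_vmentry() prototype here are simplified stand-ins for the real Xen definitions:

    #define unlikely(x) __builtin_expect(!!(x), 0)

    extern int tb_init_done;
    extern void svm_trace_vmentry(void);

    if ( unlikely(tb_init_done) )
        svm_trace_vmentry();    /* slow path; rechecks tb_init_done itself */
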
popl %eax
ALIGN
-ENTRY(vmx_asm_vmexit_handler)
+.globl vmx_asm_vmexit_handler
+vmx_asm_vmexit_handler:
HVM_SAVE_ALL_NOSEGREGS
GET_CURRENT(%ebx)
+ movb $1,VCPU_vmx_launched(%ebx)
+
movl $GUEST_RIP,%eax
VMREAD(UREGS_eip)
movl $GUEST_RSP,%eax
push %eax
call vmx_vmexit_handler
addl $4,%esp
- jmp vmx_asm_do_vmentry
- ALIGN
-vmx_process_softirqs:
- sti
- call do_softirq
- jmp vmx_asm_do_vmentry
-
- ALIGN
-ENTRY(vmx_asm_do_vmentry)
+.globl vmx_asm_do_vmentry
+vmx_asm_do_vmentry:
GET_CURRENT(%ebx)
cli # tests must not race interrupts
movl VCPU_processor(%ebx),%eax
shl $IRQSTAT_shift,%eax
cmpl $0,irq_stat(%eax,1)
- jnz vmx_process_softirqs
+ jnz .Lvmx_process_softirqs
call vmx_intr_assist
testb $0xff,VCPU_vmx_emul(%ebx)
- jnz vmx_goto_realmode
+ jnz .Lvmx_goto_realmode
movl VCPU_hvm_guest_cr2(%ebx),%eax
movl %eax,%cr2
VMWRITE(UREGS_eflags)
cmpb $0,VCPU_vmx_launched(%ebx)
- je vmx_launch
-
-/*vmx_resume:*/
HVM_RESTORE_ALL_NOSEGREGS
+ je .Lvmx_launch
+
+/*.Lvmx_resume:*/
VMRESUME
call vm_resume_fail
ud2
-vmx_launch:
- movb $1,VCPU_vmx_launched(%ebx)
- HVM_RESTORE_ALL_NOSEGREGS
+.Lvmx_launch:
VMLAUNCH
call vm_launch_fail
ud2
-vmx_goto_realmode:
+.Lvmx_goto_realmode:
sti
movl %esp,%eax
push %eax
call vmx_realmode
addl $4,%esp
jmp vmx_asm_do_vmentry
+
+.Lvmx_process_softirqs:
+ sti
+ call do_softirq
+ jmp vmx_asm_do_vmentry
popq %rdi
ALIGN
-ENTRY(vmx_asm_vmexit_handler)
+.globl vmx_asm_vmexit_handler
+vmx_asm_vmexit_handler:
HVM_SAVE_ALL_NOSEGREGS
GET_CURRENT(%rbx)
+ movb $1,VCPU_vmx_launched(%rbx)
+
leaq UREGS_rip(%rsp),%rdi
movl $GUEST_RIP,%eax
/*VMREAD(UREGS_rip)*/
movq %rsp,%rdi
call vmx_vmexit_handler
- jmp vmx_asm_do_vmentry
- ALIGN
-vmx_process_softirqs:
- sti
- call do_softirq
- jmp vmx_asm_do_vmentry
-
- ALIGN
-ENTRY(vmx_asm_do_vmentry)
+.globl vmx_asm_do_vmentry
+vmx_asm_do_vmentry:
GET_CURRENT(%rbx)
cli # tests must not race interrupts
shl $IRQSTAT_shift,%rax
leaq irq_stat(%rip),%rdx
cmpl $0,(%rdx,%rax,1)
- jnz vmx_process_softirqs
+ jnz .Lvmx_process_softirqs
call vmx_intr_assist
testb $0xff,VCPU_vmx_emul(%rbx)
- jnz vmx_goto_realmode
+ jnz .Lvmx_goto_realmode
movq VCPU_hvm_guest_cr2(%rbx),%rax
movq %rax,%cr2
VMWRITE(UREGS_eflags)
cmpb $0,VCPU_vmx_launched(%rbx)
- je vmx_launch
-
-/*vmx_resume:*/
HVM_RESTORE_ALL_NOSEGREGS
+ je .Lvmx_launch
+
+/*.Lvmx_resume:*/
VMRESUME
call vm_resume_fail
ud2
-vmx_launch:
- movb $1,VCPU_vmx_launched(%rbx)
- HVM_RESTORE_ALL_NOSEGREGS
+.Lvmx_launch:
VMLAUNCH
call vm_launch_fail
ud2
-vmx_goto_realmode:
+.Lvmx_goto_realmode:
sti
movq %rsp,%rdi
call vmx_realmode
jmp vmx_asm_do_vmentry
+
+.Lvmx_process_softirqs:
+ sti
+ call do_softirq
+ jmp vmx_asm_do_vmentry
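
Taken together, the SVM and VMX entry stubs above implement the same loop: disable interrupts, fall back to do_softirq() if this CPU has softirqs pending, otherwise run the interrupt-assist hook and enter the guest. The following C rendering is for orientation only and is not part of the patch; hvm_intr_assist() and hvm_enter_guest() are hypothetical stand-ins for the per-vendor routines, while softirq_pending(), do_softirq() and local_irq_disable()/local_irq_enable() correspond to the irq_stat[] test, the do_softirq call and the cli/sti (CLGI/STGI) pairs in the assembly:

    void hvm_do_entry_loop(struct vcpu *v)        /* illustrative sketch only */
    {
        for ( ; ; )
        {
            local_irq_disable();                  /* cli / CLGI: tests must not race interrupts */
            if ( softirq_pending(v->processor) )  /* the irq_stat[] test */
            {
                local_irq_enable();               /* sti / STGI */
                do_softirq();
                continue;                         /* jmp back to the entry label */
            }
            hvm_intr_assist(v);                   /* svm_intr_assist / vmx_intr_assist */
            hvm_enter_guest(v);                   /* restore guest state; VMRUN or VMLAUNCH/VMRESUME */
        }
    }

The VMX variant additionally drops to vmx_realmode() when VCPU_vmx_emul is set, and after this change the launched flag is set in the exit handler rather than on the VMLAUNCH path, so the launch and resume paths share a single HVM_RESTORE_ALL_NOSEGREGS.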