x86, hvm: Assembly stub cleanups.
author    Keir Fraser <keir.fraser@citrix.com>
          Mon, 21 Apr 2008 11:19:15 +0000 (12:19 +0100)
committer Keir Fraser <keir.fraser@citrix.com>
          Mon, 21 Apr 2008 11:19:15 +0000 (12:19 +0100)
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
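Most of this cleanup renames file-internal jump targets from global symbols to GAS local labels. A minimal sketch of the convention, with hypothetical labels that are not from this patch: any label beginning with .L is kept assembler-local, so it never reaches the object file's symbol table, cannot clash with names in other files, and stays out of disassembly and backtraces.

        # Sketch of the .L local-label convention (entry_stub and
        # pending_work are illustrative, not taken from exits.S).
        .text
        .globl entry_stub           # exported: visible to the linker
entry_stub:
        cmpb $0,pending_work(%rip)  # hypothetical flag
        jnz  .Lslow_path            # .L prefix => assembler-local
        ret
.Lslow_path:                        # never emitted into the symbol
        call do_softirq             # table, so nm/objdump output and
        jmp  entry_stub             # backtraces no longer show it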
xen/arch/x86/hvm/svm/x86_32/exits.S
xen/arch/x86/hvm/svm/x86_64/exits.S
xen/arch/x86/hvm/vmx/x86_32/exits.S
xen/arch/x86/hvm/vmx/x86_64/exits.S

diff --git a/xen/arch/x86/hvm/svm/x86_32/exits.S b/xen/arch/x86/hvm/svm/x86_32/exits.S
index 83d893a90dbceed4925ec834b48eafd0b0e9f91e..b407558d7fdd16b5842eb011b3586d94b2c34198 100644
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S
@@ -42,7 +42,7 @@ ENTRY(svm_asm_do_resume)
         movl VCPU_processor(%ebx),%eax
         shl  $IRQSTAT_shift,%eax
         testl $~0,irq_stat(%eax,1)
-        jnz  svm_process_softirqs
+        jnz  .Lsvm_process_softirqs
 
         call svm_asid_handle_vmrun
         call svm_intr_assist
@@ -52,8 +52,8 @@ ENTRY(svm_asm_do_resume)
          * instead of having a mostly taken branch over the unlikely code.
          */
         cmpb $0,tb_init_done
-        jnz  svm_trace
-svm_trace_done:
+        jnz  .Lsvm_trace
+.Lsvm_trace_done:
 
         movl VCPU_svm_vmcb(%ebx),%ecx
         movl UREGS_eax(%esp),%eax
@@ -108,7 +108,7 @@ svm_trace_done:
 #endif
 
         STGI
-.globl svm_stgi_label;
+.globl svm_stgi_label
 svm_stgi_label:
         movl %esp,%eax
         push %eax
@@ -116,16 +116,11 @@ svm_stgi_label:
         addl $4,%esp
         jmp  svm_asm_do_resume
 
-        ALIGN
-svm_process_softirqs:
+.Lsvm_process_softirqs:
         STGI
         call do_softirq
         jmp  svm_asm_do_resume
 
-svm_trace:
-        /* Call out to C, as this is not speed critical path
-         * Note: svm_trace_vmentry will recheck the tb_init_done,
-         * but this is on the slow path, so who cares 
-         */
+.Lsvm_trace:
         call svm_trace_vmentry
-        jmp  svm_trace_done
+        jmp  .Lsvm_trace_done
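The same diff also drops the ALIGN directive in front of the out-of-line slow path. A hedged sketch of the layout the comment in the hunk above describes: the rare work sits past the end of the hot path, so the common case costs a single not-taken forward branch, and alignment padding on code that is only ever reached by an explicit jump buys little on a slow path.

        # Hot path: rare condition tested with a forward branch.
        cmpb $0,tb_init_done
        jnz  .Lsvm_trace            # seldom taken
.Lsvm_trace_done:
        # ... hot path continues to the VMRUN sequence ...

        # Cold code lives after the hot path.  It is reached only via
        # the jnz above, never by fall-through, so ALIGN padding in
        # front of it is wasted space.
.Lsvm_trace:
        call svm_trace_vmentry
        jmp  .Lsvm_trace_done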
diff --git a/xen/arch/x86/hvm/svm/x86_64/exits.S b/xen/arch/x86/hvm/svm/x86_64/exits.S
index 8c2f70a6bd2b4d79722279ff12e7fe248b9d599d..dccab2379a001d072383f96021e714873cfe5fb1 100644
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S
+++ b/xen/arch/x86/hvm/svm/x86_64/exits.S
@@ -43,7 +43,7 @@ ENTRY(svm_asm_do_resume)
         shl  $IRQSTAT_shift,%rax
         leaq irq_stat(%rip),%rdx
         testl $~0,(%rdx,%rax,1)
-        jnz  svm_process_softirqs
+        jnz  .Lsvm_process_softirqs
 
         call svm_asid_handle_vmrun
         call svm_intr_assist
@@ -53,8 +53,8 @@ ENTRY(svm_asm_do_resume)
          * instead of having a mostly taken branch over the unlikely code.
          */
         cmpb $0,tb_init_done(%rip)
-        jnz  svm_trace
-svm_trace_done:
+        jnz  .Lsvm_trace
+.Lsvm_trace_done:
 
         movq VCPU_svm_vmcb(%rbx),%rcx
         movq UREGS_rax(%rsp),%rax
@@ -127,22 +127,17 @@ svm_trace_done:
 #endif
 
         STGI
-.globl svm_stgi_label;
+.globl svm_stgi_label
 svm_stgi_label:
         movq %rsp,%rdi
         call svm_vmexit_handler
         jmp  svm_asm_do_resume
 
-        ALIGN
-svm_process_softirqs:
+.Lsvm_process_softirqs:
         STGI
         call do_softirq
         jmp  svm_asm_do_resume
 
-svm_trace:
-        /* Call out to C, as this is not speed critical path
-         * Note: svm_trace_vmentry will recheck the tb_init_done,
-         * but this is on the slow path, so who cares 
-         */
+.Lsvm_trace:
         call svm_trace_vmentry
-        jmp  svm_trace_done
+        jmp  .Lsvm_trace_done
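Aside from register width, the notable difference between the two SVM stubs is the addressing mode of the softirq check; side by side, condensed from the context lines above:

        # x86_32: irq_stat can be addressed absolutely, with the scaled
        # per-CPU offset applied directly.
        shl   $IRQSTAT_shift,%eax
        testl $~0,irq_stat(%eax,1)

        # x86_64: the base address is materialised RIP-relatively first,
        # then indexed with the scaled per-CPU offset.
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rdx
        testl $~0,(%rdx,%rax,1)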
diff --git a/xen/arch/x86/hvm/vmx/x86_32/exits.S b/xen/arch/x86/hvm/vmx/x86_32/exits.S
index eff089a112730c7bb4feb5a71bf926cfa31cd4e4..fed13f44127f7a5af78795ee4108158c882f4e53 100644
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S
         popl %eax
 
         ALIGN
-ENTRY(vmx_asm_vmexit_handler)
+.globl vmx_asm_vmexit_handler
+vmx_asm_vmexit_handler:
         HVM_SAVE_ALL_NOSEGREGS
         GET_CURRENT(%ebx)
 
+        movb $1,VCPU_vmx_launched(%ebx)
+
         movl $GUEST_RIP,%eax
         VMREAD(UREGS_eip)
         movl $GUEST_RSP,%eax
@@ -89,28 +92,21 @@ ENTRY(vmx_asm_vmexit_handler)
         push %eax
         call vmx_vmexit_handler
         addl $4,%esp
-        jmp vmx_asm_do_vmentry
 
-        ALIGN
-vmx_process_softirqs:
-        sti
-        call do_softirq
-        jmp vmx_asm_do_vmentry
-
-        ALIGN
-ENTRY(vmx_asm_do_vmentry)
+.globl vmx_asm_do_vmentry
+vmx_asm_do_vmentry:
         GET_CURRENT(%ebx)
         cli                             # tests must not race interrupts
 
         movl VCPU_processor(%ebx),%eax
         shl  $IRQSTAT_shift,%eax
         cmpl $0,irq_stat(%eax,1)
-        jnz  vmx_process_softirqs
+        jnz  .Lvmx_process_softirqs
 
         call vmx_intr_assist
 
         testb $0xff,VCPU_vmx_emul(%ebx)
-        jnz  vmx_goto_realmode
+        jnz  .Lvmx_goto_realmode
 
         movl VCPU_hvm_guest_cr2(%ebx),%eax
         movl %eax,%cr2
@@ -124,25 +120,28 @@ ENTRY(vmx_asm_do_vmentry)
         VMWRITE(UREGS_eflags)
 
         cmpb $0,VCPU_vmx_launched(%ebx)
-        je   vmx_launch
-
-/*vmx_resume:*/
         HVM_RESTORE_ALL_NOSEGREGS
+        je   .Lvmx_launch
+
+/*.Lvmx_resume:*/
         VMRESUME
         call vm_resume_fail
         ud2
 
-vmx_launch:
-        movb $1,VCPU_vmx_launched(%ebx)
-        HVM_RESTORE_ALL_NOSEGREGS
+.Lvmx_launch:
         VMLAUNCH
         call vm_launch_fail
         ud2
 
-vmx_goto_realmode:
+.Lvmx_goto_realmode:
         sti
         movl %esp,%eax
         push %eax
         call vmx_realmode
         addl $4,%esp
         jmp vmx_asm_do_vmentry
+
+.Lvmx_process_softirqs:
+        sti
+        call do_softirq
+        jmp vmx_asm_do_vmentry
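The interesting VMX change is moving the write of VCPU_vmx_launched from the entry path into the VM-exit handler (reaching the exit handler implies the launch succeeded). That makes the launch and resume arms identical up to the final instruction, so HVM_RESTORE_ALL_NOSEGREGS can be hoisted above the branch; this requires that the macro leave EFLAGS intact, since the je now executes after it. A before/after sketch condensed from the hunks above, with the failure tails omitted:

        # Before: the flag was set on the launch path, so the restore
        # macro had to be duplicated in both arms.
        cmpb $0,VCPU_vmx_launched(%ebx)
        je   vmx_launch
        HVM_RESTORE_ALL_NOSEGREGS
        VMRESUME
vmx_launch:
        movb $1,VCPU_vmx_launched(%ebx)
        HVM_RESTORE_ALL_NOSEGREGS
        VMLAUNCH

        # After: the flag is set once, in vmx_asm_vmexit_handler, and a
        # single restore is shared; only VMRESUME vs VMLAUNCH differs.
        cmpb $0,VCPU_vmx_launched(%ebx)
        HVM_RESTORE_ALL_NOSEGREGS   # must not clobber EFLAGS
        je   .Lvmx_launch
        VMRESUME
.Lvmx_launch:
        VMLAUNCH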
diff --git a/xen/arch/x86/hvm/vmx/x86_64/exits.S b/xen/arch/x86/hvm/vmx/x86_64/exits.S
index 56fdb8ad5443c0a3e8fab8b615b2cd62470757a4..b701dffaa9c95051cdc0fe1fa02107459851e7e0 100644
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S
         popq %rdi
 
         ALIGN
-ENTRY(vmx_asm_vmexit_handler)
+.globl vmx_asm_vmexit_handler
+vmx_asm_vmexit_handler:
         HVM_SAVE_ALL_NOSEGREGS
         GET_CURRENT(%rbx)
 
+        movb $1,VCPU_vmx_launched(%rbx)
+
         leaq UREGS_rip(%rsp),%rdi
         movl $GUEST_RIP,%eax
         /*VMREAD(UREGS_rip)*/
@@ -105,16 +108,9 @@ ENTRY(vmx_asm_vmexit_handler)
 
         movq %rsp,%rdi
         call vmx_vmexit_handler
-        jmp vmx_asm_do_vmentry
 
-        ALIGN
-vmx_process_softirqs:
-        sti
-        call do_softirq
-        jmp vmx_asm_do_vmentry
-
-        ALIGN
-ENTRY(vmx_asm_do_vmentry)
+.globl vmx_asm_do_vmentry
+vmx_asm_do_vmentry:
         GET_CURRENT(%rbx)
         cli                             # tests must not race interrupts
 
@@ -122,12 +118,12 @@ ENTRY(vmx_asm_do_vmentry)
         shl   $IRQSTAT_shift,%rax
         leaq  irq_stat(%rip),%rdx
         cmpl  $0,(%rdx,%rax,1)
-        jnz   vmx_process_softirqs
+        jnz   .Lvmx_process_softirqs
 
         call vmx_intr_assist
 
         testb $0xff,VCPU_vmx_emul(%rbx)
-        jnz  vmx_goto_realmode
+        jnz  .Lvmx_goto_realmode
 
         movq VCPU_hvm_guest_cr2(%rbx),%rax
         movq %rax,%cr2
@@ -143,23 +139,27 @@ ENTRY(vmx_asm_do_vmentry)
         VMWRITE(UREGS_eflags)
 
         cmpb $0,VCPU_vmx_launched(%rbx)
-        je   vmx_launch
-
-/*vmx_resume:*/
         HVM_RESTORE_ALL_NOSEGREGS
+        je   .Lvmx_launch
+
+/*.Lvmx_resume:*/
         VMRESUME
         call vm_resume_fail
         ud2
 
-vmx_launch:
-        movb $1,VCPU_vmx_launched(%rbx)
-        HVM_RESTORE_ALL_NOSEGREGS
+.Lvmx_launch:
         VMLAUNCH
         call vm_launch_fail
         ud2
 
-vmx_goto_realmode:
+.Lvmx_goto_realmode:
         sti
         movq %rsp,%rdi
         call vmx_realmode
         jmp vmx_asm_do_vmentry
+
+.Lvmx_process_softirqs:
+        sti
+        call do_softirq
+        jmp vmx_asm_do_vmentry
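Finally, both VMX diffs delete the jmp vmx_asm_do_vmentry after the call to vmx_vmexit_handler and demote the entry point from ENTRY() to a bare .globl label: with the ALIGN padding gone, vmx_asm_do_vmentry immediately follows the exit handler and control simply falls through. A schematic of the resulting layout:

        # Schematic only: the exit handler runs directly into the entry
        # stub, with no jmp and no alignment padding between them.
vmx_asm_vmexit_handler:
        # ... save guest state, record the exit ...
        call vmx_vmexit_handler
        # falls through
vmx_asm_do_vmentry:
        # ... softirq/interrupt checks, then VMRESUME or VMLAUNCH ...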