xenbits.xensource.com Git - xen.git/commitdiff
x86: don't rely on __softirq_pending to be the first field in irq_cpustat_t
author: Jan Beulich <jbeulich@suse.com>
Mon, 4 Mar 2013 09:20:57 +0000 (10:20 +0100)
committer: Jan Beulich <jbeulich@suse.com>
Mon, 4 Mar 2013 09:20:57 +0000 (10:20 +0100)
This is even more so as the field doesn't have a comment to that effect
in the structure definition.

Once modifying the respective assembly code, also convert the
IRQSTAT_shift users to do a 32-bit shift only (as we won't support 48M
CPUs any time soon) and use "cmpl" instead of "testl" when checking the
field (both reducing code size).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
xen/arch/x86/hvm/svm/entry.S
xen/arch/x86/hvm/vmx/entry.S
xen/arch/x86/x86_64/asm-offsets.c
xen/arch/x86/x86_64/compat/entry.S
xen/arch/x86/x86_64/entry.S

index ada71d29f820d2f6d7f0a10967a4ab3429828c93..196962977425034b9007a9063821c0b991051578 100644 (file)
@@ -41,10 +41,10 @@ ENTRY(svm_asm_do_resume)
         CLGI
 
         mov  VCPU_processor(%rbx),%eax
-        shl  $IRQSTAT_shift,%rax
-        lea  irq_stat(%rip),%rdx
-        testl $~0,(%rdx,%rax,1)
-        jnz  .Lsvm_process_softirqs
+        shl  $IRQSTAT_shift,%eax
+        lea  irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
+        cmpl $0,(%rdx,%rax,1)
+        jne  .Lsvm_process_softirqs
 
         cmpb $0, VCPU_nsvm_hap_enabled(%rbx)
 UNLIKELY_START(nz, nsvm_hap)
index f0024d34fa00fd172f571f3ce1f345adbe1ea117..496a62c0964c6be6bc5e1f05a78bab645786c337 100644 (file)
@@ -97,8 +97,8 @@ vmx_asm_do_vmentry:
         cli
 
         mov  VCPU_processor(%rbx),%eax
-        shl  $IRQSTAT_shift,%rax
-        lea  irq_stat(%rip),%rdx
+        shl  $IRQSTAT_shift,%eax
+        lea  irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
         cmpl $0,(%rdx,%rax,1)
         jnz  .Lvmx_process_softirqs
 
index 6dc832c01c4f066c3a574ad26ce81361a579e5a8..b0098b35a4664612e65a1502b0cebf18af22200a 100644 (file)
@@ -156,6 +156,7 @@ void __dummy__(void)
 #endif
 
     DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
+    OFFSET(IRQSTAT_softirq_pending, irq_cpustat_t, __softirq_pending);
     BLANK();
 
     OFFSET(CPUINFO86_ext_features, struct cpuinfo_x86, x86_capability[1]);
index 7769019e2769bf2906fb24285f0caa2424261259..c0afe2cca5199737ae0bd3898d9ba33464ae3393 100644 (file)
@@ -96,10 +96,10 @@ ENTRY(compat_test_all_events)
         cli                             # tests must not race interrupts
 /*compat_test_softirqs:*/
         movl  VCPU_processor(%rbx),%eax
-        shlq  $IRQSTAT_shift,%rax
-        leaq  irq_stat(%rip),%rcx
-        testl $~0,(%rcx,%rax,1)
-        jnz   compat_process_softirqs
+        shll  $IRQSTAT_shift,%eax
+        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
+        cmpl  $0,(%rcx,%rax,1)
+        jne   compat_process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
         jnz   compat_process_mce
 .Lcompat_test_guest_nmi:
index 82d98819457d43fe36fd59a00459f72cb0b724c7..03e352bda3c96f53b05103d4c37cdcf4d6988f3e 100644 (file)
@@ -195,8 +195,8 @@ test_all_events:
         cli                             # tests must not race interrupts
 /*test_softirqs:*/  
         movl  VCPU_processor(%rbx),%eax
-        shl   $IRQSTAT_shift,%rax
-        leaq  irq_stat(%rip),%rcx
+        shll  $IRQSTAT_shift,%eax
+        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
         cmpl  $0,(%rcx,%rax,1)
         jne   process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
@@ -643,7 +643,7 @@ handle_ist_exception:
         /* Send an IPI to ourselves to cover for the lack of event checking. */
         movl  VCPU_processor(%rbx),%eax
         shll  $IRQSTAT_shift,%eax
-        leaq  irq_stat(%rip),%rcx
+        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
         cmpl  $0,(%rcx,%rax,1)
         je    1f
         movl  $EVENT_CHECK_VECTOR,%edi