x86/vmx: always sync PIR to IRR before vmentry
author     Roger Pau Monné <roger.pau@citrix.com>
           Thu, 28 Nov 2019 10:58:25 +0000 (11:58 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Thu, 28 Nov 2019 10:58:25 +0000 (11:58 +0100)
When using posted interrupts on Intel hardware it's possible that the
vCPU resumes execution with a stale local APIC IRR register because,
depending on the interrupts to be injected, vlapic_has_pending_irq
might not be called, and thus PIR won't be synced into IRR.

Fix this by making sure PIR is always synced to IRR in
hvm_vcpu_has_pending_irq regardless of what interrupts are pending.
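
As a rough illustration of the race (a standalone toy model in plain C; the
toy_* names and bitmask layout are invented for this sketch and do not
correspond to Xen's real structures), an early return taken before PIR is
folded into IRR leaves a posted vector invisible to the vlapic:

    /* Hypothetical, simplified model of the stale-IRR race described above. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_vcpu {
        uint64_t pir;          /* posted-interrupt requests written by hardware */
        uint64_t irr;          /* what the (virtual) local APIC actually sees   */
        bool nmi_pending;
    };

    /* Stand-in for vlapic_sync_pir_to_irr(): fold PIR into IRR. */
    static void toy_sync_pir_to_irr(struct toy_vcpu *v)
    {
        v->irr |= v->pir;
        v->pir = 0;
    }

    /* Old behaviour: the NMI early-return path never syncs PIR. */
    static bool toy_has_pending_irq_old(struct toy_vcpu *v)
    {
        if ( v->nmi_pending )
            return true;           /* IRR left stale on this path */
        toy_sync_pir_to_irr(v);    /* only reached when no NMI is pending */
        return v->irr != 0;
    }

    /* Fixed behaviour: sync unconditionally before any early return. */
    static bool toy_has_pending_irq_new(struct toy_vcpu *v)
    {
        toy_sync_pir_to_irr(v);
        if ( v->nmi_pending )
            return true;
        return v->irr != 0;
    }

    int main(void)
    {
        struct toy_vcpu v = { .pir = 1ull << 0x30, .nmi_pending = true };

        toy_has_pending_irq_old(&v);
        printf("old path: IRR = %#llx (posted vector 0x30 not yet visible)\n",
               (unsigned long long)v.irr);

        v.pir = 1ull << 0x30;
        toy_has_pending_irq_new(&v);
        printf("new path: IRR = %#llx\n", (unsigned long long)v.irr);

        return 0;
    }

Running the sketch shows an empty IRR for the old ordering and the posted
vector set for the new one, which is the behaviour the patch below enforces.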

Reported-by: Joe Jin <joe.jin@oracle.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Tested-by: Joe Jin <joe.jin@oracle.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Release-acked-by: Juergen Gross <jgross@suse.com>
xen/arch/x86/hvm/irq.c
xen/arch/x86/hvm/vlapic.c
xen/include/asm-x86/hvm/vlapic.h

diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index e03a87ad50135903acb829be9f02e2cd1f2f823d..c684422b249cc51ffdafc608a52f77b0b6772ef5 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -517,6 +517,15 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
     struct hvm_domain *plat = &v->domain->arch.hvm;
     int vector;
 
+    /*
+     * Always call vlapic_sync_pir_to_irr so that PIR is synced into IRR when
+     * using posted interrupts. Note this is also done by
+     * vlapic_has_pending_irq but depending on which interrupts are pending
+     * hvm_vcpu_has_pending_irq will return early without calling
+     * vlapic_has_pending_irq.
+     */
+    vlapic_sync_pir_to_irr(v);
+
     if ( unlikely(v->nmi_pending) )
         return hvm_intack_nmi;
 
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index b790ba6bbd6c249a682ea6b728e9a8654e1177c7..9b8afb72e84fdc1cc19c3522286f79a2c0365697 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -106,15 +106,9 @@ static void vlapic_clear_irr(int vector, struct vlapic *vlapic)
     vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]);
 }
 
-static void sync_pir_to_irr(struct vcpu *v)
-{
-    if ( hvm_funcs.sync_pir_to_irr )
-        alternative_vcall(hvm_funcs.sync_pir_to_irr, v);
-}
-
 static int vlapic_find_highest_irr(struct vlapic *vlapic)
 {
-    sync_pir_to_irr(vlapic_vcpu(vlapic));
+    vlapic_sync_pir_to_irr(vlapic_vcpu(vlapic));
 
     return vlapic_find_highest_vector(&vlapic->regs->data[APIC_IRR]);
 }
@@ -1494,7 +1488,7 @@ static int lapic_save_regs(struct vcpu *v, hvm_domain_context_t *h)
     if ( !has_vlapic(v->domain) )
         return 0;
 
-    sync_pir_to_irr(v);
+    vlapic_sync_pir_to_irr(v);
 
     return hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, vcpu_vlapic(v)->regs);
 }
diff --git a/xen/include/asm-x86/hvm/vlapic.h b/xen/include/asm-x86/hvm/vlapic.h
index dde66b4f0fc446c0ebd8239f0af86ce07274b68e..f0d5e3fbc9abfc4a738752236827037cdfca1a89 100644
--- a/xen/include/asm-x86/hvm/vlapic.h
+++ b/xen/include/asm-x86/hvm/vlapic.h
@@ -150,4 +150,10 @@ bool_t vlapic_match_dest(
     const struct vlapic *target, const struct vlapic *source,
     int short_hand, uint32_t dest, bool_t dest_mode);
 
+static inline void vlapic_sync_pir_to_irr(struct vcpu *v)
+{
+    if ( hvm_funcs.sync_pir_to_irr )
+        alternative_vcall(hvm_funcs.sync_pir_to_irr, v);
+}
+
 #endif /* __ASM_X86_HVM_VLAPIC_H__ */