struct hvm_domain *plat = &v->domain->arch.hvm;
int vector;
+ /*
+ * Always call vlapic_sync_pir_to_irr so that PIR is synced into IRR when
+ * using posted interrupts. Note that vlapic_has_pending_irq also does
+ * this, but depending on which interrupts are pending,
+ * hvm_vcpu_has_pending_irq may return early without ever calling
+ * vlapic_has_pending_irq.
+ */
+ vlapic_sync_pir_to_irr(v);
+
if ( unlikely(v->nmi_pending) )
return hvm_intack_nmi;
vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]);
}
-static void sync_pir_to_irr(struct vcpu *v)
-{
- if ( hvm_funcs.sync_pir_to_irr )
- alternative_vcall(hvm_funcs.sync_pir_to_irr, v);
-}
-
static int vlapic_find_highest_irr(struct vlapic *vlapic)
{
+    /* Pull any posted-interrupt PIR bits into IRR before scanning it. */
-    sync_pir_to_irr(vlapic_vcpu(vlapic));
+    vlapic_sync_pir_to_irr(vlapic_vcpu(vlapic));
    return vlapic_find_highest_vector(&vlapic->regs->data[APIC_IRR]);
}
if ( !has_vlapic(v->domain) )
return 0;
- sync_pir_to_irr(v);
+ vlapic_sync_pir_to_irr(v);
return hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, vcpu_vlapic(v)->regs);
}
const struct vlapic *target, const struct vlapic *source,
int short_hand, uint32_t dest, bool_t dest_mode);
+/*
+ * Sync the posted-interrupt PIR into the vlapic IRR, when the underlying
+ * hvm implementation provides a sync_pir_to_irr hook; no-op otherwise.
+ */
+static inline void vlapic_sync_pir_to_irr(struct vcpu *v)
+{
+    if ( hvm_funcs.sync_pir_to_irr )
+        alternative_vcall(hvm_funcs.sync_pir_to_irr, v);
+}
+
#endif /* __ASM_X86_HVM_VLAPIC_H__ */