static void cf_check vmx_sync_pir_to_irr(struct vcpu *v)
{
- struct vlapic *vlapic = vcpu_vlapic(v);
- unsigned int group, i;
- DECLARE_BITMAP(pending_intr, X86_IDT_VECTORS);
+ struct pi_desc *desc = &v->arch.hvm.vmx.pi_desc;
+ union {
+ unsigned long _ul[X86_IDT_VECTORS / BITS_PER_LONG];
+ uint32_t _32[X86_IDT_VECTORS / (sizeof(uint32_t) * 8)];
+ } vec;
+ uint32_t *irr;
+ bool on;
- if ( !pi_test_and_clear_on(&v->arch.hvm.vmx.pi_desc) )
+ /*
+ * The PIR is a contended cacheline which bounces between the CPU(s) and
+ * IOMMU(s). An IOMMU updates the entire PIR atomically, but we can't
+ * express the same on the CPU side, so care has to be taken.
+ *
+ * First, do a plain read of ON. If the PIR hasn't been modified, this
+ * will keep the cacheline Shared and not pull it Exclusive on the current
+ * CPU.
+ */
+ if ( !pi_test_on(desc) )
return;
- for ( group = 0; group < ARRAY_SIZE(pending_intr); group++ )
- pending_intr[group] = pi_get_pir(&v->arch.hvm.vmx.pi_desc, group);
+ /*
+ * Second, if the plain read said that ON was set, we must clear it with
+ * an atomic action. This will bring the cacheline to Exclusive on the
+ * current CPU.
+ *
+ * This should always succeed because no-one else should be playing with
+ * the PIR behind our back, but assert so just in case.
+ */
+ on = pi_test_and_clear_on(desc);
+ ASSERT(on);
+
+ /*
+ * The cacheline will have become Exclusive on the current CPU, and
+ * because ON was set, some other entity (an IOMMU, or Xen on another CPU)
+ * has indicated that the PIR needs re-scanning.
+ *
+ * Note: Entities which can't update the entire cacheline atomically
+ * (i.e. Xen on another CPU) are required to update PIR first, then
+ * set ON. Therefore, there is a corner case where we may have
+ * found and processed the PIR updates "last time around" and only
+ * found ON this time around. This is fine; the logic still
+ * operates correctly.
+ *
+ * Atomically read and clear the entire pending bitmap as fast as we can,
+ * to reduce the window where another entity may steal the cacheline back
+ * from us. This is a performance concern, not a correctness concern; if
+ * another entity does steal the cacheline, we'll just wait for it to
+ * return.
+ */
+ for ( unsigned int i = 0; i < ARRAY_SIZE(vec._ul); ++i )
+ vec._ul[i] = xchg(&desc->pir[i], 0);
+
+    /*
+     * Finally, merge the pending vectors into IRR.  The IRR is not one
+     * contiguous bitmap: it lives in the vLAPIC regs page as eight 32-bit
+     * registers, each aligned on a 16-byte boundary (hence the i * 4 index
+     * below), so we have to merge 32 bits at a time.
+     */
+ irr = (uint32_t *)&vcpu_vlapic(v)->regs->data[APIC_IRR];
+ for ( unsigned int i = 0; i < ARRAY_SIZE(vec._32); ++i )
+ {
+ if ( !vec._32[i] )
+ continue;
- bitmap_for_each ( i, pending_intr, X86_IDT_VECTORS )
- vlapic_set_vector(i, &vlapic->regs->data[APIC_IRR]);
+ asm ( "lock or %[val], %[irr]"
+ : [irr] "+m" (irr[i * 4])
+ : [val] "r" (vec._32[i]) );
+ }
}
static bool cf_check vmx_test_pir(const struct vcpu *v, uint8_t vec)