This enlightenment may improve performance of Windows guests running
on hosts with higher levels of (physical) CPU contention.
+=item B<apic_assist>
+
+This set incorporates use of the APIC assist page to avoid the need for
+an EOI (End Of Interrupt) of the local APIC.
+This enlightenment may improve performance of guests that make use of
+per-vcpu event channel upcall vectors.
+Note that this enlightenment will have no effect if the guest is
+using APICv posted interrupts.
+
=item B<defaults>
This is a special value that enables the default set of groups, which
-is currently the B<base>, B<freq> and B<time_ref_count> groups.
+is currently the B<base>, B<freq>, B<time_ref_count> and B<apic_assist>
+groups.
=item B<all>
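As a usage illustration (not part of the patch itself), the new group can
be picked up implicitly via B<defaults> or named explicitly in the guest
config's B<viridian> list; the fragment below is hypothetical:

    # hypothetical guest config fragment: either of
    viridian = [ "defaults" ]
    # or, selecting groups explicitly
    viridian = [ "base", "freq", "apic_assist" ]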
*/
#define LIBXL_HAVE_SOFT_RESET 1
+/*
+ * LIBXL_HAVE_APIC_ASSIST indicates that the 'apic_assist' value
+ * is present in the viridian enlightenment enumeration.
+ */
+#define LIBXL_HAVE_APIC_ASSIST 1
+
/*
* libxl ABI compatibility
*
libxl_bitmap_set(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_BASE);
libxl_bitmap_set(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_FREQ);
libxl_bitmap_set(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_TIME_REF_COUNT);
+ libxl_bitmap_set(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_APIC_ASSIST);
}
libxl_for_each_set_bit(v, info->u.hvm.viridian_enable) {
if (libxl_bitmap_test(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_HCALL_REMOTE_TLB_FLUSH))
mask |= HVMPV_hcall_remote_tlb_flush;
+ if (libxl_bitmap_test(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_APIC_ASSIST))
+ mask |= HVMPV_apic_assist;
+
if (mask != 0 &&
xc_hvm_param_set(CTX->xch,
domid,
(2, "time_ref_count"),
(3, "reference_tsc"),
(4, "hcall_remote_tlb_flush"),
+ (5, "apic_assist"),
])
libxl_hdtype = Enumeration("hdtype", [
void *va;
/*
- * We don't yet make use of the APIC assist page but by setting
- * the CPUID3A_MSR_APIC_ACCESS bit in CPUID leaf 40000003 we are duty
- * bound to support the MSR. We therefore do just enough to keep windows
- * happy.
- *
* See section 13.3.4.1 of the specification for details of this
* enlightenment.
*/
*(uint32_t *)va = 0;
- v->arch.hvm_vcpu.viridian.apic_assist.va = va;
+ if ( viridian_feature_mask(v->domain) & HVMPV_apic_assist )
+ {
+ v->arch.hvm_vcpu.viridian.apic_assist.va = va;
+ v->arch.hvm_vcpu.viridian.apic_assist.vector = -1;
+ return;
+ }
+
+ unmap_domain_page_global(va);
+ put_page_and_type(page);
return;
fail:
put_page_and_type(page);
}
+void viridian_start_apic_assist(struct vcpu *v, int vector)
+{
+ uint32_t *va = v->arch.hvm_vcpu.viridian.apic_assist.va;
+
+ if ( !va )
+ return;
+
+ /*
+ * If there is already an assist pending then something has gone
+     * wrong and the VM will most likely hang, so force a crash now
+ * to make the problem clear.
+ */
+ if ( v->arch.hvm_vcpu.viridian.apic_assist.vector >= 0 )
+ domain_crash(v->domain);
+
+ v->arch.hvm_vcpu.viridian.apic_assist.vector = vector;
+ *va |= 1u;
+}
+
+int viridian_complete_apic_assist(struct vcpu *v)
+{
+ uint32_t *va = v->arch.hvm_vcpu.viridian.apic_assist.va;
+ int vector;
+
+ if ( !va )
+ return -1;
+
+ if ( *va & 1u )
+ return -1; /* Interrupt not yet processed by the guest. */
+
+ vector = v->arch.hvm_vcpu.viridian.apic_assist.vector;
+ v->arch.hvm_vcpu.viridian.apic_assist.vector = -1;
+
+ return vector;
+}
+
+void viridian_abort_apic_assist(struct vcpu *v)
+{
+ uint32_t *va = v->arch.hvm_vcpu.viridian.apic_assist.va;
+
+ if ( !va )
+ return;
+
+ *va &= ~1u;
+ v->arch.hvm_vcpu.viridian.apic_assist.vector = -1;
+}
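For context, here is a minimal sketch (not part of the patch) of the
guest-side half of the protocol that the three helpers above drive: the
hypervisor sets bit 0 of the assist page when it injects an eligible
vector, and, as the comment in viridian_complete_apic_assist() implies, an
enlightened guest that has processed the interrupt clears that bit in lieu
of performing an EOI. The names 'apic_assist' and 'lapic_eoi()' below are
assumptions standing in for the guest's mapping of the assist page and its
normal EOI path:

#include <stdint.h>

/* Hypothetical mapping of the per-vCPU APIC assist page. */
static volatile uint32_t *apic_assist;

/* Stand-in for a conventional (trapping) local APIC EOI write. */
void lapic_eoi(void);

static void guest_end_of_interrupt(void)
{
    if ( *apic_assist & 1u )
    {
        /* "No EOI required" is set: consume it and skip the EOI write. */
        *apic_assist &= ~1u;
        return;
    }

    lapic_eoi();
}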
+
static void update_reference_tsc(struct domain *d, bool_t initialize)
{
unsigned long gmfn = d->arch.hvm_domain.viridian.reference_tsc.fields.pfn;
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/nestedhvm.h>
+#include <asm/hvm/viridian.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>
return (fls(word[word_offset*4]) - 1) + (word_offset * 32);
}
+static int vlapic_find_lowest_vector(const void *bitmap)
+{
+ const uint32_t *word = bitmap;
+ unsigned int word_offset;
+
+    /*
+     * Work forwards through the bitmap, which consists of the first
+     * 32-bit word in every group of four (each 32 bits of the 256-bit
+     * register is exposed at a 16-byte stride in the APIC page).
+     */
+    for ( word_offset = 0; word_offset < NR_VECTORS / 32; word_offset++ )
+ if ( word[word_offset * 4] )
+ return (ffs(word[word_offset * 4]) - 1) + (word_offset * 32);
+
+ return -1;
+}
/*
* IRR-specific bitmap update & search routines.
int vlapic_has_pending_irq(struct vcpu *v)
{
struct vlapic *vlapic = vcpu_vlapic(v);
- int irr, isr;
+ int irr, vector, isr;
if ( !vlapic_enabled(vlapic) )
return -1;
!nestedhvm_vcpu_in_guestmode(v) )
return irr;
+ /*
+ * If APIC assist was used then there may have been no EOI so
+ * we need to clear the requisite bit from the ISR here, before
+ * comparing with the IRR.
+ */
+ vector = viridian_complete_apic_assist(v);
+ if ( vector != -1 )
+ vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);
+
isr = vlapic_find_highest_isr(vlapic);
isr = (isr != -1) ? isr : 0;
if ( (isr & 0xf0) >= (irr & 0xf0) )
+ {
+ /*
+ * There's already a higher priority vector pending so
+ * we need to abort any previous APIC assist to ensure there
+ * is an EOI.
+ */
+ viridian_abort_apic_assist(v);
return -1;
+ }
return irr;
}
int vlapic_ack_pending_irq(struct vcpu *v, int vector, bool_t force_ack)
{
struct vlapic *vlapic = vcpu_vlapic(v);
+ int isr;
- if ( force_ack || !vlapic_virtual_intr_delivery_enabled() )
- {
- vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
- vlapic_clear_irr(vector, vlapic);
- }
+ if ( !force_ack &&
+ vlapic_virtual_intr_delivery_enabled() )
+ return 1;
+
+ /* If there's no chance of using APIC assist then bail now. */
+ if ( !has_viridian_apic_assist(v->domain) ||
+ vlapic_test_vector(vector, &vlapic->regs->data[APIC_TMR]) )
+ goto done;
+
+ isr = vlapic_find_lowest_vector(&vlapic->regs->data[APIC_ISR]);
+ if ( isr >= 0 && isr < vector )
+ goto done;
+
+ /*
+ * This vector is edge triggered and there are no lower priority
+ * vectors pending, so we can use APIC assist to avoid exiting
+ * for EOI.
+ */
+ viridian_start_apic_assist(v, vector);
+ done:
+ vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
+ vlapic_clear_irr(vector, vlapic);
return 1;
}
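Taken together with the viridian helpers above, the flow is: when an
edge-triggered vector is acked with no lower-priority vector in service,
viridian_start_apic_assist() latches the vector and sets the assist bit;
the guest clears the bit instead of performing an EOI; the next call to
vlapic_has_pending_irq() then completes the assist and clears the
corresponding ISR bit, while a higher-priority interrupt arriving first
aborts the assist so that a conventional EOI still happens.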
#define has_viridian_time_ref_count(d) \
(is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_time_ref_count))
+#define has_viridian_apic_assist(d) \
+ (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist))
+
void hvm_hypervisor_cpuid_leaf(uint32_t sub_idx,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);
struct {
union viridian_apic_assist msr;
void *va;
+ int vector;
} apic_assist;
};
void viridian_vcpu_deinit(struct vcpu *v);
+void viridian_start_apic_assist(struct vcpu *v, int vector);
+int viridian_complete_apic_assist(struct vcpu *v);
+void viridian_abort_apic_assist(struct vcpu *v);
+
#endif /* __ASM_X86_HVM_VIRIDIAN_H__ */
/*
#define _HVMPV_hcall_remote_tlb_flush 4
#define HVMPV_hcall_remote_tlb_flush (1 << _HVMPV_hcall_remote_tlb_flush)
+/* Use APIC assist */
+#define _HVMPV_apic_assist 5
+#define HVMPV_apic_assist (1 << _HVMPV_apic_assist)
+
#define HVMPV_feature_mask \
(HVMPV_base_freq | \
HVMPV_no_freq | \
HVMPV_time_ref_count | \
HVMPV_reference_tsc | \
- HVMPV_hcall_remote_tlb_flush)
+ HVMPV_hcall_remote_tlb_flush | \
+ HVMPV_apic_assist)
#endif