*/
#define LIBXL_HAVE_VIRIDIAN_CRASH_CTL 1
+/*
+ * LIBXL_HAVE_VIRIDIAN_STIMER indicates that the 'stimer' value
+ * is present in the viridian enlightenment enumeration.
+ */
+#define LIBXL_HAVE_VIRIDIAN_STIMER 1
+
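+/*
+ * A toolstack can guard its use of the new value, e.g. (the
+ * "enlightenments" bitmap here is illustrative):
+ *
+ *   #ifdef LIBXL_HAVE_VIRIDIAN_STIMER
+ *       libxl_bitmap_set(&enlightenments,
+ *                        LIBXL_VIRIDIAN_ENLIGHTENMENT_STIMER);
+ *   #endif
+ */
+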
/*
* LIBXL_HAVE_BUILDINFO_HVM_ACPI_LAPTOP_SLATE indicates that
* libxl_domain_build_info has the u.hvm.acpi_laptop_slate field.
if (libxl_bitmap_test(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_CRASH_CTL))
mask |= HVMPV_crash_ctl;
+ if (libxl_bitmap_test(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_STIMER))
+ mask |= HVMPV_stimer;
+
if (mask != 0 &&
xc_hvm_param_set(CTX->xch,
domid,
(4, "hcall_remote_tlb_flush"),
(5, "apic_assist"),
(6, "crash_ctl"),
+ (7, "stimer"),
])
libxl_hdtype = Enumeration("hdtype", [
void arch_domain_shutdown(struct domain *d)
{
- if ( has_viridian_time_ref_count(d) )
- viridian_time_ref_count_freeze(d);
+ if ( is_viridian_domain(d) )
+ viridian_time_domain_freeze(d);
}
void arch_domain_pause(struct domain *d)
{
- if ( has_viridian_time_ref_count(d) )
- viridian_time_ref_count_freeze(d);
+ if ( is_viridian_domain(d) )
+ viridian_time_domain_freeze(d);
}
void arch_domain_unpause(struct domain *d)
{
- if ( has_viridian_time_ref_count(d) )
- viridian_time_ref_count_thaw(d);
+ if ( is_viridian_domain(d) )
+ viridian_time_domain_thaw(d);
}
int arch_domain_soft_reset(struct domain *d)
int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val);
int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val);
+bool viridian_synic_deliver_timer_msg(struct vcpu *v, unsigned int sintx,
+ uint64_t index, uint64_t expiration,
+ uint64_t delivery);
+
+void viridian_synic_vcpu_init(struct vcpu *v);
+void viridian_synic_vcpu_deinit(struct vcpu *v);
+
void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
struct hvm_viridian_vcpu_context *ctxt);
void viridian_synic_load_vcpu_ctxt(
int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val);
int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val);
+void viridian_time_poll_messages(struct vcpu *v);
+
+void viridian_time_vcpu_init(struct vcpu *v);
+void viridian_time_vcpu_deinit(struct vcpu *v);
+
+void viridian_time_save_vcpu_ctxt(const struct vcpu *v,
+ struct hvm_viridian_vcpu_context *ctxt);
+void viridian_time_load_vcpu_ctxt(
+ struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt);
+
void viridian_time_save_domain_ctxt(
const struct domain *d, struct hvm_viridian_domain_context *ctxt);
void viridian_time_load_domain_ctxt(
#include <xen/domain_page.h>
#include <xen/hypercall.h>
+#include <xen/nospec.h>
#include <xen/sched.h>
#include <xen/version.h>
#include <asm/apic.h>
#include <asm/hvm/support.h>
+#include <asm/hvm/vlapic.h>
#include "private.h"
uint8_t ReservedZBytePadding[PAGE_SIZE];
} HV_VP_ASSIST_PAGE;
+typedef enum HV_MESSAGE_TYPE {
+ HvMessageTypeNone,
+ HvMessageTimerExpired = 0x80000010,
+} HV_MESSAGE_TYPE;
+
+typedef struct HV_MESSAGE_FLAGS {
+ uint8_t MessagePending:1;
+ uint8_t Reserved:7;
+} HV_MESSAGE_FLAGS;
+
+typedef struct HV_MESSAGE_HEADER {
+ HV_MESSAGE_TYPE MessageType;
+ uint16_t Reserved1;
+ HV_MESSAGE_FLAGS MessageFlags;
+ uint8_t PayloadSize;
+ uint64_t Reserved2;
+} HV_MESSAGE_HEADER;
+
+#define HV_MESSAGE_SIZE 256
+#define HV_MESSAGE_MAX_PAYLOAD_QWORD_COUNT 30
+
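+/*
+ * The SIMP holds one HV_MESSAGE slot per SINT: 16 slots of
+ * HV_MESSAGE_SIZE (256) bytes fill exactly one 4KiB page.
+ */
+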
+typedef struct HV_MESSAGE {
+ HV_MESSAGE_HEADER Header;
+ uint64_t Payload[HV_MESSAGE_MAX_PAYLOAD_QWORD_COUNT];
+} HV_MESSAGE;
+
void viridian_apic_assist_set(struct vcpu *v)
{
HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian->vp_assist.ptr;
int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
{
+ struct domain *d = v->domain;
+
switch ( idx )
{
case HV_X64_MSR_EOI:
viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
break;
+ case HV_X64_MSR_SCONTROL:
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ v->arch.hvm.viridian->scontrol = val;
+ break;
+
+ case HV_X64_MSR_SVERSION:
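+ /* The version MSR is read-only */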
+ return X86EMUL_EXCEPTION;
+
+ case HV_X64_MSR_SIEFP:
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ v->arch.hvm.viridian->siefp = val;
+ break;
+
+ case HV_X64_MSR_SIMP:
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ viridian_unmap_guest_page(&v->arch.hvm.viridian->simp);
+ v->arch.hvm.viridian->simp.msr.raw = val;
+ viridian_dump_guest_page(v, "SIMP", &v->arch.hvm.viridian->simp);
+ if ( v->arch.hvm.viridian->simp.msr.fields.enabled )
+ viridian_map_guest_page(v, &v->arch.hvm.viridian->simp);
+ break;
+
+ case HV_X64_MSR_EOM:
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ v->arch.hvm.viridian->msg_pending = 0;
+ break;
+
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+ {
+ unsigned int sintx = idx - HV_X64_MSR_SINT0;
+ uint8_t vector = v->arch.hvm.viridian->sint[sintx].fields.vector;
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ /*
+ * Invalidate any previous mapping by setting an out-of-range
+ * index.
+ */
+ v->arch.hvm.viridian->vector_to_sintx[vector] =
+ ARRAY_SIZE(v->arch.hvm.viridian->sint);
+
+ v->arch.hvm.viridian->sint[sintx].raw = val;
+
+ /* Vectors must be in the range 16-255 inclusive */
+ vector = v->arch.hvm.viridian->sint[sintx].fields.vector;
+ if ( vector < 16 )
+ return X86EMUL_EXCEPTION;
+
+ printk(XENLOG_G_INFO "%pv: VIRIDIAN SINT%u: vector: %x\n", v, sintx,
+ vector);
+ v->arch.hvm.viridian->vector_to_sintx[vector] = sintx;
+
+ if ( v->arch.hvm.viridian->sint[sintx].fields.polling )
+ clear_bit(sintx, &v->arch.hvm.viridian->msg_pending);
+
+ break;
+ }
default:
gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016"PRIx64")\n",
__func__, idx, val);
int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
{
+ struct domain *d = v->domain;
+
switch ( idx )
{
case HV_X64_MSR_EOI:
*val = v->arch.hvm.viridian->vp_assist.msr.raw;
break;
+ case HV_X64_MSR_SCONTROL:
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ *val = v->arch.hvm.viridian->scontrol;
+ break;
+
+ case HV_X64_MSR_SVERSION:
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ /*
+ * The specification says that the version number is 0x00000001
+ * and should be in the lower 32-bits of the MSR, while the
+ * upper 32-bits are reserved... but it doesn't say what they
+ * should be set to. Assume everything but the bottom bit
+ * should be zero.
+ */
+ *val = 1ul;
+ break;
+
+ case HV_X64_MSR_SIEFP:
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ *val = v->arch.hvm.viridian->siefp;
+ break;
+
+ case HV_X64_MSR_SIMP:
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ *val = v->arch.hvm.viridian->simp.msr.raw;
+ break;
+
+ case HV_X64_MSR_EOM:
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ *val = 0;
+ break;
+
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+ {
+ unsigned int sintx = idx - HV_X64_MSR_SINT0;
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ *val = v->arch.hvm.viridian->sint[sintx].raw;
+ break;
+ }
default:
gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x\n", __func__, idx);
return X86EMUL_EXCEPTION;
return X86EMUL_OKAY;
}
+bool viridian_synic_deliver_timer_msg(struct vcpu *v, unsigned int sintx,
+ uint64_t index, uint64_t expiration,
+ uint64_t delivery)
+{
+ const union viridian_sint_msr *vs = &v->arch.hvm.viridian->sint[sintx];
+ HV_MESSAGE *msg = v->arch.hvm.viridian->simp.ptr;
+ struct {
+ uint32_t TimerIndex;
+ uint32_t Reserved;
+ uint64_t ExpirationTime;
+ uint64_t DeliveryTime;
+ } payload = {
+ .TimerIndex = index,
+ .ExpirationTime = expiration,
+ .DeliveryTime = delivery,
+ };
+
+ if ( test_bit(sintx, &v->arch.hvm.viridian->msg_pending) )
+ return false;
+
+ BUILD_BUG_ON(sizeof(*msg) != HV_MESSAGE_SIZE);
+ msg += sintx;
+
+ if ( msg->Header.MessageType != HvMessageTypeNone )
+ {
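+ /*
+ * The slot is still occupied by an undelivered message: flag it
+ * as having a pending successor and retry once the guest writes
+ * EOM.
+ */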
+ msg->Header.MessageFlags.MessagePending = 1;
+ set_bit(sintx, &v->arch.hvm.viridian->msg_pending);
+ return false;
+ }
+
+ msg->Header.MessageType = HvMessageTimerExpired;
+ msg->Header.MessageFlags.MessagePending = 0;
+ msg->Header.PayloadSize = sizeof(payload);
+ memcpy(msg->Payload, &payload, sizeof(payload));
+
+ if ( !vs->fields.mask )
+ vlapic_set_irq(vcpu_vlapic(v), vs->fields.vector, 0);
+
+ return true;
+}
+
+bool viridian_is_auto_eoi_sint(struct vcpu *v, uint8_t vector)
+{
+ unsigned int sintx = v->arch.hvm.viridian->vector_to_sintx[vector];
+
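+ /* Unmapped vectors hold the out-of-range value ARRAY_SIZE(sint) */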
+ if ( sintx >= ARRAY_SIZE(v->arch.hvm.viridian->sint) )
+ return false;
+
+ return v->arch.hvm.viridian->sint[sintx].fields.auto_eoi;
+}
+
+void viridian_ack_sint(struct vcpu *v, uint8_t vector)
+{
+ unsigned int sintx = v->arch.hvm.viridian->vector_to_sintx[vector];
+
+ if ( sintx < ARRAY_SIZE(v->arch.hvm.viridian->sint) )
+ clear_bit(sintx, &v->arch.hvm.viridian->msg_pending);
+}
+
+void viridian_synic_vcpu_init(struct vcpu *v)
+{
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->sint); i++ )
+ v->arch.hvm.viridian->sint[i].fields.mask = 1;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->vector_to_sintx); i++ )
+ v->arch.hvm.viridian->vector_to_sintx[i] =
+ ARRAY_SIZE(v->arch.hvm.viridian->sint);
+}
+
+void viridian_synic_vcpu_deinit(struct vcpu *v)
+{
+ viridian_unmap_guest_page(&v->arch.hvm.viridian->vp_assist);
+ viridian_unmap_guest_page(&v->arch.hvm.viridian->simp);
+}
+
void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
struct hvm_viridian_vcpu_context *ctxt)
{
+ unsigned int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(v->arch.hvm.viridian->sint) !=
+ ARRAY_SIZE(ctxt->sint_msr));
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->sint); i++ )
+ ctxt->sint_msr[i] = v->arch.hvm.viridian->sint[i].raw;
+
+ ctxt->simp_msr = v->arch.hvm.viridian->simp.msr.raw;
+
ctxt->apic_assist_pending = v->arch.hvm.viridian->apic_assist_pending;
ctxt->vp_assist_msr = v->arch.hvm.viridian->vp_assist.msr.raw;
}
void viridian_synic_load_vcpu_ctxt(
struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt)
{
+ unsigned int i;
+
v->arch.hvm.viridian->vp_assist.msr.raw = ctxt->vp_assist_msr;
if ( v->arch.hvm.viridian->vp_assist.msr.fields.enabled )
viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
v->arch.hvm.viridian->apic_assist_pending = ctxt->apic_assist_pending;
+
+ v->arch.hvm.viridian->simp.msr.raw = ctxt->simp_msr;
+ if ( v->arch.hvm.viridian->simp.msr.fields.enabled )
+ viridian_map_guest_page(v, &v->arch.hvm.viridian->simp);
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->sint); i++ )
+ {
+ uint8_t vector;
+
+ v->arch.hvm.viridian->sint[i].raw = ctxt->sint_msr[i];
+
+ vector = v->arch.hvm.viridian->sint[i].fields.vector;
+ if ( vector < 16 )
+ continue;
+
+ v->arch.hvm.viridian->vector_to_sintx[vector] = i;
+ }
}
/*
#include <xen/version.h>
#include <asm/apic.h>
+#include <asm/event.h>
#include <asm/hvm/support.h>
#include "private.h"
return scale_delta(tsc, &tsc_to_ns) / 100ul;
}
-void viridian_time_ref_count_freeze(struct domain *d)
+static int64_t time_ref_count(struct domain *d)
+{
+ struct viridian_time_ref_count *trc =
+ &d->arch.hvm.viridian->time_ref_count;
+
+ return raw_trc_val(d) + trc->off;
+}
+
+static void time_ref_count_freeze(struct domain *d)
{
struct viridian_time_ref_count *trc =
&d->arch.hvm.viridian->time_ref_count;
trc->val = raw_trc_val(d) + trc->off;
}
-void viridian_time_ref_count_thaw(struct domain *d)
+static void time_ref_count_thaw(struct domain *d)
{
struct viridian_time_ref_count *trc =
&d->arch.hvm.viridian->time_ref_count;
trc->off = (int64_t)trc->val - raw_trc_val(d);
}
+static void stop_stimer(struct viridian_stimer *vs)
+{
+ struct vcpu *v = vs->v;
+ unsigned int stimerx = vs - &v->arch.hvm.viridian->stimer[0];
+
+ if ( !vs->started )
+ return;
+
+ stop_timer(&vs->timer);
+ clear_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
+ vs->started = false;
+}
+
+static void stimer_expire(void *data)
+{
+ struct viridian_stimer *vs = data;
+ struct vcpu *v = vs->v;
+ unsigned int stimerx = vs - &v->arch.hvm.viridian->stimer[0];
+
+ if ( !vs->config.fields.enabled )
+ return;
+
+ set_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
+ vcpu_kick(v);
+}
+
+static void start_stimer(struct viridian_stimer *vs)
+{
+ struct vcpu *v = vs->v;
+ unsigned int stimerx = vs - &v->arch.hvm.viridian->stimer[0];
+ int64_t now = time_ref_count(v->domain);
+ s_time_t timeout;
+
+ if ( !test_and_set_bit(stimerx, &v->arch.hvm.viridian->stimer_enabled) )
+ printk(XENLOG_G_INFO "%pv: VIRIDIAN STIMER%u: enabled\n", v,
+ stimerx);
+
+ if ( vs->config.fields.periodic )
+ {
+ if ( vs->started )
+ {
+ unsigned int missed = 0;
+ uint64_t next;
+
+ /* Advance the timer expiration by one tick */
+ vs->expiration += vs->count;
+
+ /* Check to see if any expirations have been missed */
+ next = vs->expiration;
+ while ( next <= now )
+ {
+ next += vs->count;
+ missed++;
+ }
+
+ /*
+ * The specification says that if the timer is lazy then we
+ * skip over any missed expirations otherwise a non-zero
+ * missed count should be used to reduce the period of the
+ * timer until it catches up, unless the count has reached a
+ * 'significant number', in which case the timer should also
+ * be treated as lazy. Unfortunately the specification does
+ * not state what that number is so the choice of number here
+ * is a pure guess.
+ */
+ if ( vs->config.fields.lazy || missed > 3 )
+ {
+ missed = 0;
+ vs->expiration = next;
+ }
+
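+ /*
+ * E.g. with missed == 1 the effective period is halved until the
+ * timer catches up. time_ref_count() ticks are in 100ns units,
+ * hence the scaling to nanoseconds here.
+ */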
+ timeout = ((next - now) * 100ull) / (missed + 1);
+ }
+ else
+ {
+ vs->expiration = now + vs->count;
+ timeout = (vs->expiration - now) * 100ull;
+ }
+ }
+ else
+ {
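+ /*
+ * One-shot timers interpret the count MSR as an absolute
+ * expiration time (in 100ns units), so a value already in the
+ * past must fire immediately.
+ */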
+ vs->expiration = vs->count;
+ if ( vs->count <= now )
+ {
+ set_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
+ return;
+ }
+
+ timeout = (vs->expiration - now) * 100ull;
+ }
+
+ vs->started = true;
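+ /* Arm the Xen timer on the pCPU currently running this vCPU */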
+ migrate_timer(&vs->timer, smp_processor_id());
+ set_timer(&vs->timer, timeout + NOW());
+}
+
+static void poll_stimer(struct vcpu *v, unsigned int stimerx)
+{
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[stimerx];
+
+ if ( !test_bit(stimerx, &v->arch.hvm.viridian->stimer_pending) )
+ return;
+
+ if ( !viridian_synic_deliver_timer_msg(v, vs->config.fields.sintx,
+ stimerx, vs->expiration,
+ time_ref_count(v->domain)) )
+ return;
+
+ clear_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
+
+ if ( vs->config.fields.periodic )
+ start_stimer(vs);
+ else
+ vs->config.fields.enabled = 0;
+}
+
+void viridian_time_vcpu_freeze(struct vcpu *v)
+{
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+ if ( vs->started )
+ stop_timer(&vs->timer);
+ }
+}
+
+void viridian_time_vcpu_thaw(struct vcpu *v)
+{
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+ if ( vs->config.fields.enabled )
+ start_stimer(vs);
+ }
+}
+
+void viridian_time_domain_freeze(struct domain *d)
+{
+ struct vcpu *v;
+
+ for_each_vcpu ( d, v )
+ viridian_time_vcpu_freeze(v);
+
+ time_ref_count_freeze(d);
+}
+
+void viridian_time_domain_thaw(struct domain *d)
+{
+ struct vcpu *v;
+
+ time_ref_count_thaw(d);
+
+ for_each_vcpu ( d, v )
+ viridian_time_vcpu_thaw(v);
+}
+
int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
{
struct domain *d = v->domain;
update_reference_tsc(d, true);
break;
+ case HV_X64_MSR_TIME_REF_COUNT:
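+ /* The reference count is read-only */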
+ return X86EMUL_EXCEPTION;
+
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ {
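+ /* CONFIG and COUNT MSRs for each timer are interleaved, hence / 2 */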
+ unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_CONFIG) / 2;
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[stimerx];
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ stop_stimer(vs);
+
+ vs->config.raw = val;
+
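+ /* A zero SINTx renders the timer unusable, so clear the enabled bit */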
+ if ( !vs->config.fields.sintx )
+ vs->config.fields.enabled = 0;
+
+ if ( vs->config.fields.enabled )
+ start_stimer(vs);
+
+ break;
+ }
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ {
+ unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_COUNT) / 2;
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[stimerx];
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ stop_stimer(vs);
+
+ vs->count = val;
+
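+ /*
+ * Per the specification, writing a non-zero count when auto_enable
+ * is set implicitly enables the timer, whereas a zero count always
+ * disables it.
+ */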
+ if ( !vs->count )
+ vs->config.fields.enabled = 0;
+ else if ( vs->config.fields.auto_enable )
+ vs->config.fields.enabled = 1;
+
+ if ( vs->config.fields.enabled )
+ start_stimer(vs);
+
+ break;
+ }
default:
gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016"PRIx64")\n",
__func__, idx, val);
return X86EMUL_OKAY;
}
+void viridian_time_poll_messages(struct vcpu *v)
+{
+ unsigned int i;
+
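+ /* Fast path: bail early if no stimer has fired since the last check */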
+ if ( !v->arch.hvm.viridian->stimer_pending )
+ return;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ poll_stimer(v, i);
+}
+
int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
{
struct domain *d = v->domain;
printk(XENLOG_G_INFO "d%d: VIRIDIAN MSR_TIME_REF_COUNT: accessed\n",
d->domain_id);
- *val = raw_trc_val(d) + trc->off;
+ *val = time_ref_count(d);
break;
}
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ {
+ unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_CONFIG) / 2;
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ *val = v->arch.hvm.viridian->stimer[stimerx].config.raw;
+ break;
+ }
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ {
+ unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_COUNT) / 2;
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ *val = v->arch.hvm.viridian->stimer[stimerx].count;
+ break;
+ }
default:
gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x\n", __func__, idx);
return X86EMUL_EXCEPTION;
return X86EMUL_OKAY;
}
+void viridian_time_vcpu_init(struct vcpu *v)
+{
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+ vs->v = v;
+ init_timer(&vs->timer, stimer_expire, vs, v->processor);
+ }
+}
+
+void viridian_time_vcpu_deinit(struct vcpu *v)
+{
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+ kill_timer(&vs->timer);
+ vs->v = NULL;
+ }
+}
+
+void viridian_time_save_vcpu_ctxt(const struct vcpu *v,
+ struct hvm_viridian_vcpu_context *ctxt)
+{
+ unsigned int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(v->arch.hvm.viridian->stimer) !=
+ ARRAY_SIZE(ctxt->stimer_config_msr));
+ BUILD_BUG_ON(ARRAY_SIZE(v->arch.hvm.viridian->stimer) !=
+ ARRAY_SIZE(ctxt->stimer_count_msr));
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+ ctxt->stimer_config_msr[i] = vs->config.raw;
+ ctxt->stimer_count_msr[i] = vs->count;
+ }
+}
+
+void viridian_time_load_vcpu_ctxt(
+ struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt)
+{
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+ vs->config.raw = ctxt->stimer_config_msr[i];
+ vs->count = ctxt->stimer_count_msr[i];
+ }
+}
+
void viridian_time_save_domain_ctxt(
const struct domain *d, struct hvm_viridian_domain_context *ctxt)
{
mask.AccessPartitionReferenceCounter = 1;
if ( viridian_feature_mask(d) & HVMPV_reference_tsc )
mask.AccessPartitionReferenceTsc = 1;
+ if ( viridian_feature_mask(d) & HVMPV_stimer )
+ {
+ mask.AccessSynicRegs = 1;
+ mask.AccessSyntheticTimerRegs = 1;
+ }
u.mask = mask;
case HV_X64_MSR_ICR:
case HV_X64_MSR_TPR:
case HV_X64_MSR_VP_ASSIST_PAGE:
+ case HV_X64_MSR_SCONTROL:
+ case HV_X64_MSR_SVERSION:
+ case HV_X64_MSR_SIEFP:
+ case HV_X64_MSR_SIMP:
+ case HV_X64_MSR_EOM:
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
return viridian_synic_wrmsr(v, idx, val);
+ case HV_X64_MSR_TSC_FREQUENCY:
+ case HV_X64_MSR_APIC_FREQUENCY:
case HV_X64_MSR_REFERENCE_TSC:
+ case HV_X64_MSR_TIME_REF_COUNT:
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ case HV_X64_MSR_STIMER3_COUNT:
return viridian_time_wrmsr(v, idx, val);
case HV_X64_MSR_CRASH_P0:
case HV_X64_MSR_ICR:
case HV_X64_MSR_TPR:
case HV_X64_MSR_VP_ASSIST_PAGE:
+ case HV_X64_MSR_SCONTROL:
+ case HV_X64_MSR_SVERSION:
+ case HV_X64_MSR_SIEFP:
+ case HV_X64_MSR_SIMP:
+ case HV_X64_MSR_EOM:
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
return viridian_synic_rdmsr(v, idx, val);
case HV_X64_MSR_TSC_FREQUENCY:
case HV_X64_MSR_APIC_FREQUENCY:
case HV_X64_MSR_REFERENCE_TSC:
case HV_X64_MSR_TIME_REF_COUNT:
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ case HV_X64_MSR_STIMER3_COUNT:
return viridian_time_rdmsr(v, idx, val);
case HV_X64_MSR_CRASH_P0:
if ( !v->arch.hvm.viridian )
return -ENOMEM;
+ viridian_synic_vcpu_init(v);
+ viridian_time_vcpu_init(v);
+
return 0;
}
return;
if ( is_viridian_vcpu(v) )
- viridian_synic_wrmsr(v, HV_X64_MSR_VP_ASSIST_PAGE, 0);
+ {
+ viridian_time_vcpu_deinit(v);
+ viridian_synic_vcpu_deinit(v);
+ }
xfree(v->arch.hvm.viridian);
v->arch.hvm.viridian = NULL;
d->arch.hvm.viridian = NULL;
}
+void viridian_poll_messages(struct vcpu *v)
+{
+ viridian_time_poll_messages(v);
+}
+
static DEFINE_PER_CPU(cpumask_t, ipi_cpumask);
int viridian_hypercall(struct cpu_user_regs *regs)
if ( !is_viridian_vcpu(v) )
return 0;
+ viridian_time_save_vcpu_ctxt(v, &ctxt);
viridian_synic_save_vcpu_ctxt(v, &ctxt);
return hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt);
return -EINVAL;
viridian_synic_load_vcpu_ctxt(v, &ctxt);
+ viridian_time_load_vcpu_ctxt(v, &ctxt);
return 0;
}
void vlapic_handle_EOI(struct vlapic *vlapic, u8 vector)
{
+ struct vcpu *v = vlapic_vcpu(vlapic);
struct domain *d = vlapic_domain(vlapic);
if ( vlapic_test_vector(vector, &vlapic->regs->data[APIC_TMR]) )
vioapic_update_EOI(d, vector);
+ if ( is_viridian_vcpu(v) )
+ viridian_ack_sint(v, vector);
+
hvm_dpci_msi_eoi(d, vector);
}
if ( !vlapic_enabled(vlapic) )
return -1;
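+ /*
+ * Deliver any expired synthetic timer messages into the SIMP before
+ * scanning the IRR, so the corresponding SINT vectors are visible.
+ */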
+ if ( is_viridian_vcpu(v) )
+ viridian_poll_messages(v);
+
irr = vlapic_find_highest_irr(vlapic);
if ( irr == -1 )
return -1;
}
done:
- vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
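+ /*
+ * Auto-EOI SINT vectors are never set in the ISR since the guest
+ * will not issue an EOI for them.
+ */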
+ if ( !is_viridian_vcpu(v) || !viridian_is_auto_eoi_sint(v, vector) )
+ vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
+
vlapic_clear_irr(vector, vlapic);
return 1;
}
#define has_viridian_apic_assist(d) \
(is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist))
+#define has_viridian_stimer(d) \
+ (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_stimer))
+
static inline void hvm_inject_exception(
unsigned int vector, unsigned int type,
unsigned int insn_len, int error_code)
void *ptr;
};
+union viridian_sint_msr
+{
+ uint64_t raw;
+ struct
+ {
+ uint64_t vector:8;
+ uint64_t reserved_preserved1:8;
+ uint64_t mask:1;
+ uint64_t auto_eoi:1;
+ uint64_t polling:1;
+ uint64_t reserved_preserved2:45;
+ } fields;
+};
+
+union viridian_stimer_config_msr
+{
+ uint64_t raw;
+ struct
+ {
+ uint64_t enabled:1;
+ uint64_t periodic:1;
+ uint64_t lazy:1;
+ uint64_t auto_enable:1;
+ uint64_t vector:8;
+ uint64_t direct_mode:1;
+ uint64_t reserved_zero1:3;
+ uint64_t sintx:4;
+ uint64_t reserved_zero2:44;
+ } fields;
+};
+
+struct viridian_stimer {
+ struct vcpu *v;
+ struct timer timer;
+ union viridian_stimer_config_msr config;
+ uint64_t count;
+ uint64_t expiration;
+ s_time_t timeout;
+ bool started;
+};
+
struct viridian_vcpu
{
struct viridian_page vp_assist;
bool apic_assist_pending;
+ uint64_t scontrol;
+ uint64_t siefp;
+ struct viridian_page simp;
+ union viridian_sint_msr sint[16];
+ uint8_t vector_to_sintx[256];
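+ /* Bit n set: the SINTn message slot awaits an EOM from the guest */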
+ unsigned long msg_pending;
+ struct viridian_stimer stimer[4];
+ unsigned long stimer_enabled;
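+ /* Bit n set: stimer n has expired; its message is yet to be delivered */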
+ unsigned long stimer_pending;
uint64_t crash_param[5];
};
int
viridian_hypercall(struct cpu_user_regs *regs);
-void viridian_time_ref_count_freeze(struct domain *d);
-void viridian_time_ref_count_thaw(struct domain *d);
+void viridian_time_domain_freeze(struct domain *d);
+void viridian_time_domain_thaw(struct domain *d);
int viridian_vcpu_init(struct vcpu *v);
int viridian_domain_init(struct domain *d);
bool viridian_apic_assist_completed(struct vcpu *v);
void viridian_apic_assist_clear(struct vcpu *v);
+bool viridian_is_auto_eoi_sint(struct vcpu *v, uint8_t vector);
+void viridian_ack_sint(struct vcpu *v, uint8_t vector);
+void viridian_poll_messages(struct vcpu *v);
+
#endif /* __ASM_X86_HVM_VIRIDIAN_H__ */
/*
uint64_t vp_assist_msr;
uint8_t apic_assist_pending;
uint8_t _pad[7];
+ uint64_t simp_msr;
+ uint64_t sint_msr[16];
+ uint64_t stimer_config_msr[4];
+ uint64_t stimer_count_msr[4];
};
DECLARE_HVM_SAVE_TYPE(VIRIDIAN_VCPU, 17, struct hvm_viridian_vcpu_context);
#define _HVMPV_crash_ctl 6
#define HVMPV_crash_ctl (1 << _HVMPV_crash_ctl)
+/* Enable the synthetic timer (and prerequisite SynIC) MSRs */
+#define _HVMPV_stimer 7
+#define HVMPV_stimer (1 << _HVMPV_stimer)
+
#define HVMPV_feature_mask \
(HVMPV_base_freq | \
HVMPV_no_freq | \
HVMPV_reference_tsc | \
HVMPV_hcall_remote_tlb_flush | \
HVMPV_apic_assist | \
- HVMPV_crash_ctl)
+ HVMPV_crash_ctl | \
+ HVMPV_stimer)
#endif