xenbits.xensource.com Git - people/pauldu/xen.git/commitdiff
viridian: add implementation of synthetic timers (STIMERs)
author Paul Durrant <paul.durrant@citrix.com>
Wed, 19 Dec 2018 10:00:12 +0000 (10:00 +0000)
committer Paul Durrant <paul.durrant@citrix.com>
Wed, 19 Dec 2018 18:01:35 +0000 (18:01 +0000)
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
13 files changed:
tools/libxl/libxl.h
tools/libxl/libxl_dom.c
tools/libxl/libxl_types.idl
xen/arch/x86/domain.c
xen/arch/x86/hvm/viridian/private.h
xen/arch/x86/hvm/viridian/synic.c
xen/arch/x86/hvm/viridian/time.c
xen/arch/x86/hvm/viridian/viridian.c
xen/arch/x86/hvm/vlapic.c
xen/include/asm-x86/hvm/hvm.h
xen/include/asm-x86/hvm/viridian.h
xen/include/public/arch-x86/hvm/save.h
xen/include/public/hvm/params.h

diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
index a38e5cdba2d60e8c82b9cc4aed81163f31509cb7..a8b5452c28f8d1df1e421ba7cee20ef13d54eb45 100644
  */
 #define LIBXL_HAVE_VIRIDIAN_CRASH_CTL 1
 
+/*
+ * LIBXL_HAVE_VIRIDIAN_STIMER indicates that the 'stimer' value
+ * is present in the viridian enlightenment enumeration.
+ */
+#define LIBXL_HAVE_VIRIDIAN_STIMER 1
+
 /*
  * LIBXL_HAVE_BUILDINFO_HVM_ACPI_LAPTOP_SLATE indicates that
  * libxl_domain_build_info has the u.hvm.acpi_laptop_slate field.
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index 6160991af316ca0fac6c58ccad94cd8651c51f16..5a5c7c8bd196807e6e8c20858a73a34d83644c62 100644
@@ -317,6 +317,9 @@ static int hvm_set_viridian_features(libxl__gc *gc, uint32_t domid,
     if (libxl_bitmap_test(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_CRASH_CTL))
         mask |= HVMPV_crash_ctl;
 
+    if (libxl_bitmap_test(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_STIMER))
+        mask |= HVMPV_stimer;
+
     if (mask != 0 &&
         xc_hvm_param_set(CTX->xch,
                          domid,
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index 51cf06a3a2cd63715b0b403455aae41574f271b3..5769449f4b44245d766999b0d0334d7ef408112d 100644
@@ -228,6 +228,7 @@ libxl_viridian_enlightenment = Enumeration("viridian_enlightenment", [
     (4, "hcall_remote_tlb_flush"),
     (5, "apic_assist"),
     (6, "crash_ctl"),
+    (7, "stimer"),
     ])
 
 libxl_hdtype = Enumeration("hdtype", [
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 32dc4253ff92ec08dd3610658ec8c0c260331e39..e8904f67776cc0df24a8a36cb6582f99cacf09fc 100644
@@ -658,20 +658,20 @@ void arch_domain_destroy(struct domain *d)
 
 void arch_domain_shutdown(struct domain *d)
 {
-    if ( has_viridian_time_ref_count(d) )
-        viridian_time_ref_count_freeze(d);
+    if ( is_viridian_domain(d) )
+        viridian_time_domain_freeze(d);
 }
 
 void arch_domain_pause(struct domain *d)
 {
-    if ( has_viridian_time_ref_count(d) )
-        viridian_time_ref_count_freeze(d);
+    if ( is_viridian_domain(d) )
+        viridian_time_domain_freeze(d);
 }
 
 void arch_domain_unpause(struct domain *d)
 {
-    if ( has_viridian_time_ref_count(d) )
-        viridian_time_ref_count_thaw(d);
+    if ( is_viridian_domain(d) )
+        viridian_time_domain_thaw(d);
 }
 
 int arch_domain_soft_reset(struct domain *d)
diff --git a/xen/arch/x86/hvm/viridian/private.h b/xen/arch/x86/hvm/viridian/private.h
index 398b22f12d965dfe3f6607dada414ce4787752bb..066544b3802cd8d54792df8813957a0745ba3e46 100644
 int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val);
 int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val);
 
+bool viridian_synic_deliver_timer_msg(struct vcpu *v, unsigned int sintx,
+                                      uint64_t index, uint64_t expiration,
+                                      uint64_t delivery);
+
+void viridian_synic_vcpu_init(struct vcpu *v);
+void viridian_synic_vcpu_deinit(struct vcpu *v);
+
 void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
                                    struct hvm_viridian_vcpu_context *ctxt);
 void viridian_synic_load_vcpu_ctxt(
@@ -82,6 +89,16 @@ void viridian_synic_load_vcpu_ctxt(
 int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val);
 int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val);
 
+void viridian_time_poll_messages(struct vcpu *v);
+
+void viridian_time_vcpu_init(struct vcpu *v);
+void viridian_time_vcpu_deinit(struct vcpu *v);
+
+void viridian_time_save_vcpu_ctxt(const struct vcpu *v,
+                                  struct hvm_viridian_vcpu_context *ctxt);
+void viridian_time_load_vcpu_ctxt(
+    struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt);
+
 void viridian_time_save_domain_ctxt(
     const struct domain *d, struct hvm_viridian_domain_context *ctxt);
 void viridian_time_load_domain_ctxt(
diff --git a/xen/arch/x86/hvm/viridian/synic.c b/xen/arch/x86/hvm/viridian/synic.c
index 20731c23795ee342fd4d579ec12e5e4ee49182e3..518a05362dafa096b61179b7a2a51e8cf0bb469a 100644
@@ -8,11 +8,13 @@
 
 #include <xen/domain_page.h>
 #include <xen/hypercall.h>
+#include <xen/nospec.h>
 #include <xen/sched.h>
 #include <xen/version.h>
 
 #include <asm/apic.h>
 #include <asm/hvm/support.h>
+#include <asm/hvm/vlapic.h>
 
 #include "private.h"
 
@@ -28,6 +30,32 @@ typedef union _HV_VP_ASSIST_PAGE
     uint8_t ReservedZBytePadding[PAGE_SIZE];
 } HV_VP_ASSIST_PAGE;
 
+typedef enum HV_MESSAGE_TYPE {
+    HvMessageTypeNone,
+    HvMessageTimerExpired = 0x80000010,
+} HV_MESSAGE_TYPE;
+
+typedef struct HV_MESSAGE_FLAGS {
+    uint8_t MessagePending:1;
+    uint8_t Reserved:7;
+} HV_MESSAGE_FLAGS;
+
+typedef struct HV_MESSAGE_HEADER {
+    HV_MESSAGE_TYPE MessageType;
+    uint16_t Reserved1;
+    HV_MESSAGE_FLAGS MessageFlags;
+    uint8_t PayloadSize;
+    uint64_t Reserved2;
+} HV_MESSAGE_HEADER;
+
+#define HV_MESSAGE_SIZE 256
+#define HV_MESSAGE_MAX_PAYLOAD_QWORD_COUNT 30
+
+typedef struct HV_MESSAGE {
+    HV_MESSAGE_HEADER Header;
+    uint64_t Payload[HV_MESSAGE_MAX_PAYLOAD_QWORD_COUNT];
+} HV_MESSAGE;
+
 void viridian_apic_assist_set(struct vcpu *v)
 {
     HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian->vp_assist.ptr;
@@ -78,6 +106,8 @@ void viridian_apic_assist_clear(struct vcpu *v)
 
 int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
 {
+    struct domain *d = v->domain;
+
     switch ( idx )
     {
     case HV_X64_MSR_EOI:
@@ -103,6 +133,73 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
             viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
         break;
 
+    case HV_X64_MSR_SCONTROL:
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        v->arch.hvm.viridian->scontrol = val;
+        break;
+
+    case HV_X64_MSR_SVERSION:
+        return X86EMUL_EXCEPTION;
+
+    case HV_X64_MSR_SIEFP:
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        v->arch.hvm.viridian->siefp = val;
+        break;
+
+    case HV_X64_MSR_SIMP:
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        viridian_unmap_guest_page(&v->arch.hvm.viridian->simp);
+        v->arch.hvm.viridian->simp.msr.raw = val;
+        viridian_dump_guest_page(v, "SIMP", &v->arch.hvm.viridian->simp);
+        if ( v->arch.hvm.viridian->simp.msr.fields.enabled )
+            viridian_map_guest_page(v, &v->arch.hvm.viridian->simp);
+        break;
+
+    case HV_X64_MSR_EOM:
+    {
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        v->arch.hvm.viridian->msg_pending = 0;
+        break;
+    }
+    case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+    {
+        unsigned int sintx = idx - HV_X64_MSR_SINT0;
+        uint8_t vector = v->arch.hvm.viridian->sint[sintx].fields.vector;
+
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        /*
+         * Invalidate any previous mapping by setting an out-of-range
+         * index.
+         */
+        v->arch.hvm.viridian->vector_to_sintx[vector] =
+            ARRAY_SIZE(v->arch.hvm.viridian->sint);
+
+        v->arch.hvm.viridian->sint[sintx].raw = val;
+
+        /* Vectors must be in the range 16-255 inclusive */
+        vector = v->arch.hvm.viridian->sint[sintx].fields.vector;
+        if ( vector < 16 )
+            return X86EMUL_EXCEPTION;
+
+        printk(XENLOG_G_INFO "%pv: VIRIDIAN SINT%u: vector: %x\n", v, sintx,
+               vector);
+        v->arch.hvm.viridian->vector_to_sintx[vector] = sintx;
+
+        if ( v->arch.hvm.viridian->sint[sintx].fields.polling )
+            clear_bit(sintx, &v->arch.hvm.viridian->msg_pending);
+
+        break;
+    }
     default:
         gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016"PRIx64")\n",
                  __func__, idx, val);
@@ -114,6 +211,8 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
 
 int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
 {
+    struct domain *d = v->domain;
+
     switch ( idx )
     {
     case HV_X64_MSR_EOI:
@@ -135,6 +234,58 @@ int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
         *val = v->arch.hvm.viridian->vp_assist.msr.raw;
         break;
 
+    case HV_X64_MSR_SCONTROL:
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        *val = v->arch.hvm.viridian->scontrol;
+        break;
+
+    case HV_X64_MSR_SVERSION:
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        /*
+         * The specification says that the version number is 0x00000001
+         * and should be in the lower 32-bits of the MSR, while the
+         * upper 32-bits are reserved... but it doesn't say what they
+         * should be set to. Assume everything but the bottom bit
+         * should be zero.
+         */
+        *val = 1ul;
+        break;
+
+    case HV_X64_MSR_SIEFP:
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        *val = v->arch.hvm.viridian->siefp;
+        break;
+
+    case HV_X64_MSR_SIMP:
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        *val = v->arch.hvm.viridian->simp.msr.raw;
+        break;
+
+    case HV_X64_MSR_EOM:
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        *val = 0;
+        break;
+
+    case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+    {
+        unsigned int sintx = idx - HV_X64_MSR_SINT0;
+
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        *val = v->arch.hvm.viridian->sint[sintx].raw;
+        break;
+    }
     default:
         gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x\n", __func__, idx);
         return X86EMUL_EXCEPTION;
@@ -143,9 +294,96 @@ int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
     return X86EMUL_OKAY;
 }
 
+bool viridian_synic_deliver_timer_msg(struct vcpu *v, unsigned int sintx,
+                                      uint64_t index, uint64_t expiration,
+                                      uint64_t delivery)
+{
+    const union viridian_sint_msr *vs = &v->arch.hvm.viridian->sint[sintx];
+    HV_MESSAGE *msg = v->arch.hvm.viridian->simp.ptr;
+    struct {
+        uint32_t TimerIndex;
+        uint32_t Reserved;
+        uint64_t ExpirationTime;
+        uint64_t DeliveryTime;
+    } payload = {
+        .TimerIndex = index,
+        .ExpirationTime = expiration,
+        .DeliveryTime = delivery,
+    };
+
+    if ( test_bit(sintx, &v->arch.hvm.viridian->msg_pending) )
+        return false;
+
+    BUILD_BUG_ON(sizeof(*msg) != HV_MESSAGE_SIZE);
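+    /* The message page is an array of HV_MESSAGE slots, one per SINT */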
+    msg += sintx;
+
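+    /*
+     * If the message slot is still in use then set the 'MessagePending'
+     * flag and defer delivery until the guest signals EOM.
+     */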
+    if ( msg->Header.MessageType != HvMessageTypeNone )
+    {
+        msg->Header.MessageFlags.MessagePending = 1;
+        set_bit(sintx, &v->arch.hvm.viridian->msg_pending);
+        return false;
+    }
+
+    msg->Header.MessageType = HvMessageTimerExpired;
+    msg->Header.MessageFlags.MessagePending = 0;
+    msg->Header.PayloadSize = sizeof(payload);
+    memcpy(msg->Payload, &payload, sizeof(payload));
+
+    if ( !vs->fields.mask )
+        vlapic_set_irq(vcpu_vlapic(v), vs->fields.vector, 0);
+
+    return true;
+}
+
+bool viridian_is_auto_eoi_sint(struct vcpu *v, uint8_t vector)
+{
+    int sintx = v->arch.hvm.viridian->vector_to_sintx[vector];
+
+    if ( sintx >= ARRAY_SIZE(v->arch.hvm.viridian->sint) )
+        return false;
+
+    return v->arch.hvm.viridian->sint[sintx].fields.auto_eoi;
+}
+
+void viridian_ack_sint(struct vcpu *v, uint8_t vector)
+{
+    int sintx = v->arch.hvm.viridian->vector_to_sintx[vector];
+
+    if ( sintx < ARRAY_SIZE(v->arch.hvm.viridian->sint) )
+        clear_bit(sintx, &v->arch.hvm.viridian->msg_pending);
+}
+
+void viridian_synic_vcpu_init(struct vcpu *v)
+{
+    unsigned int i;
+
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->sint); i++ )
+        v->arch.hvm.viridian->sint[i].fields.mask = 1;
+
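+    /* Use an out-of-range sintx to mean 'no SINT mapped to this vector' */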
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->vector_to_sintx); i++ )
+        v->arch.hvm.viridian->vector_to_sintx[i] =
+            ARRAY_SIZE(v->arch.hvm.viridian->sint);
+}
+
+void viridian_synic_vcpu_deinit(struct vcpu *v)
+{
+    viridian_unmap_guest_page(&v->arch.hvm.viridian->vp_assist);
+    viridian_unmap_guest_page(&v->arch.hvm.viridian->simp);
+}
+
 void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
                                    struct hvm_viridian_vcpu_context *ctxt)
 {
+    unsigned int i;
+
+    BUILD_BUG_ON(ARRAY_SIZE(v->arch.hvm.viridian->sint) !=
+                 ARRAY_SIZE(ctxt->sint_msr));
+
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->sint); i++ )
+        ctxt->sint_msr[i] = v->arch.hvm.viridian->sint[i].raw;
+
+    ctxt->simp_msr = v->arch.hvm.viridian->simp.msr.raw;
+
     ctxt->apic_assist_pending = v->arch.hvm.viridian->apic_assist_pending;
     ctxt->vp_assist_msr = v->arch.hvm.viridian->vp_assist.msr.raw;
 }
@@ -153,11 +391,30 @@ void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
 void viridian_synic_load_vcpu_ctxt(
     struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt)
 {
+    unsigned int i;
+
     v->arch.hvm.viridian->vp_assist.msr.raw = ctxt->vp_assist_msr;
     if ( v->arch.hvm.viridian->vp_assist.msr.fields.enabled )
         viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
 
     v->arch.hvm.viridian->apic_assist_pending = ctxt->apic_assist_pending;
+
+    v->arch.hvm.viridian->simp.msr.raw = ctxt->simp_msr;
+    if ( v->arch.hvm.viridian->simp.msr.fields.enabled )
+        viridian_map_guest_page(v, &v->arch.hvm.viridian->simp);
+
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->sint); i++ )
+    {
+        uint8_t vector;
+
+        v->arch.hvm.viridian->sint[i].raw = ctxt->sint_msr[i];
+
+        vector = v->arch.hvm.viridian->sint[i].fields.vector;
+        if ( vector < 16 )
+            continue;
+
+        v->arch.hvm.viridian->vector_to_sintx[vector] = i;
+    }
 }
 
 /*
diff --git a/xen/arch/x86/hvm/viridian/time.c b/xen/arch/x86/hvm/viridian/time.c
index 42367f6460c5e4b5c4b8abdb1bd898b90cb794f1..6f86791d442765855e96a60c8fe7775cef3932ee 100644
@@ -12,6 +12,7 @@
 #include <xen/version.h>
 
 #include <asm/apic.h>
+#include <asm/event.h>
 #include <asm/hvm/support.h>
 
 #include "private.h"
@@ -119,7 +120,15 @@ static int64_t raw_trc_val(struct domain *d)
     return scale_delta(tsc, &tsc_to_ns) / 100ul;
 }
 
-void viridian_time_ref_count_freeze(struct domain *d)
+static int64_t time_ref_count(struct domain *d)
+{
+    struct viridian_time_ref_count *trc =
+        &d->arch.hvm.viridian->time_ref_count;
+
+    return raw_trc_val(d) + trc->off;
+}
+
+static void time_ref_count_freeze(struct domain *d)
 {
     struct viridian_time_ref_count *trc =
         &d->arch.hvm.viridian->time_ref_count;
@@ -128,7 +137,7 @@ void viridian_time_ref_count_freeze(struct domain *d)
         trc->val = raw_trc_val(d) + trc->off;
 }
 
-void viridian_time_ref_count_thaw(struct domain *d)
+static void time_ref_count_thaw(struct domain *d)
 {
     struct viridian_time_ref_count *trc =
         &d->arch.hvm.viridian->time_ref_count;
@@ -138,6 +147,168 @@ void viridian_time_ref_count_thaw(struct domain *d)
         trc->off = (int64_t)trc->val - raw_trc_val(d);
 }
 
+static void stop_stimer(struct viridian_stimer *vs)
+{
+    struct vcpu *v = vs->v;
+    unsigned int stimerx = vs - &v->arch.hvm.viridian->stimer[0];
+
+    if ( !vs->started )
+        return;
+
+    stop_timer(&vs->timer);
+    clear_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
+    vs->started = false;
+}
+
+static void stimer_expire(void *data)
+{
+    struct viridian_stimer *vs = data;
+    struct vcpu *v = vs->v;
+    unsigned int stimerx = vs - &v->arch.hvm.viridian->stimer[0];
+
+    if ( !vs->config.fields.enabled )
+        return;
+
+    set_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
+    vcpu_kick(v);
+}
+
+static void start_stimer(struct viridian_stimer *vs)
+{
+    struct vcpu *v = vs->v;
+    unsigned int stimerx = vs - &v->arch.hvm.viridian->stimer[0];
+    int64_t now = time_ref_count(v->domain);
+    s_time_t timeout;
+
+    if ( !test_and_set_bit(stimerx, &v->arch.hvm.viridian->stimer_enabled) )
+        printk(XENLOG_G_INFO "%pv: VIRIDIAN STIMER%u: enabled\n", v,
+               stimerx);
+
+    if ( vs->config.fields.periodic )
+    {
+        if ( vs->started )
+        {
+            unsigned int missed = 0;
+            uint64_t next;
+
+            /* Advance the timer expiration by one tick */
+            vs->expiration += vs->count;
+
+            /* Check to see if any expirations have been missed */
+            next = vs->expiration;
+            while ( next <= now )
+            {
+                next += vs->count;
+                missed++;
+            }
+
+            /*
+             * The specification says that if the timer is lazy then we
+             * should skip over any missed expirations; otherwise a
+             * non-zero missed count should be used to reduce the period
+             * of the timer until it catches up, unless the count has
+             * reached a 'significant number', in which case the timer
+             * should also be treated as lazy. Unfortunately the
+             * specification does not state what that number is, so the
+             * choice here (3) is a pure guess.
+             */
+            if ( vs->config.fields.lazy || missed > 3 )
+            {
+                missed = 0;
+                vs->expiration = next;
+            }
+
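+            /*
+             * Reference count values are in units of 100ns, so scale up
+             * to ns for the timeout; divide by the number of missed ticks
+             * (+ 1) to shorten the period and let the timer catch up.
+             */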
+            timeout = ((next - now) * 100ull) / (missed + 1);
+        }
+        else
+        {
+            vs->expiration = now + vs->count;
+            timeout = (vs->expiration - now) * 100ull;
+        }
+    }
+    else
+    {
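+        /* For a one-shot timer the count is an absolute expiration time */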
+        vs->expiration = vs->count;
+        if ( vs->count <= now )
+        {
+            set_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
+            return;
+        }
+
+        timeout = (vs->expiration - now) * 100ull;
+    }
+
+    vs->started = true;
+    migrate_timer(&vs->timer, smp_processor_id());
+    set_timer(&vs->timer, timeout + NOW());
+}
+
+static void poll_stimer(struct vcpu *v, unsigned int stimerx)
+{
+    struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[stimerx];
+
+    if ( !test_bit(stimerx, &v->arch.hvm.viridian->stimer_pending) )
+        return;
+
+    if ( !viridian_synic_deliver_timer_msg(v, vs->config.fields.sintx,
+                                           stimerx, vs->expiration,
+                                           time_ref_count(v->domain)) )
+        return;
+
+    clear_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
+
+    if ( vs->config.fields.periodic )
+        start_stimer(vs);
+    else
+        vs->config.fields.enabled = 0;
+}
+
+void viridian_time_vcpu_freeze(struct vcpu *v)
+{
+    unsigned int i;
+
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+    {
+        struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+        if ( vs->started )
+            stop_timer(&vs->timer);
+    }
+}
+
+void viridian_time_vcpu_thaw(struct vcpu *v)
+{
+    unsigned int i;
+
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+    {
+        struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+        if ( vs->config.fields.enabled )
+            start_stimer(vs);
+    }
+}
+
+void viridian_time_domain_freeze(struct domain *d)
+{
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+        viridian_time_vcpu_freeze(v);
+
+    time_ref_count_freeze(d);
+}
+
+void viridian_time_domain_thaw(struct domain *d)
+{
+    struct vcpu *v;
+
+    time_ref_count_thaw(d);
+
+    for_each_vcpu ( d, v )
+        viridian_time_vcpu_thaw(v);
+}
+
 int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
 {
     struct domain *d = v->domain;
@@ -154,6 +325,57 @@ int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
             update_reference_tsc(d, true);
         break;
 
+    case HV_X64_MSR_TIME_REF_COUNT:
+        return X86EMUL_EXCEPTION;
+
+    case HV_X64_MSR_STIMER0_CONFIG:
+    case HV_X64_MSR_STIMER1_CONFIG:
+    case HV_X64_MSR_STIMER2_CONFIG:
+    case HV_X64_MSR_STIMER3_CONFIG:
+    {
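+        /* CONFIG and COUNT MSRs are interleaved, hence the divide by 2 */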
+        unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_CONFIG) / 2;
+        struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[stimerx];
+
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        stop_stimer(vs);
+
+        vs->config.raw = val;
+
+        if ( !vs->config.fields.sintx )
+            vs->config.fields.enabled = 0;
+
+        if ( vs->config.fields.enabled )
+            start_stimer(vs);
+
+        break;
+    }
+    case HV_X64_MSR_STIMER0_COUNT:
+    case HV_X64_MSR_STIMER1_COUNT:
+    case HV_X64_MSR_STIMER2_COUNT:
+    case HV_X64_MSR_STIMER3_COUNT:
+    {
+        unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_COUNT) / 2;
+        struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[stimerx];
+
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        stop_stimer(vs);
+
+        vs->count = val;
+
+        if ( !vs->count )
+            vs->config.fields.enabled = 0;
+        else if ( vs->config.fields.auto_enable )
+            vs->config.fields.enabled = 1;
+
+        if ( vs->config.fields.enabled )
+            start_stimer(vs);
+
+        break;
+    }
     default:
         gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016"PRIx64")\n",
                  __func__, idx, val);
@@ -163,6 +385,17 @@ int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
     return X86EMUL_OKAY;
 }
 
+void viridian_time_poll_messages(struct vcpu *v)
+{
+    unsigned int i;
+
+    if ( !v->arch.hvm.viridian->stimer_pending )
+        return;
+
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+        poll_stimer(v, i);
+}
+
 int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
 {
     struct domain *d = v->domain;
@@ -202,10 +435,35 @@ int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
             printk(XENLOG_G_INFO "d%d: VIRIDIAN MSR_TIME_REF_COUNT: accessed\n",
                    d->domain_id);
 
-        *val = raw_trc_val(d) + trc->off;
+        *val = time_ref_count(d);
         break;
     }
+    case HV_X64_MSR_STIMER0_CONFIG:
+    case HV_X64_MSR_STIMER1_CONFIG:
+    case HV_X64_MSR_STIMER2_CONFIG:
+    case HV_X64_MSR_STIMER3_CONFIG:
+    {
+        unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_CONFIG) / 2;
+
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        *val = v->arch.hvm.viridian->stimer[stimerx].config.raw;
+        break;
+    }
+    case HV_X64_MSR_STIMER0_COUNT:
+    case HV_X64_MSR_STIMER1_COUNT:
+    case HV_X64_MSR_STIMER2_COUNT:
+    case HV_X64_MSR_STIMER3_COUNT:
+    {
+        unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_COUNT) / 2;
 
+        if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+            return X86EMUL_EXCEPTION;
+
+        *val = v->arch.hvm.viridian->stimer[stimerx].count;
+        break;
+    }
     default:
         gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x\n", __func__, idx);
         return X86EMUL_EXCEPTION;
@@ -214,6 +472,65 @@ int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
     return X86EMUL_OKAY;
 }
 
+void viridian_time_vcpu_init(struct vcpu *v)
+{
+    unsigned int i;
+
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+    {
+        struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+        vs->v = v;
+        init_timer(&vs->timer, stimer_expire, vs, v->processor);
+    }
+}
+
+void viridian_time_vcpu_deinit(struct vcpu *v)
+{
+    unsigned int i;
+
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+    {
+        struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+        kill_timer(&vs->timer);
+        vs->v = NULL;
+    }
+}
+
+void viridian_time_save_vcpu_ctxt(const struct vcpu *v,
+                                  struct hvm_viridian_vcpu_context *ctxt)
+{
+    unsigned int i;
+
+    BUILD_BUG_ON(ARRAY_SIZE(v->arch.hvm.viridian->stimer) !=
+                 ARRAY_SIZE(ctxt->stimer_config_msr));
+    BUILD_BUG_ON(ARRAY_SIZE(v->arch.hvm.viridian->stimer) !=
+                 ARRAY_SIZE(ctxt->stimer_count_msr));
+
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+    {
+        struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+        ctxt->stimer_config_msr[i] = vs->config.raw;
+        ctxt->stimer_count_msr[i] = vs->count;
+    }
+}
+
+void viridian_time_load_vcpu_ctxt(
+    struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt)
+{
+    unsigned int i;
+
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+    {
+        struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+        vs->config.raw = ctxt->stimer_config_msr[i];
+        vs->count = ctxt->stimer_count_msr[i];
+    }
+}
+
 void viridian_time_save_domain_ctxt(
     const struct domain *d, struct hvm_viridian_domain_context *ctxt)
 {
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index e200e2ed1d93fad7464660066e484bcace1f00e3..873fb177681d7313288a7a6f9da5cb81e2d70a7a 100644
@@ -177,6 +177,11 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
             mask.AccessPartitionReferenceCounter = 1;
         if ( viridian_feature_mask(d) & HVMPV_reference_tsc )
             mask.AccessPartitionReferenceTsc = 1;
+        if ( viridian_feature_mask(d) & HVMPV_stimer )
+        {
+            mask.AccessSynicRegs = 1;
+            mask.AccessSyntheticTimerRegs = 1;
+        }
 
         u.mask = mask;
 
@@ -306,9 +311,25 @@ int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
     case HV_X64_MSR_ICR:
     case HV_X64_MSR_TPR:
     case HV_X64_MSR_VP_ASSIST_PAGE:
+    case HV_X64_MSR_SCONTROL:
+    case HV_X64_MSR_SVERSION:
+    case HV_X64_MSR_SIEFP:
+    case HV_X64_MSR_SIMP:
+    case HV_X64_MSR_EOM:
+    case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
         return viridian_synic_wrmsr(v, idx, val);
 
+    case HV_X64_MSR_TSC_FREQUENCY:
+    case HV_X64_MSR_APIC_FREQUENCY:
     case HV_X64_MSR_REFERENCE_TSC:
+    case HV_X64_MSR_STIMER0_CONFIG:
+    case HV_X64_MSR_STIMER0_COUNT:
+    case HV_X64_MSR_STIMER1_CONFIG:
+    case HV_X64_MSR_STIMER1_COUNT:
+    case HV_X64_MSR_STIMER2_CONFIG:
+    case HV_X64_MSR_STIMER2_COUNT:
+    case HV_X64_MSR_STIMER3_CONFIG:
+    case HV_X64_MSR_STIMER3_COUNT:
         return viridian_time_wrmsr(v, idx, val);
 
     case HV_X64_MSR_CRASH_P0:
@@ -379,12 +400,26 @@ int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val)
     case HV_X64_MSR_ICR:
     case HV_X64_MSR_TPR:
     case HV_X64_MSR_VP_ASSIST_PAGE:
+    case HV_X64_MSR_SCONTROL:
+    case HV_X64_MSR_SVERSION:
+    case HV_X64_MSR_SIEFP:
+    case HV_X64_MSR_SIMP:
+    case HV_X64_MSR_EOM:
+    case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
         return viridian_synic_rdmsr(v, idx, val);
 
     case HV_X64_MSR_TSC_FREQUENCY:
     case HV_X64_MSR_APIC_FREQUENCY:
     case HV_X64_MSR_REFERENCE_TSC:
     case HV_X64_MSR_TIME_REF_COUNT:
+    case HV_X64_MSR_STIMER0_CONFIG:
+    case HV_X64_MSR_STIMER0_COUNT:
+    case HV_X64_MSR_STIMER1_CONFIG:
+    case HV_X64_MSR_STIMER1_COUNT:
+    case HV_X64_MSR_STIMER2_CONFIG:
+    case HV_X64_MSR_STIMER2_COUNT:
+    case HV_X64_MSR_STIMER3_CONFIG:
+    case HV_X64_MSR_STIMER3_COUNT:
         return viridian_time_rdmsr(v, idx, val);
 
     case HV_X64_MSR_CRASH_P0:
@@ -424,6 +459,9 @@ int viridian_vcpu_init(struct vcpu *v)
     if ( !v->arch.hvm.viridian )
         return -ENOMEM;
 
+    viridian_synic_vcpu_init(v);
+    viridian_time_vcpu_init(v);
+
     return 0;
 }
 
@@ -443,7 +481,10 @@ void viridian_vcpu_deinit(struct vcpu *v)
         return;
 
     if ( is_viridian_vcpu(v) )
-        viridian_synic_wrmsr(v, HV_X64_MSR_VP_ASSIST_PAGE, 0);
+    {
+        viridian_time_vcpu_deinit(v);
+        viridian_synic_vcpu_deinit(v);
+    }
 
     xfree(v->arch.hvm.viridian);
     v->arch.hvm.viridian = NULL;
@@ -463,6 +504,11 @@ void viridian_domain_deinit(struct domain *d)
     d->arch.hvm.viridian = NULL;
 }
 
+void viridian_poll_messages(struct vcpu *v)
+{
+    viridian_time_poll_messages(v);
+}
+
 static DEFINE_PER_CPU(cpumask_t, ipi_cpumask);
 
 int viridian_hypercall(struct cpu_user_regs *regs)
@@ -723,6 +769,7 @@ static int viridian_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
     if ( !is_viridian_vcpu(v) )
         return 0;
 
+    viridian_time_save_vcpu_ctxt(v, &ctxt);
     viridian_synic_save_vcpu_ctxt(v, &ctxt);
 
     return hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt);
@@ -749,6 +796,7 @@ static int viridian_load_vcpu_ctxt(struct domain *d,
         return -EINVAL;
 
     viridian_synic_load_vcpu_ctxt(v, &ctxt);
+    viridian_time_load_vcpu_ctxt(v, &ctxt);
 
     return 0;
 }
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index a1a43cd792ab9404a0de354f2e5b311e0abd514d..e9e144e5a7d7e4887b385fc2094ca256d7c944b8 100644
@@ -461,11 +461,15 @@ void vlapic_EOI_set(struct vlapic *vlapic)
 
 void vlapic_handle_EOI(struct vlapic *vlapic, u8 vector)
 {
+    struct vcpu *v = vlapic_vcpu(vlapic);
     struct domain *d = vlapic_domain(vlapic);
 
     if ( vlapic_test_vector(vector, &vlapic->regs->data[APIC_TMR]) )
         vioapic_update_EOI(d, vector);
 
+    if ( is_viridian_vcpu(v) )
+        viridian_ack_sint(v, vector);
+
     hvm_dpci_msi_eoi(d, vector);
 }
 
@@ -1301,6 +1305,9 @@ int vlapic_has_pending_irq(struct vcpu *v)
     if ( !vlapic_enabled(vlapic) )
         return -1;
 
+    if ( is_viridian_vcpu(v) )
+        viridian_poll_messages(v);
+
     irr = vlapic_find_highest_irr(vlapic);
     if ( irr == -1 )
         return -1;
@@ -1360,7 +1367,9 @@ int vlapic_ack_pending_irq(struct vcpu *v, int vector, bool_t force_ack)
     }
 
  done:
-    vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
+    if ( !is_viridian_vcpu(v) || !viridian_is_auto_eoi_sint(v, vector) )
+        vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
+
     vlapic_clear_irr(vector, vlapic);
     return 1;
 }
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index d8df6f43525c32665dfd7c315e142b11941c2318..79a139aba7a13d0bef39dac234ca24b8c6c13051 100644
@@ -470,6 +470,9 @@ static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
 #define has_viridian_apic_assist(d) \
     (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist))
 
+#define has_viridian_stimer(d) \
+    (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_stimer))
+
 static inline void hvm_inject_exception(
     unsigned int vector, unsigned int type,
     unsigned int insn_len, int error_code)
diff --git a/xen/include/asm-x86/hvm/viridian.h b/xen/include/asm-x86/hvm/viridian.h
index f072838955885072820b962e3b232a4b8ac131bd..dd2ea9a74d91833b07b843e544a6c480d95931f0 100644
@@ -26,10 +26,60 @@ struct viridian_page
     void *ptr;
 };
 
+union viridian_sint_msr
+{
+    uint64_t raw;
+    struct
+    {
+        uint64_t vector:8;
+        uint64_t reserved_preserved1:8;
+        uint64_t mask:1;
+        uint64_t auto_eoi:1;
+        uint64_t polling:1;
+        uint64_t reserved_preserved2:45;
+    } fields;
+};
+
+union viridian_stimer_config_msr
+{
+    uint64_t raw;
+    struct
+    {
+        uint64_t enabled:1;
+        uint64_t periodic:1;
+        uint64_t lazy:1;
+        uint64_t auto_enable:1;
+        uint64_t vector:8;
+        uint64_t direct_mode:1;
+        uint64_t reserved_zero1:3;
+        uint64_t sintx:4;
+        uint64_t reserved_zero2:44;
+    } fields;
+};
+
+struct viridian_stimer {
+    struct vcpu *v;
+    struct timer timer;
+    union viridian_stimer_config_msr config;
+    uint64_t count;
+    uint64_t expiration;
+    s_time_t timeout;
+    bool started;
+};
+
 struct viridian_vcpu
 {
     struct viridian_page vp_assist;
     bool apic_assist_pending;
+    uint64_t scontrol;
+    uint64_t siefp;
+    struct viridian_page simp;
+    union viridian_sint_msr sint[16];
+    uint8_t vector_to_sintx[256];
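+    /* Bitmap of SINTs with a message pending delivery */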
+    unsigned long msg_pending;
+    struct viridian_stimer stimer[4];
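+    /* Bitmaps of enabled timers and of expired timers awaiting delivery */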
+    unsigned long stimer_enabled;
+    unsigned long stimer_pending;
     uint64_t crash_param[5];
 };
 
@@ -77,8 +127,8 @@ int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val);
 int
 viridian_hypercall(struct cpu_user_regs *regs);
 
-void viridian_time_ref_count_freeze(struct domain *d);
-void viridian_time_ref_count_thaw(struct domain *d);
+void viridian_time_domain_freeze(struct domain *d);
+void viridian_time_domain_thaw(struct domain *d);
 
 int viridian_vcpu_init(struct vcpu *v);
 int viridian_domain_init(struct domain *d);
@@ -90,6 +140,10 @@ void viridian_apic_assist_set(struct vcpu *v);
 bool viridian_apic_assist_completed(struct vcpu *v);
 void viridian_apic_assist_clear(struct vcpu *v);
 
+bool viridian_is_auto_eoi_sint(struct vcpu *v, uint8_t vector);
+void viridian_ack_sint(struct vcpu *v, uint8_t vector);
+void viridian_poll_messages(struct vcpu *v);
+
 #endif /* __ASM_X86_HVM_VIRIDIAN_H__ */
 
 /*
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 40be84ecda7f46e7dd42478616a62beebf7fb196..8344aa471f2a4c33d3de52ce9053dbbdf949f119 100644
@@ -602,6 +602,10 @@ struct hvm_viridian_vcpu_context {
     uint64_t vp_assist_msr;
     uint8_t  apic_assist_pending;
     uint8_t  _pad[7];
+    uint64_t simp_msr;
+    uint64_t sint_msr[16];
+    uint64_t stimer_config_msr[4];
+    uint64_t stimer_count_msr[4];
 };
 
 DECLARE_HVM_SAVE_TYPE(VIRIDIAN_VCPU, 17, struct hvm_viridian_vcpu_context);
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index 72f633ef2dc97b92c68fe11c5c2d847428b9d444..7e131e8da3dd4cbbd416ec4381d85fe4589fbc02 100644
 #define _HVMPV_crash_ctl 6
 #define HVMPV_crash_ctl (1 << _HVMPV_crash_ctl)
 
+/* Enable STIMER MSRs */
+#define _HVMPV_stimer 7
+#define HVMPV_stimer (1 << _HVMPV_stimer)
+
 #define HVMPV_feature_mask \
         (HVMPV_base_freq | \
          HVMPV_no_freq | \
          HVMPV_reference_tsc | \
          HVMPV_hcall_remote_tlb_flush | \
          HVMPV_apic_assist | \
-         HVMPV_crash_ctl)
+         HVMPV_crash_ctl | \
+         HVMPV_stimer)
 
 #endif