xenbits.xensource.com Git - people/pauldu/xen.git/commitdiff
use viridian_page for reference_tsc synic8
author    Paul Durrant <paul.durrant@citrix.com>
          Thu, 20 Dec 2018 09:23:57 +0000 (09:23 +0000)
committer Paul Durrant <paul.durrant@citrix.com>
          Thu, 20 Dec 2018 09:58:24 +0000 (09:58 +0000)
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
xen/arch/x86/hvm/viridian/private.h
xen/arch/x86/hvm/viridian/synic.c
xen/arch/x86/hvm/viridian/time.c
xen/arch/x86/hvm/viridian/viridian.c
xen/include/asm-x86/hvm/viridian.h

diff --git a/xen/arch/x86/hvm/viridian/private.h b/xen/arch/x86/hvm/viridian/private.h
index 066544b3802cd8d54792df8813957a0745ba3e46..3e24441815821b6a78853d99667b34ed46aa5bd0 100644
--- a/xen/arch/x86/hvm/viridian/private.h
+++ b/xen/arch/x86/hvm/viridian/private.h
@@ -94,6 +94,9 @@ void viridian_time_poll_messages(struct vcpu *v);
 void viridian_time_vcpu_init(struct vcpu *v);
 void viridian_time_vcpu_deinit(struct vcpu *v);
 
+void viridian_time_domain_init(struct domain *d);
+void viridian_time_domain_deinit(struct domain *d);
+
 void viridian_time_save_vcpu_ctxt(const struct vcpu *v,
                                   struct hvm_viridian_vcpu_context *ctxt);
 void viridian_time_load_vcpu_ctxt(
@@ -106,7 +109,7 @@ void viridian_time_load_domain_ctxt(
 
 void viridian_dump_guest_page(const struct vcpu *v, const char *name,
                               const struct viridian_page *vp);
-void viridian_map_guest_page(struct vcpu *v, struct viridian_page *vp);
+void viridian_map_guest_page(struct domain *d, struct viridian_page *vp);
 void viridian_unmap_guest_page(struct viridian_page *vp);
 
 #endif /* X86_HVM_VIRIDIAN_PRIVATE_H */
diff --git a/xen/arch/x86/hvm/viridian/synic.c b/xen/arch/x86/hvm/viridian/synic.c
index 518a05362dafa096b61179b7a2a51e8cf0bb469a..903a536ec4aaa72aaf62a548d92ddac899a5c012 100644
--- a/xen/arch/x86/hvm/viridian/synic.c
+++ b/xen/arch/x86/hvm/viridian/synic.c
@@ -130,7 +130,7 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
         viridian_dump_guest_page(v, "VP_ASSIST",
                                  &v->arch.hvm.viridian->vp_assist);
         if ( v->arch.hvm.viridian->vp_assist.msr.fields.enabled )
-            viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
+            viridian_map_guest_page(d, &v->arch.hvm.viridian->vp_assist);
         break;
 
     case HV_X64_MSR_SCONTROL:
@@ -158,7 +158,7 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
         v->arch.hvm.viridian->simp.msr.raw = val;
         viridian_dump_guest_page(v, "SIMP", &v->arch.hvm.viridian->simp);
         if ( v->arch.hvm.viridian->simp.msr.fields.enabled )
-            viridian_map_guest_page(v, &v->arch.hvm.viridian->simp);
+            viridian_map_guest_page(d, &v->arch.hvm.viridian->simp);
         break;
 
     case HV_X64_MSR_EOM:
@@ -391,17 +391,18 @@ void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
 void viridian_synic_load_vcpu_ctxt(
     struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt)
 {
+    struct domain *d = v->domain;
     unsigned int i;
 
     v->arch.hvm.viridian->vp_assist.msr.raw = ctxt->vp_assist_msr;
     if ( v->arch.hvm.viridian->vp_assist.msr.fields.enabled )
-        viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
+        viridian_map_guest_page(d, &v->arch.hvm.viridian->vp_assist);
 
     v->arch.hvm.viridian->apic_assist_pending = ctxt->apic_assist_pending;
 
     v->arch.hvm.viridian->simp.msr.raw = ctxt->simp_msr;
     if ( v->arch.hvm.viridian->simp.msr.fields.enabled )
-        viridian_map_guest_page(v, &v->arch.hvm.viridian->simp);
+        viridian_map_guest_page(d, &v->arch.hvm.viridian->simp);
 
     for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->sint); i++ )
     {
diff --git a/xen/arch/x86/hvm/viridian/time.c b/xen/arch/x86/hvm/viridian/time.c
index 6f86791d442765855e96a60c8fe7775cef3932ee..0f632d8b5f041d631af6a256091b63383c240996 100644
--- a/xen/arch/x86/hvm/viridian/time.c
+++ b/xen/arch/x86/hvm/viridian/time.c
@@ -26,33 +26,10 @@ typedef struct _HV_REFERENCE_TSC_PAGE
     uint64_t Reserved2[509];
 } HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE;
 
-static void dump_reference_tsc(const struct domain *d)
-{
-    const union viridian_page_msr *rt = &d->arch.hvm.viridian->reference_tsc;
-
-    if ( !rt->fields.enabled )
-        return;
-
-    printk(XENLOG_G_INFO "d%d: VIRIDIAN REFERENCE_TSC: pfn: %lx\n",
-           d->domain_id, (unsigned long)rt->fields.pfn);
-}
-
 static void update_reference_tsc(struct domain *d, bool initialize)
 {
-    unsigned long gmfn = d->arch.hvm.viridian->reference_tsc.fields.pfn;
-    struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
-    HV_REFERENCE_TSC_PAGE *p;
-
-    if ( !page || !get_page_type(page, PGT_writable_page) )
-    {
-        if ( page )
-            put_page(page);
-        gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
-                 gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
-        return;
-    }
-
-    p = __map_domain_page(page);
+    const struct viridian_page *rt = &d->arch.hvm.viridian->reference_tsc;
+    HV_REFERENCE_TSC_PAGE *p = rt->ptr;
 
     if ( initialize )
         clear_page(p);
@@ -83,7 +60,7 @@ static void update_reference_tsc(struct domain *d, bool initialize)
 
         printk(XENLOG_G_INFO "d%d: VIRIDIAN REFERENCE_TSC: invalidated\n",
                d->domain_id);
-        goto out;
+        return;
     }
 
     /*
@@ -101,11 +78,6 @@ static void update_reference_tsc(struct domain *d, bool initialize)
     if ( p->TscSequence == 0xFFFFFFFF ||
          p->TscSequence == 0 ) /* Avoid both 'invalid' values */
         p->TscSequence = 1;
-
- out:
-    unmap_domain_page(p);
-
-    put_page_and_type(page);
 }
 
 static int64_t raw_trc_val(struct domain *d)
@@ -120,12 +92,44 @@ static int64_t raw_trc_val(struct domain *d)
     return scale_delta(tsc, &tsc_to_ns) / 100ul;
 }
 
-static int64_t time_ref_count(struct domain *d)
+static int64_t time_now(struct domain *d)
 {
-    struct viridian_time_ref_count *trc =
-        &d->arch.hvm.viridian->time_ref_count;
+    const struct viridian_page *rt = &d->arch.hvm.viridian->reference_tsc;
+    HV_REFERENCE_TSC_PAGE *p = rt->ptr;
+    uint32_t start, end;
+    __uint128_t tsc;
+    __uint128_t scale;
+    uint64_t offset;
+
+    /*
+     * If the reference TSC page is not enabled, or has been invalidated,
+     * fall back to the partition reference counter.
+     */
+    if ( !p || !p->TscSequence )
+    {
+        struct viridian_time_ref_count *trc =
+            &d->arch.hvm.viridian->time_ref_count;
+
+        return raw_trc_val(d) + trc->off;
+    }
+
+    /*
+     * The following is the time calculation algorithm documented in the
+     * specification.
+     */
+
+    do {
+        start = p->TscSequence;
+
+        tsc = rdtsc();
+        scale = p->TscScale;
+        offset = p->TscOffset;
+
+        smp_mb();
+        end = p->TscSequence;
+    } while ( end != start );
 
-    return raw_trc_val(d) + trc->off;
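+    /*
+     * TscScale is a 64.64 fixed-point multiplier, so the partition
+     * reference time (in 100ns units) is the high 64 bits of the 128-bit
+     * product, plus TscOffset.
+     */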
+    return ((tsc * scale) >> 64) + offset;
 }
 
 static void time_ref_count_freeze(struct domain *d)
@@ -177,7 +181,7 @@ static void start_stimer(struct viridian_stimer *vs)
 {
     struct vcpu *v = vs->v;
     unsigned int stimerx = vs - &v->arch.hvm.viridian->stimer[0];
-    int64_t now = time_ref_count(v->domain);
+    int64_t now = time_now(v->domain);
     s_time_t timeout;
 
     if ( !test_and_set_bit(stimerx, &v->arch.hvm.viridian->stimer_enabled) )
@@ -252,7 +256,7 @@ static void poll_stimer(struct vcpu *v, unsigned int stimerx)
 
     if ( !viridian_synic_deliver_timer_msg(v, vs->config.fields.sintx,
                                            stimerx, vs->expiration,
-                                           time_ref_count(v->domain)) )
+                                           time_now(v->domain)) )
         return;
 
     clear_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
@@ -319,10 +323,15 @@ int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
         if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
             return X86EMUL_EXCEPTION;
 
-        d->arch.hvm.viridian->reference_tsc.raw = val;
-        dump_reference_tsc(d);
-        if ( d->arch.hvm.viridian->reference_tsc.fields.enabled )
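+        /*
+         * Unmap any previously mapped page before the MSR value (and
+         * hence the PFN) changes.
+         */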
+        viridian_unmap_guest_page(&d->arch.hvm.viridian->reference_tsc);
+        d->arch.hvm.viridian->reference_tsc.msr.raw = val;
+        viridian_dump_guest_page(v, "REFERENCE_TSC",
+                                 &d->arch.hvm.viridian->reference_tsc);
+        if ( d->arch.hvm.viridian->reference_tsc.msr.fields.enabled )
+        {
+            viridian_map_guest_page(d, &d->arch.hvm.viridian->reference_tsc);
             update_reference_tsc(d, true);
+        }
         break;
 
     case HV_X64_MSR_TIME_REF_COUNT:
@@ -420,7 +429,7 @@ int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
         if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
             return X86EMUL_EXCEPTION;
 
-        *val = d->arch.hvm.viridian->reference_tsc.raw;
+        *val = d->arch.hvm.viridian->reference_tsc.msr.raw;
         break;
 
     case HV_X64_MSR_TIME_REF_COUNT:
@@ -435,7 +444,7 @@ int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
             printk(XENLOG_G_INFO "d%d: VIRIDIAN MSR_TIME_REF_COUNT: accessed\n",
                    d->domain_id);
 
-        *val = time_ref_count(d);
+        *val = raw_trc_val(d) + trc->off;
         break;
     }
     case HV_X64_MSR_STIMER0_CONFIG:
@@ -498,6 +507,15 @@ void viridian_time_vcpu_deinit(struct vcpu *v)
     }
 }
 
+void viridian_time_domain_init(struct domain *d)
+{
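+    /*
+     * Nothing to do here: the reference_tsc page is only mapped when the
+     * guest enables it via HV_X64_MSR_REFERENCE_TSC, or when domain
+     * context is loaded.
+     */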
+}
+
+void viridian_time_domain_deinit(struct domain *d)
+{
+    viridian_unmap_guest_page(&d->arch.hvm.viridian->reference_tsc);
+}
+
 void viridian_time_save_vcpu_ctxt(const struct vcpu *v,
                                   struct hvm_viridian_vcpu_context *ctxt)
 {
@@ -535,17 +553,20 @@ void viridian_time_save_domain_ctxt(
     const struct domain *d, struct hvm_viridian_domain_context *ctxt)
 {
     ctxt->time_ref_count = d->arch.hvm.viridian->time_ref_count.val;
-    ctxt->reference_tsc = d->arch.hvm.viridian->reference_tsc.raw;
+    ctxt->reference_tsc = d->arch.hvm.viridian->reference_tsc.msr.raw;
 }
 
 void viridian_time_load_domain_ctxt(
     struct domain *d, const struct hvm_viridian_domain_context *ctxt)
 {
     d->arch.hvm.viridian->time_ref_count.val = ctxt->time_ref_count;
-    d->arch.hvm.viridian->reference_tsc.raw = ctxt->reference_tsc;
+    d->arch.hvm.viridian->reference_tsc.msr.raw = ctxt->reference_tsc;
 
-    if ( d->arch.hvm.viridian->reference_tsc.fields.enabled )
+    if ( d->arch.hvm.viridian->reference_tsc.msr.fields.enabled )
+    {
+        viridian_map_guest_page(d, &d->arch.hvm.viridian->reference_tsc);
         update_reference_tsc(d, false);
+    }
 }
 
 /*
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 873fb177681d7313288a7a6f9da5cb81e2d70a7a..e2b62401928aa8ac4e71f605a09b001d99e80f27 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -472,6 +472,8 @@ int viridian_domain_init(struct domain *d)
     if ( !d->arch.hvm.viridian )
         return -ENOMEM;
 
+    viridian_time_domain_init(d);
+
     return 0;
 }
 
@@ -494,6 +496,8 @@ void viridian_domain_deinit(struct domain *d)
 {
     struct vcpu *v;
 
+    viridian_time_domain_deinit(d);
+
     for_each_vcpu ( d, v )
         viridian_vcpu_deinit(v);
 
@@ -677,9 +681,8 @@ void viridian_dump_guest_page(const struct vcpu *v, const char *name,
            v, name, (unsigned long)vp->msr.fields.pfn);
 }
 
-void viridian_map_guest_page(struct vcpu *v, struct viridian_page *vp)
+void viridian_map_guest_page(struct domain *d, struct viridian_page *vp)
 {
-    struct domain *d = v->domain;
     unsigned long gmfn = vp->msr.fields.pfn;
     struct page_info *page;
 
diff --git a/xen/include/asm-x86/hvm/viridian.h b/xen/include/asm-x86/hvm/viridian.h
index dd2ea9a74d91833b07b843e544a6c480d95931f0..9cb8baa808d7a38a40a64242938ccd878baaa742 100644
--- a/xen/include/asm-x86/hvm/viridian.h
+++ b/xen/include/asm-x86/hvm/viridian.h
@@ -115,7 +115,7 @@ struct viridian_domain
     union viridian_guest_os_id_msr guest_os_id;
     union viridian_page_msr hypercall_gpa;
     struct viridian_time_ref_count time_ref_count;
-    union viridian_page_msr reference_tsc;
+    struct viridian_page reference_tsc;
 };
 
 void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,