viridian_dump_guest_page(v, "VP_ASSIST",
&v->arch.hvm.viridian->vp_assist);
if ( v->arch.hvm.viridian->vp_assist.msr.fields.enabled )
- viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
+ viridian_map_guest_page(d, &v->arch.hvm.viridian->vp_assist);
break;
case HV_X64_MSR_SCONTROL:
v->arch.hvm.viridian->simp.msr.raw = val;
viridian_dump_guest_page(v, "SIMP", &v->arch.hvm.viridian->simp);
if ( v->arch.hvm.viridian->simp.msr.fields.enabled )
- viridian_map_guest_page(v, &v->arch.hvm.viridian->simp);
+ viridian_map_guest_page(d, &v->arch.hvm.viridian->simp);
break;
case HV_X64_MSR_EOM:
void viridian_synic_load_vcpu_ctxt(
struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt)
{
+ struct domain *d = v->domain;
unsigned int i;
v->arch.hvm.viridian->vp_assist.msr.raw = ctxt->vp_assist_msr;
if ( v->arch.hvm.viridian->vp_assist.msr.fields.enabled )
- viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
+ viridian_map_guest_page(d, &v->arch.hvm.viridian->vp_assist);
v->arch.hvm.viridian->apic_assist_pending = ctxt->apic_assist_pending;
v->arch.hvm.viridian->simp.msr.raw = ctxt->simp_msr;
if ( v->arch.hvm.viridian->simp.msr.fields.enabled )
- viridian_map_guest_page(v, &v->arch.hvm.viridian->simp);
+ viridian_map_guest_page(d, &v->arch.hvm.viridian->simp);
for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->sint); i++ )
{
uint64_t Reserved2[509];
} HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE;
-static void dump_reference_tsc(const struct domain *d)
-{
- const union viridian_page_msr *rt = &d->arch.hvm.viridian->reference_tsc;
-
- if ( !rt->fields.enabled )
- return;
-
- printk(XENLOG_G_INFO "d%d: VIRIDIAN REFERENCE_TSC: pfn: %lx\n",
- d->domain_id, (unsigned long)rt->fields.pfn);
-}
-
static void update_reference_tsc(struct domain *d, bool initialize)
{
- unsigned long gmfn = d->arch.hvm.viridian->reference_tsc.fields.pfn;
- struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
- HV_REFERENCE_TSC_PAGE *p;
-
- if ( !page || !get_page_type(page, PGT_writable_page) )
- {
- if ( page )
- put_page(page);
- gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
- gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
- return;
- }
-
- p = __map_domain_page(page);
+ const struct viridian_page *rt = &d->arch.hvm.viridian->reference_tsc;
+ HV_REFERENCE_TSC_PAGE *p = rt->ptr;
if ( initialize )
clear_page(p);
printk(XENLOG_G_INFO "d%d: VIRIDIAN REFERENCE_TSC: invalidated\n",
d->domain_id);
- goto out;
+ return;
}
/*
if ( p->TscSequence == 0xFFFFFFFF ||
p->TscSequence == 0 ) /* Avoid both 'invalid' values */
p->TscSequence = 1;
-
- out:
- unmap_domain_page(p);
-
- put_page_and_type(page);
}
static int64_t raw_trc_val(struct domain *d)
return scale_delta(tsc, &tsc_to_ns) / 100ul;
}
-static int64_t time_ref_count(struct domain *d)
+static int64_t time_now(struct domain *d)
{
- struct viridian_time_ref_count *trc =
- &d->arch.hvm.viridian->time_ref_count;
+ const struct viridian_page *rt = &d->arch.hvm.viridian->reference_tsc;
+ HV_REFERENCE_TSC_PAGE *p = rt->ptr;
+ uint32_t start, end;
+ __uint128_t tsc;
+ __uint128_t scale;
+ uint64_t offset;
+
+ /*
+     * If the reference TSC page is not enabled, or has been invalidated,
+ * fall back to the partition reference counter.
+ */
+ if ( !p || !p->TscSequence )
+ {
+ struct viridian_time_ref_count *trc =
+ &d->arch.hvm.viridian->time_ref_count;
+
+ return raw_trc_val(d) + trc->off;
+ }
+
+ /*
+ * The following is the time calculation algorithm documented in the
+     * specification.
+ */
+
+ start = p->TscSequence;
+
+ do {
+ tsc = rdtsc();
+ scale = p->TscScale;
+ offset = p->TscOffset;
+
+ smp_mb();
+ end = p->TscSequence;
+ } while (end != start);
- return raw_trc_val(d) + trc->off;
+ return ((tsc * scale) >> 64) + offset;
}
static void time_ref_count_freeze(struct domain *d)
{
struct vcpu *v = vs->v;
unsigned int stimerx = vs - &v->arch.hvm.viridian->stimer[0];
- int64_t now = time_ref_count(v->domain);
+ int64_t now = time_now(v->domain);
s_time_t timeout;
if ( !test_and_set_bit(stimerx, &v->arch.hvm.viridian->stimer_enabled) )
if ( !viridian_synic_deliver_timer_msg(v, vs->config.fields.sintx,
stimerx, vs->expiration,
- time_ref_count(v->domain)) )
+ time_now(v->domain)) )
return;
clear_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
return X86EMUL_EXCEPTION;
- d->arch.hvm.viridian->reference_tsc.raw = val;
- dump_reference_tsc(d);
- if ( d->arch.hvm.viridian->reference_tsc.fields.enabled )
+ viridian_unmap_guest_page(&d->arch.hvm.viridian->reference_tsc);
+ d->arch.hvm.viridian->reference_tsc.msr.raw = val;
+ viridian_dump_guest_page(v, "REFERENCE_TSC",
+ &d->arch.hvm.viridian->reference_tsc);
+ if ( d->arch.hvm.viridian->reference_tsc.msr.fields.enabled )
+ {
+ viridian_map_guest_page(d, &d->arch.hvm.viridian->reference_tsc);
update_reference_tsc(d, true);
+ }
break;
case HV_X64_MSR_TIME_REF_COUNT:
if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
return X86EMUL_EXCEPTION;
- *val = d->arch.hvm.viridian->reference_tsc.raw;
+ *val = d->arch.hvm.viridian->reference_tsc.msr.raw;
break;
case HV_X64_MSR_TIME_REF_COUNT:
printk(XENLOG_G_INFO "d%d: VIRIDIAN MSR_TIME_REF_COUNT: accessed\n",
d->domain_id);
- *val = time_ref_count(d);
+ *val = raw_trc_val(d) + trc->off;
break;
}
case HV_X64_MSR_STIMER0_CONFIG:
}
}
+void viridian_time_domain_init(struct domain *d)
+{
+}
+
+void viridian_time_domain_deinit(struct domain *d)
+{
+ viridian_unmap_guest_page(&d->arch.hvm.viridian->reference_tsc);
+}
+
void viridian_time_save_vcpu_ctxt(const struct vcpu *v,
struct hvm_viridian_vcpu_context *ctxt)
{
const struct domain *d, struct hvm_viridian_domain_context *ctxt)
{
ctxt->time_ref_count = d->arch.hvm.viridian->time_ref_count.val;
- ctxt->reference_tsc = d->arch.hvm.viridian->reference_tsc.raw;
+ ctxt->reference_tsc = d->arch.hvm.viridian->reference_tsc.msr.raw;
}
void viridian_time_load_domain_ctxt(
struct domain *d, const struct hvm_viridian_domain_context *ctxt)
{
d->arch.hvm.viridian->time_ref_count.val = ctxt->time_ref_count;
- d->arch.hvm.viridian->reference_tsc.raw = ctxt->reference_tsc;
+ d->arch.hvm.viridian->reference_tsc.msr.raw = ctxt->reference_tsc;
- if ( d->arch.hvm.viridian->reference_tsc.fields.enabled )
+ if ( d->arch.hvm.viridian->reference_tsc.msr.fields.enabled )
+ {
+ viridian_map_guest_page(d, &d->arch.hvm.viridian->reference_tsc);
update_reference_tsc(d, false);
+ }
}
/*