#include <xen/version.h>
#include <asm/apic.h>
+#include <asm/event.h>
#include <asm/hvm/support.h>
#include "private.h"
* ticks per 100ns shifted left by 64.
*/
p->TscScale = ((10000ul << 32) / d->arch.tsc_khz) << 32;
+ smp_wmb();
p->TscSequence++;
if ( p->TscSequence == 0xFFFFFFFF ||
return raw_trc_val(d) + trc->off;
}
+/*
+ * The specification says: "The partition reference time is computed
+ * by the following formula:
+ *
+ * ReferenceTime = ((VirtualTsc * TscScale) >> 64) + TscOffset
+ *
+ * The multiplication is a 64 bit multiplication, which results in a
+ * 128 bit number which is then shifted 64 times to the right to obtain
+ * the high 64 bits."
+ */
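+/*
+ * Illustrative example (assumed figures): with a 2.5GHz TSC (tsc_khz =
+ * 2,500,000) TscScale / 2^64 is 10000 / 2500000 = 0.004, so a delta of
+ * 2,500,000,000 ticks scales to 10,000,000 units of 100ns, i.e. one
+ * second.
+ */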
+static uint64_t scale_tsc(uint64_t tsc, uint64_t scale, uint64_t offset)
+{
+ uint64_t result;
+
+ /*
+ * Quadword MUL takes an implicit operand in RAX, and puts the result
+ * in RDX:RAX. Because we only want the result of the multiplication
+ * after shifting right by 64 bits, we only need the content
+ * of RDX.
+ */
+ asm ( "mulq %[scale]"
+ : "+a" (tsc), "=d" (result)
+ : [scale] "rm" (scale) );
+
+ return result + offset;
+}
+
+static uint64_t time_now(struct domain *d)
+{
+ const struct viridian_page *rt = &d->arch.hvm.viridian->reference_tsc;
+ HV_REFERENCE_TSC_PAGE *p = rt->ptr;
+ uint32_t start, end;
+ uint64_t tsc;
+ uint64_t scale;
+ uint64_t offset;
+
+ /*
+ * If the reference TSC page is not enabled, or has been invalidated,
+ * fall back to the partition reference counter.
+ */
+ if ( !p || !p->TscSequence )
+ return time_ref_count(d);
+
+ /*
+ * The following sampling algorithm for tsc, scale and offset is
+ * documented in the specification.
+ */
+ do {
+ start = p->TscSequence;
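+ /* Pairs with the smp_wmb() issued when the reference TSC page is updated */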
+ smp_rmb();
+
+ tsc = hvm_get_guest_tsc(pt_global_vcpu_target(d));
+ scale = p->TscScale;
+ offset = p->TscOffset;
+
+ smp_rmb();
+ end = p->TscSequence;
+ } while ( end != start );
+
+ return scale_tsc(tsc, scale, offset);
+}
+
+static void stop_stimer(struct viridian_stimer *vs)
+{
+ const struct vcpu *v = vs->v;
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int stimerx = vs - &vv->stimer[0];
+
+ if ( !vs->started )
+ return;
+
+ stop_timer(&vs->timer);
+ clear_bit(stimerx, &vv->stimer_pending);
+ vs->started = false;
+}
+
+static void stimer_expire(void *data)
+{
+ const struct viridian_stimer *vs = data;
+ struct vcpu *v = vs->v;
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int stimerx = vs - &vv->stimer[0];
+
+ if ( !vs->config.fields.enabled )
+ return;
+
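+ /*
+ * Mark the timer as pending and kick the vCPU; the expiry message is
+ * delivered later by viridian_time_poll_timers().
+ */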
+ set_bit(stimerx, &vv->stimer_pending);
+ vcpu_kick(v);
+}
+
+static void start_stimer(struct viridian_stimer *vs)
+{
+ const struct vcpu *v = vs->v;
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int stimerx = vs - &vv->stimer[0];
+ int64_t now = time_now(v->domain);
+ s_time_t timeout;
+
+ if ( !test_and_set_bit(stimerx, &vv->stimer_enabled) )
+ printk(XENLOG_G_INFO "%pv: VIRIDIAN STIMER%u: enabled\n", v,
+ stimerx);
+
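+ /*
+ * Expiration times and 'now' are in 100ns units of partition reference
+ * time, whereas the timeout passed to set_timer() below is in
+ * nanoseconds, hence the multiplications by 100.
+ */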
+ if ( vs->config.fields.periodic )
+ {
+ unsigned int missed = 0;
+ int64_t next;
+
+ /*
+ * The specification says that if the timer is lazy then we
+ * skip over any missed expirations, so we can treat this case
+ * the same as if the timer is currently stopped, i.e. we
+ * just schedule expiration to be 'count' ticks from now.
+ */
+ if ( !vs->started || vs->config.fields.lazy )
+ next = now + vs->count;
+ else
+ {
+ /*
+ * The timer is already started, so we're re-scheduling.
+ * Hence advance the timer expiration by one tick.
+ */
+ next = vs->expiration + vs->count;
+
+ /* Now check to see if any expirations have been missed */
+ if ( now - next > 0 )
+ missed = (now - next) / vs->count;
+
+ /*
+ * The specification says that if the timer is not lazy then
+ * a non-zero missed count should be used to reduce the period
+ * of the timer until it catches up, unless the count has
+ * reached a 'significant number', in which case the timer
+ * should be treated as lazy. Unfortunately the specification
+ * does not state what that number is, so the choice of number
+ * here is a pure guess.
+ */
+ if ( missed > 3 )
+ {
+ missed = 0;
+ next = now + vs->count;
+ }
+ }
+
+ vs->expiration = next;
+ timeout = ((next - now) * 100ull) / (missed + 1);
+ }
+ else
+ {
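+ /*
+ * For a one-shot timer the count is an absolute expiration time,
+ * rather than a period.
+ */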
+ vs->expiration = vs->count;
+ if ( vs->count - now <= 0 )
+ {
+ set_bit(stimerx, &vv->stimer_pending);
+ return;
+ }
+
+ timeout = (vs->expiration - now) * 100ull;
+ }
+
+ vs->started = true;
+ migrate_timer(&vs->timer, smp_processor_id());
+ set_timer(&vs->timer, timeout + NOW());
+}
+
+static void poll_stimer(struct vcpu *v, unsigned int stimerx)
+{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ struct viridian_stimer *vs = &vv->stimer[stimerx];
+
+ if ( !test_bit(stimerx, &vv->stimer_pending) )
+ return;
+
+ if ( !viridian_synic_deliver_timer_msg(v, vs->config.fields.sintx,
+ stimerx, vs->expiration,
+ time_now(v->domain)) )
+ return;
+
+ clear_bit(stimerx, &vv->stimer_pending);
+
+ if ( vs->config.fields.periodic )
+ start_stimer(vs);
+ else
+ vs->config.fields.enabled = 0;
+}
+
+void viridian_time_poll_timers(struct vcpu *v)
+{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ if ( !vv->stimer_pending )
+ return;
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ poll_stimer(v, i);
+}
+
+void viridian_time_vcpu_freeze(struct vcpu *v)
+{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ if ( !is_viridian_vcpu(v) )
+ return;
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &vv->stimer[i];
+
+ if ( vs->started )
+ stop_timer(&vs->timer);
+ }
+}
+
+void viridian_time_vcpu_thaw(struct vcpu *v)
+{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ if ( !is_viridian_vcpu(v) )
+ return;
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &vv->stimer[i];
+
+ if ( vs->config.fields.enabled )
+ start_stimer(vs);
+ }
+}
+
void viridian_time_domain_freeze(const struct domain *d)
{
+ struct vcpu *v;
+
+ if ( !is_viridian_domain(d) )
+ return;
+
+ for_each_vcpu ( d, v )
+ viridian_time_vcpu_freeze(v);
+
time_ref_count_freeze(d);
}
void viridian_time_domain_thaw(const struct domain *d)
{
+ struct vcpu *v;
+
+ if ( !is_viridian_domain(d) )
+ return;
+
time_ref_count_thaw(d);
+
+ for_each_vcpu ( d, v )
+ viridian_time_vcpu_thaw(v);
}
int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
struct domain *d = v->domain;
struct viridian_domain *vd = d->arch.hvm.viridian;
}
break;
+ case HV_X64_MSR_TIME_REF_COUNT:
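+ /* The partition reference counter is read-only, so a write faults */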
+ return X86EMUL_EXCEPTION;
+
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ {
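+ /*
+ * The CONFIG and COUNT MSRs for each timer are interleaved, so the
+ * offset from STIMER0_CONFIG divided by 2 yields the timer index.
+ */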
+ unsigned int stimerx =
+ array_index_nospec((idx - HV_X64_MSR_STIMER0_CONFIG) / 2,
+ ARRAY_SIZE(vv->stimer));
+ struct viridian_stimer *vs = &vv->stimer[stimerx];
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ stop_stimer(vs);
+
+ vs->config.raw = val;
+
+ if ( !vs->config.fields.sintx )
+ vs->config.fields.enabled = 0;
+
+ if ( vs->config.fields.enabled )
+ start_stimer(vs);
+
+ break;
+ }
+
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ {
+ unsigned int stimerx =
+ array_index_nospec((idx - HV_X64_MSR_STIMER0_CONFIG) / 2,
+ ARRAY_SIZE(vv->stimer));
+ struct viridian_stimer *vs = &vv->stimer[stimerx];
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ stop_stimer(vs);
+
+ vs->count = val;
+
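+ /*
+ * Writing a zero count disables the timer, whereas a non-zero count
+ * (re-)enables it if auto-enable is set.
+ */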
+ if ( !vs->count )
+ vs->config.fields.enabled = 0;
+ else if ( vs->config.fields.auto_enable )
+ vs->config.fields.enabled = 1;
+
+ if ( vs->config.fields.enabled )
+ start_stimer(vs);
+
+ break;
+ }
+
default:
gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016"PRIx64")\n",
__func__, idx, val);
int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
const struct domain *d = v->domain;
struct viridian_domain *vd = d->arch.hvm.viridian;
break;
}
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ {
+ unsigned int stimerx =
+ array_index_nospec((idx - HV_X64_MSR_STIMER0_CONFIG) / 2,
+ ARRAY_SIZE(vv->stimer));
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ *val = vv->stimer[stimerx].config.raw;
+ break;
+ }
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ {
+ unsigned int stimerx =
+ array_index_nospec((idx - HV_X64_MSR_STIMER0_CONFIG) / 2,
+ ARRAY_SIZE(vv->stimer));
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ *val = vv->stimer[stimerx].count;
+ break;
+ }
+
default:
gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x\n", __func__, idx);
return X86EMUL_EXCEPTION;
return X86EMUL_OKAY;
}
-int viridian_time_vcpu_init(const struct vcpu *v)
+int viridian_time_vcpu_init(struct vcpu *v)
{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &vv->stimer[i];
+
+ vs->v = v;
+ init_timer(&vs->timer, stimer_expire, vs, v->processor);
+ }
+
return 0;
}
void viridian_time_vcpu_deinit(const struct vcpu *v)
{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &vv->stimer[i];
+
+ kill_timer(&vs->timer);
+ vs->v = NULL;
+ }
}
void viridian_time_domain_deinit(const struct domain *d)
void viridian_time_save_vcpu_ctxt(
const struct vcpu *v, struct hvm_viridian_vcpu_context *ctxt)
{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(vv->stimer) !=
+ ARRAY_SIZE(ctxt->stimer_config_msr));
+ BUILD_BUG_ON(ARRAY_SIZE(vv->stimer) !=
+ ARRAY_SIZE(ctxt->stimer_count_msr));
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &vv->stimer[i];
+
+ ctxt->stimer_config_msr[i] = vs->config.raw;
+ ctxt->stimer_count_msr[i] = vs->count;
+ }
}
void viridian_time_load_vcpu_ctxt(
struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt)
{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &vv->stimer[i];
+
+ vs->config.raw = ctxt->stimer_config_msr[i];
+ vs->count = ctxt->stimer_count_msr[i];
+ }
}
void viridian_time_save_domain_ctxt(