#include <xen/version.h>
#include <asm/apic.h>
+#include <asm/event.h>
#include <asm/hvm/support.h>
#include "private.h"
return raw_trc_val(d) + trc->off;
}
+static int64_t time_now(struct domain *d)
+{
+ const struct viridian_page *rt = &d->arch.hvm.viridian->reference_tsc;
+ HV_REFERENCE_TSC_PAGE *p = rt->ptr;
+ uint32_t start, end;
+ __int128_t tsc;
+ __int128_t scale;
+ int64_t offset;
+
+ /*
+     * If the reference TSC page is not enabled, or has been invalidated,
+     * fall back to the partition reference counter.
+ */
+ if ( !p || !p->TscSequence )
+ return time_ref_count(d);
+
+    /*
+     * The following sampling algorithm for tsc, scale and offset is
+     * documented in the specification.
+     */
+    do {
+        start = p->TscSequence;
+        smp_rmb();
+
+        tsc = rdtsc();
+        scale = p->TscScale;
+        offset = p->TscOffset;
+
+        smp_mb();
+        end = p->TscSequence;
+    } while ( end != start );
+
+ /*
+ * The specification says: "The partition reference time is computed
+ * by the following formula:
+ *
+ * ReferenceTime = ((VirtualTsc * TscScale) >> 64) + TscOffset
+ *
+ * The multiplication is a 64 bit multiplication, which results in a
+ * 128 bit number which is then shifted 64 times to the right to obtain
+ * the high 64 bits."
+ */
+ return ((tsc * scale) >> 64) + offset;
+}
+
+static void stop_stimer(struct viridian_stimer *vs)
+{
+ struct vcpu *v = vs->v;
+ unsigned int stimerx = vs - &v->arch.hvm.viridian->stimer[0];
+
+ if ( !vs->started )
+ return;
+
+ stop_timer(&vs->timer);
+ clear_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
+ vs->started = false;
+}
+
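+/*
+ * Timer callbacks run in Xen's timer softirq context: stimer_expire()
+ * simply marks the timer as pending and kicks the vCPU; the message
+ * itself is delivered by poll_stimer() in the context of the vCPU.
+ */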
+static void stimer_expire(void *data)
+{
+ struct viridian_stimer *vs = data;
+ struct vcpu *v = vs->v;
+ unsigned int stimerx = vs - &v->arch.hvm.viridian->stimer[0];
+
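+    /* The timer may have been disabled since expiry was scheduled */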
+ if ( !vs->config.fields.enabled )
+ return;
+
+ set_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
+ vcpu_kick(v);
+}
+
+static void start_stimer(struct viridian_stimer *vs)
+{
+ struct vcpu *v = vs->v;
+ unsigned int stimerx = vs - &v->arch.hvm.viridian->stimer[0];
+ int64_t now = time_now(v->domain);
+ s_time_t timeout;
+
+ if ( !test_and_set_bit(stimerx, &v->arch.hvm.viridian->stimer_enabled) )
+ printk(XENLOG_G_INFO "%pv: VIRIDIAN STIMER%u: enabled\n", v,
+ stimerx);
+
+ if ( vs->config.fields.periodic )
+ {
+ unsigned int missed = 0;
+ int64_t next;
+
+ /*
+         * The specification says that if the timer is lazy then we
+         * skip over any missed expirations, so we can treat this case
+         * the same as if the timer were currently stopped, i.e. we
+         * just schedule expiration to be 'count' ticks from now.
+ */
+ if ( !vs->started || vs->config.fields.lazy )
+ {
+ next = now + vs->count;
+ }
+ else
+ {
+ /*
+ * The timer is already started, so we're re-scheduling.
+ * Hence advance the timer expiration by one tick.
+ */
+ next = vs->expiration + vs->count;
+
+ /* Now check to see if any expirations have been missed */
+ if ( now - next > 0 )
+ missed = (now - next) / vs->count;
+
+ /*
+ * The specification says that if the timer is not lazy then
+ * a non-zero missed count should be used to reduce the period
+ * of the timer until it catches up, unless the count has
+ * reached a 'significant number', in which case the timer
+ * should be treated as lazy. Unfortunately the specification
+ * does not state what that number is so the choice of number
+ * here is a pure guess.
+ */
+ if ( missed > 3 )
+ {
+ missed = 0;
+ next = now + vs->count;
+ }
+ }
+
+ vs->expiration = next;
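+
+        /*
+         * NOTE: the reference time is in 100ns units, whereas the
+         *       timeout passed to set_timer() is in ns, hence the
+         *       multiplication by 100. Dividing by (missed + 1)
+         *       shortens the period so that a non-lazy timer can
+         *       catch up on missed expirations.
+         */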
+ timeout = ((next - now) * 100ull) / (missed + 1);
+ }
+ else
+ {
+ vs->expiration = vs->count;
+        if ( (int64_t)(vs->count - now) <= 0 )
+ {
+ set_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
+ return;
+ }
+
+ timeout = (vs->expiration - now) * 100ull;
+ }
+
+ vs->started = true;
+ migrate_timer(&vs->timer, smp_processor_id());
+ set_timer(&vs->timer, timeout + NOW());
+}
+
+static void poll_stimer(struct vcpu *v, unsigned int stimerx)
+{
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[stimerx];
+
+ if ( !test_bit(stimerx, &v->arch.hvm.viridian->stimer_pending) )
+ return;
+
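+    /*
+     * If the message cannot be delivered now (e.g. the SynIC message
+     * slot is still in use), leave the pending bit set so that
+     * delivery is re-attempted on a subsequent poll.
+     */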
+ if ( !viridian_synic_deliver_timer_msg(v, vs->config.fields.sintx,
+ stimerx, vs->expiration,
+ time_now(v->domain)) )
+ return;
+
+ clear_bit(stimerx, &v->arch.hvm.viridian->stimer_pending);
+
+ if ( vs->config.fields.periodic )
+ start_stimer(vs);
+ else
+ vs->config.fields.enabled = 0;
+}
+
+void viridian_time_poll_timers(struct vcpu *v)
+{
+ unsigned int i;
+
+ if ( !v->arch.hvm.viridian->stimer_pending )
+ return;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ poll_stimer(v, i);
+}
+
+void viridian_time_vcpu_freeze(struct vcpu *v)
+{
+ unsigned int i;
+
+ if ( !is_viridian_vcpu(v) )
+ return;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+ if ( vs->started )
+ stop_timer(&vs->timer);
+ }
+}
+
+void viridian_time_vcpu_thaw(struct vcpu *v)
+{
+ unsigned int i;
+
+ if ( !is_viridian_vcpu(v) )
+ return;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+ if ( vs->config.fields.enabled )
+ start_stimer(vs);
+ }
+}
+
void viridian_time_domain_freeze(struct domain *d)
{
+ struct vcpu *v;
+
+ if ( !is_viridian_domain(d) )
+ return;
+
+ for_each_vcpu ( d, v )
+ viridian_time_vcpu_freeze(v);
+
time_ref_count_freeze(d);
}
void viridian_time_domain_thaw(struct domain *d)
{
+ struct vcpu *v;
+
+ if ( !is_viridian_domain(d) )
+ return;
+
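+    /*
+     * Thaw the reference counter before re-starting any timers, since
+     * start_stimer() samples the current reference time.
+     */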
time_ref_count_thaw(d);
+
+ for_each_vcpu ( d, v )
+ viridian_time_vcpu_thaw(v);
}
int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
}
break;
+ case HV_X64_MSR_TIME_REF_COUNT:
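+        /* The partition reference counter is read-only */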
+ return X86EMUL_EXCEPTION;
+
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ {
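+        /*
+         * Config and count MSRs for each timer are interleaved, hence
+         * the division by 2 to yield the timer index.
+         */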
+ unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_CONFIG) / 2;
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[stimerx];
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ stop_stimer(vs);
+
+ vs->config.raw = val;
+
+ if ( !vs->config.fields.sintx )
+ vs->config.fields.enabled = 0;
+
+ if ( vs->config.fields.enabled )
+ start_stimer(vs);
+
+ break;
+ }
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ {
+ unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_COUNT) / 2;
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[stimerx];
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ stop_stimer(vs);
+
+ vs->count = val;
+
+ if ( !vs->count )
+ vs->config.fields.enabled = 0;
+ else if ( vs->config.fields.auto_enable )
+ vs->config.fields.enabled = 1;
+
+ if ( vs->config.fields.enabled )
+ start_stimer(vs);
+
+ break;
+ }
default:
gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016"PRIx64")\n",
__func__, idx, val);
break;
}
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ {
+ unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_CONFIG) / 2;
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ *val = v->arch.hvm.viridian->stimer[stimerx].config.raw;
+ break;
+ }
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ {
+ unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_COUNT) / 2;
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ *val = v->arch.hvm.viridian->stimer[stimerx].count;
+ break;
+ }
default:
gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x\n", __func__, idx);
return X86EMUL_EXCEPTION;
int viridian_time_vcpu_init(struct vcpu *v)
{
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+ vs->v = v;
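+        /*
+         * The timer is bound to the vCPU's current pCPU here, but is
+         * migrated to whichever pCPU starts it (see start_stimer()).
+         */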
+ init_timer(&vs->timer, stimer_expire, vs, v->processor);
+ }
+
return 0;
}
void viridian_time_vcpu_deinit(struct vcpu *v)
{
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+ kill_timer(&vs->timer);
+ vs->v = NULL;
+ }
}
void viridian_time_domain_deinit(struct domain *d)
void viridian_time_save_vcpu_ctxt(
const struct vcpu *v, struct hvm_viridian_vcpu_context *ctxt)
{
+ unsigned int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(v->arch.hvm.viridian->stimer) !=
+ ARRAY_SIZE(ctxt->stimer_config_msr));
+ BUILD_BUG_ON(ARRAY_SIZE(v->arch.hvm.viridian->stimer) !=
+ ARRAY_SIZE(ctxt->stimer_count_msr));
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ {
+        const struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+ ctxt->stimer_config_msr[i] = vs->config.raw;
+ ctxt->stimer_count_msr[i] = vs->count;
+ }
}
void viridian_time_load_vcpu_ctxt(
struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt)
{
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.viridian->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &v->arch.hvm.viridian->stimer[i];
+
+ vs->config.raw = ctxt->stimer_config_msr[i];
+ vs->count = ctxt->stimer_count_msr[i];
+ }
}
void viridian_time_save_domain_ctxt(