void viridian_dump_guest_page(const struct vcpu *v, const char *name,
const struct viridian_page *vp);
-void viridian_map_guest_page(struct vcpu *v, struct viridian_page *vp);
+void viridian_map_guest_page(const struct vcpu *v, struct viridian_page *vp);
void viridian_unmap_guest_page(struct viridian_page *vp);
#endif /* X86_HVM_VIRIDIAN_PRIVATE_H */
uint8_t ReservedZBytePadding[PAGE_SIZE];
} HV_VP_ASSIST_PAGE;
-void viridian_apic_assist_set(struct vcpu *v)
+void viridian_apic_assist_set(const struct vcpu *v)
{
- HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian.vp_assist.ptr;
+ HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian->vp_assist.ptr;
if ( !ptr )
return;
/*
* If there is already an assist pending then something has gone
* wrong and the VM will most likely hang so force a crash now
* to make the problem clear.
*/
- if ( v->arch.hvm.viridian.apic_assist_pending )
+ if ( v->arch.hvm.viridian->apic_assist_pending )
domain_crash(v->domain);
- v->arch.hvm.viridian.apic_assist_pending = true;
+ v->arch.hvm.viridian->apic_assist_pending = true;
ptr->ApicAssist.no_eoi = 1;
}
-bool viridian_apic_assist_completed(struct vcpu *v)
+bool viridian_apic_assist_completed(const struct vcpu *v)
{
- HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian.vp_assist.ptr;
+ HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian->vp_assist.ptr;
if ( !ptr )
return false;
- if ( v->arch.hvm.viridian.apic_assist_pending &&
+ if ( v->arch.hvm.viridian->apic_assist_pending &&
!ptr->ApicAssist.no_eoi )
{
/* An EOI has been avoided */
- v->arch.hvm.viridian.apic_assist_pending = false;
+ v->arch.hvm.viridian->apic_assist_pending = false;
return true;
}
return false;
}
-void viridian_apic_assist_clear(struct vcpu *v)
+void viridian_apic_assist_clear(const struct vcpu *v)
{
- HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian.vp_assist.ptr;
+ HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian->vp_assist.ptr;
if ( !ptr )
return;
ptr->ApicAssist.no_eoi = 0;
- v->arch.hvm.viridian.apic_assist_pending = false;
+ v->arch.hvm.viridian->apic_assist_pending = false;
}
int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
case HV_X64_MSR_VP_ASSIST_PAGE:
/* release any previous mapping */
- viridian_unmap_guest_page(&v->arch.hvm.viridian.vp_assist);
- v->arch.hvm.viridian.vp_assist.msr.raw = val;
+ viridian_unmap_guest_page(&v->arch.hvm.viridian->vp_assist);
+ v->arch.hvm.viridian->vp_assist.msr.raw = val;
viridian_dump_guest_page(v, "VP_ASSIST",
- &v->arch.hvm.viridian.vp_assist);
- if ( v->arch.hvm.viridian.vp_assist.msr.fields.enabled )
- viridian_map_guest_page(v, &v->arch.hvm.viridian.vp_assist);
+ &v->arch.hvm.viridian->vp_assist);
+ if ( v->arch.hvm.viridian->vp_assist.msr.fields.enabled )
+ viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
break;
default:
break;
case HV_X64_MSR_VP_ASSIST_PAGE:
- *val = v->arch.hvm.viridian.vp_assist.msr.raw;
+ *val = v->arch.hvm.viridian->vp_assist.msr.raw;
break;
default:
void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
struct hvm_viridian_vcpu_context *ctxt)
{
- ctxt->apic_assist_pending = v->arch.hvm.viridian.apic_assist_pending;
- ctxt->vp_assist_msr = v->arch.hvm.viridian.vp_assist.msr.raw;
+ ctxt->apic_assist_pending = v->arch.hvm.viridian->apic_assist_pending;
+ ctxt->vp_assist_msr = v->arch.hvm.viridian->vp_assist.msr.raw;
}
void viridian_synic_load_vcpu_ctxt(
struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt)
{
- v->arch.hvm.viridian.vp_assist.msr.raw = ctxt->vp_assist_msr;
- if ( v->arch.hvm.viridian.vp_assist.msr.fields.enabled )
- viridian_map_guest_page(v, &v->arch.hvm.viridian.vp_assist);
+ v->arch.hvm.viridian->vp_assist.msr.raw = ctxt->vp_assist_msr;
+ if ( v->arch.hvm.viridian->vp_assist.msr.fields.enabled )
+ viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
- v->arch.hvm.viridian.apic_assist_pending = ctxt->apic_assist_pending;
+ v->arch.hvm.viridian->apic_assist_pending = ctxt->apic_assist_pending;
}
/*
static void dump_reference_tsc(const struct domain *d)
{
- const union viridian_page_msr *rt = &d->arch.hvm.viridian.reference_tsc;
+ const union viridian_page_msr *rt = &d->arch.hvm.viridian->reference_tsc;
if ( !rt->fields.enabled )
return;
static void update_reference_tsc(struct domain *d, bool initialize)
{
- unsigned long gmfn = d->arch.hvm.viridian.reference_tsc.fields.pfn;
+ unsigned long gmfn = d->arch.hvm.viridian->reference_tsc.fields.pfn;
struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
HV_REFERENCE_TSC_PAGE *p;
put_page_and_type(page);
}
-static int64_t raw_trc_val(struct domain *d)
+static int64_t raw_trc_val(const struct domain *d)
{
uint64_t tsc;
struct time_scale tsc_to_ns;
return scale_delta(tsc, &tsc_to_ns) / 100ul;
}
-void viridian_time_ref_count_freeze(struct domain *d)
+void viridian_time_ref_count_freeze(const struct domain *d)
{
- struct viridian_time_ref_count *trc;
-
- trc = &d->arch.hvm.viridian.time_ref_count;
+ struct viridian_time_ref_count *trc =
+ &d->arch.hvm.viridian->time_ref_count;
if ( test_and_clear_bit(_TRC_running, &trc->flags) )
trc->val = raw_trc_val(d) + trc->off;
}
-void viridian_time_ref_count_thaw(struct domain *d)
+void viridian_time_ref_count_thaw(const struct domain *d)
{
- struct viridian_time_ref_count *trc;
-
- trc = &d->arch.hvm.viridian.time_ref_count;
+ struct viridian_time_ref_count *trc =
+ &d->arch.hvm.viridian->time_ref_count;
if ( !d->is_shutting_down &&
!test_and_set_bit(_TRC_running, &trc->flags) )
if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
return X86EMUL_EXCEPTION;
- d->arch.hvm.viridian.reference_tsc.raw = val;
+ d->arch.hvm.viridian->reference_tsc.raw = val;
dump_reference_tsc(d);
- if ( d->arch.hvm.viridian.reference_tsc.fields.enabled )
+ if ( d->arch.hvm.viridian->reference_tsc.fields.enabled )
update_reference_tsc(d, true);
break;
if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
return X86EMUL_EXCEPTION;
- *val = d->arch.hvm.viridian.reference_tsc.raw;
+ *val = d->arch.hvm.viridian->reference_tsc.raw;
break;
case HV_X64_MSR_TIME_REF_COUNT:
{
struct viridian_time_ref_count *trc =
- &d->arch.hvm.viridian.time_ref_count;
+ &d->arch.hvm.viridian->time_ref_count;
if ( !(viridian_feature_mask(d) & HVMPV_time_ref_count) )
return X86EMUL_EXCEPTION;
void viridian_time_save_domain_ctxt(
const struct domain *d, struct hvm_viridian_domain_context *ctxt)
{
- ctxt->time_ref_count = d->arch.hvm.viridian.time_ref_count.val;
- ctxt->reference_tsc = d->arch.hvm.viridian.reference_tsc.raw;
+ ctxt->time_ref_count = d->arch.hvm.viridian->time_ref_count.val;
+ ctxt->reference_tsc = d->arch.hvm.viridian->reference_tsc.raw;
}
void viridian_time_load_domain_ctxt(
struct domain *d, const struct hvm_viridian_domain_context *ctxt)
{
- d->arch.hvm.viridian.time_ref_count.val = ctxt->time_ref_count;
- d->arch.hvm.viridian.reference_tsc.raw = ctxt->reference_tsc;
+ d->arch.hvm.viridian->time_ref_count.val = ctxt->time_ref_count;
+ d->arch.hvm.viridian->reference_tsc.raw = ctxt->reference_tsc;
- if ( d->arch.hvm.viridian.reference_tsc.fields.enabled )
+ if ( d->arch.hvm.viridian->reference_tsc.fields.enabled )
update_reference_tsc(d, false);
}
/*
* Hypervisor information, but only if the guest has set its
* own version number.
*/
- if ( d->arch.hvm.viridian.guest_os_id.raw == 0 )
+ if ( d->arch.hvm.viridian->guest_os_id.raw == 0 )
break;
res->a = viridian_build;
res->b = ((uint32_t)viridian_major << 16) | viridian_minor;
case 4:
/* Recommended hypercall usage. */
- if ( (d->arch.hvm.viridian.guest_os_id.raw == 0) ||
- (d->arch.hvm.viridian.guest_os_id.fields.os < 4) )
+ if ( (d->arch.hvm.viridian->guest_os_id.raw == 0) ||
+ (d->arch.hvm.viridian->guest_os_id.fields.os < 4) )
break;
res->a = CPUID4A_RELAX_TIMER_INT;
if ( viridian_feature_mask(d) & HVMPV_hcall_remote_tlb_flush )
{
const union viridian_guest_os_id_msr *goi;
- goi = &d->arch.hvm.viridian.guest_os_id;
+ goi = &d->arch.hvm.viridian->guest_os_id;
printk(XENLOG_G_INFO
"d%d: VIRIDIAN GUEST_OS_ID: vendor: %x os: %x major: %x minor: %x sp: %x build: %x\n",
{
const union viridian_page_msr *hg;
- hg = &d->arch.hvm.viridian.hypercall_gpa;
+ hg = &d->arch.hvm.viridian->hypercall_gpa;
printk(XENLOG_G_INFO "d%d: VIRIDIAN HYPERCALL: enabled: %x pfn: %lx\n",
d->domain_id,
static void enable_hypercall_page(struct domain *d)
{
- unsigned long gmfn = d->arch.hvm.viridian.hypercall_gpa.fields.pfn;
+ unsigned long gmfn = d->arch.hvm.viridian->hypercall_gpa.fields.pfn;
struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
uint8_t *p;
switch ( idx )
{
case HV_X64_MSR_GUEST_OS_ID:
- d->arch.hvm.viridian.guest_os_id.raw = val;
+ d->arch.hvm.viridian->guest_os_id.raw = val;
dump_guest_os_id(d);
break;
case HV_X64_MSR_HYPERCALL:
- d->arch.hvm.viridian.hypercall_gpa.raw = val;
+ d->arch.hvm.viridian->hypercall_gpa.raw = val;
dump_hypercall(d);
- if ( d->arch.hvm.viridian.hypercall_gpa.fields.enabled )
+ if ( d->arch.hvm.viridian->hypercall_gpa.fields.enabled )
enable_hypercall_page(d);
break;
case HV_X64_MSR_CRASH_P3:
case HV_X64_MSR_CRASH_P4:
BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
- ARRAY_SIZE(v->arch.hvm.viridian.crash_param));
+ ARRAY_SIZE(v->arch.hvm.viridian->crash_param));
idx -= HV_X64_MSR_CRASH_P0;
- v->arch.hvm.viridian.crash_param[idx] = val;
+ v->arch.hvm.viridian->crash_param[idx] = val;
break;
case HV_X64_MSR_CRASH_CTL:
spin_unlock(&d->shutdown_lock);
gprintk(XENLOG_WARNING, "VIRIDIAN CRASH: %lx %lx %lx %lx %lx\n",
- v->arch.hvm.viridian.crash_param[0],
- v->arch.hvm.viridian.crash_param[1],
- v->arch.hvm.viridian.crash_param[2],
- v->arch.hvm.viridian.crash_param[3],
- v->arch.hvm.viridian.crash_param[4]);
+ v->arch.hvm.viridian->crash_param[0],
+ v->arch.hvm.viridian->crash_param[1],
+ v->arch.hvm.viridian->crash_param[2],
+ v->arch.hvm.viridian->crash_param[3],
+ v->arch.hvm.viridian->crash_param[4]);
break;
}
switch ( idx )
{
case HV_X64_MSR_GUEST_OS_ID:
- *val = d->arch.hvm.viridian.guest_os_id.raw;
+ *val = d->arch.hvm.viridian->guest_os_id.raw;
break;
case HV_X64_MSR_HYPERCALL:
- *val = d->arch.hvm.viridian.hypercall_gpa.raw;
+ *val = d->arch.hvm.viridian->hypercall_gpa.raw;
break;
case HV_X64_MSR_VP_INDEX:
case HV_X64_MSR_CRASH_P3:
case HV_X64_MSR_CRASH_P4:
BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
- ARRAY_SIZE(v->arch.hvm.viridian.crash_param));
+ ARRAY_SIZE(v->arch.hvm.viridian->crash_param));
idx -= HV_X64_MSR_CRASH_P0;
- *val = v->arch.hvm.viridian.crash_param[idx];
+ *val = v->arch.hvm.viridian->crash_param[idx];
break;
case HV_X64_MSR_CRASH_CTL:
int viridian_vcpu_init(struct vcpu *v)
{
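+ /* State must not already be allocated; xzalloc() returns it zeroed */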
+ ASSERT(!v->arch.hvm.viridian);
+ v->arch.hvm.viridian = xzalloc(struct viridian_vcpu);
+ if ( !v->arch.hvm.viridian )
+ return -ENOMEM;
+
return 0;
}
int viridian_domain_init(struct domain *d)
{
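+ /* Likewise allocate the per-domain viridian state, zeroed by xzalloc() */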
+ ASSERT(!d->arch.hvm.viridian);
+ d->arch.hvm.viridian = xzalloc(struct viridian_domain);
+ if ( !d->arch.hvm.viridian )
+ return -ENOMEM;
+
return 0;
}
void viridian_vcpu_deinit(struct vcpu *v)
{
- viridian_synic_wrmsr(v, HV_X64_MSR_VP_ASSIST_PAGE, 0);
+ if ( !v->arch.hvm.viridian )
+ return;
+
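+ /* Tear down the VP assist page only for vCPUs with viridian enabled */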
+ if ( is_viridian_vcpu(v) )
+ viridian_synic_wrmsr(v, HV_X64_MSR_VP_ASSIST_PAGE, 0);
+
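+ /* XFREE() also clears the pointer, so a repeated deinit is harmless */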
+ XFREE(v->arch.hvm.viridian);
}
void viridian_domain_deinit(struct domain *d)
for_each_vcpu ( d, v )
viridian_vcpu_deinit(v);
+
+ if ( !d->arch.hvm.viridian )
+ return;
+
+ XFREE(d->arch.hvm.viridian);
}
/*
v, name, (unsigned long)vp->msr.fields.pfn);
}
-void viridian_map_guest_page(struct vcpu *v, struct viridian_page *vp)
+void viridian_map_guest_page(const struct vcpu *v, struct viridian_page *vp)
{
struct domain *d = v->domain;
unsigned long gmfn = vp->msr.fields.pfn;
{
const struct domain *d = v->domain;
struct hvm_viridian_domain_context ctxt = {
- .hypercall_gpa = d->arch.hvm.viridian.hypercall_gpa.raw,
- .guest_os_id = d->arch.hvm.viridian.guest_os_id.raw,
+ .hypercall_gpa = d->arch.hvm.viridian->hypercall_gpa.raw,
+ .guest_os_id = d->arch.hvm.viridian->guest_os_id.raw,
};
if ( !is_viridian_domain(d) )
if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
return -EINVAL;
- d->arch.hvm.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa;
- d->arch.hvm.viridian.guest_os_id.raw = ctxt.guest_os_id;
+ d->arch.hvm.viridian->hypercall_gpa.raw = ctxt.hypercall_gpa;
+ d->arch.hvm.viridian->guest_os_id.raw = ctxt.guest_os_id;
viridian_time_load_domain_ctxt(d, &ctxt);
{
struct hvm_viridian_vcpu_context ctxt = {};
- if ( !is_viridian_domain(v->domain) )
+ if ( !is_viridian_vcpu(v) )
return 0;
viridian_synic_save_vcpu_ctxt(v, &ctxt);
/* hypervisor intercepted msix table */
struct list_head msixtbl_list;
- struct viridian_domain viridian;
+ struct viridian_domain *viridian;
bool_t hap_enabled;
bool_t mem_sharing_enabled;
#define is_viridian_domain(d) \
(is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))
+#define is_viridian_vcpu(v) \
+ is_viridian_domain((v)->domain)
+
#define has_viridian_time_ref_count(d) \
(is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_time_ref_count))
}
#define is_viridian_domain(d) ((void)(d), false)
+#define is_viridian_vcpu(v) ((void)(v), false)
#define has_viridian_time_ref_count(d) ((void)(d), false)
#define hvm_long_mode_active(v) ((void)(v), false)
#define hvm_get_guest_time(v) ((void)(v), 0)
/* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
struct x86_event inject_event;
- struct viridian_vcpu viridian;
+ struct viridian_vcpu *viridian;
};
#endif /* __ASM_X86_HVM_VCPU_H__ */
int
viridian_hypercall(struct cpu_user_regs *regs);
-void viridian_time_ref_count_freeze(struct domain *d);
-void viridian_time_ref_count_thaw(struct domain *d);
+void viridian_time_ref_count_freeze(const struct domain *d);
+void viridian_time_ref_count_thaw(const struct domain *d);
int viridian_vcpu_init(struct vcpu *v);
int viridian_domain_init(struct domain *d);
void viridian_vcpu_deinit(struct vcpu *v);
void viridian_domain_deinit(struct domain *d);
-void viridian_apic_assist_set(struct vcpu *v);
-bool viridian_apic_assist_completed(struct vcpu *v);
-void viridian_apic_assist_clear(struct vcpu *v);
+void viridian_apic_assist_set(const struct vcpu *v);
+bool viridian_apic_assist_completed(const struct vcpu *v);
+void viridian_apic_assist_clear(const struct vcpu *v);
#endif /* __ASM_X86_HVM_VIRIDIAN_H__ */