These are more vestigial remnants of PVHv1.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
v->arch.hvm_vcpu.single_step = !v->arch.hvm_vcpu.single_step;
}
-int hvm_set_mode(struct vcpu *v, int mode)
-{
-
- switch ( mode )
- {
- case 4:
- v->arch.hvm_vcpu.guest_efer &= ~(EFER_LMA | EFER_LME);
- break;
- case 8:
- v->arch.hvm_vcpu.guest_efer |= (EFER_LMA | EFER_LME);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- hvm_update_guest_efer(v);
-
- if ( hvm_funcs.set_mode )
- return hvm_funcs.set_mode(v, mode);
-
- return 0;
-}
-
void hvm_domain_soft_reset(struct domain *d)
{
hvm_destroy_all_ioreq_servers(d);
return rc;
}
-static int vmx_set_mode(struct vcpu *v, int mode)
-{
- unsigned long attr;
-
- ASSERT((mode == 4) || (mode == 8));
-
- attr = (mode == 4) ? 0xc09b : 0xa09b;
-
- vmx_vmcs_enter(v);
- __vmwrite(GUEST_CS_AR_BYTES, attr);
- vmx_vmcs_exit(v);
-
- return 0;
-}
-
static bool vmx_get_pending_event(struct vcpu *v, struct x86_event *info)
{
unsigned long intr_info, error_code;
.nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
.enable_msr_interception = vmx_enable_msr_interception,
.is_singlestep_supported = vmx_is_singlestep_supported,
- .set_mode = vmx_set_mode,
.altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
.altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
.altp2m_vcpu_emulate_ve = vmx_vcpu_emulate_ve,
void (*enable_msr_interception)(struct domain *d, uint32_t msr);
bool_t (*is_singlestep_supported)(void);
- int (*set_mode)(struct vcpu *v, int mode);
/* Alternate p2m */
void (*altp2m_vcpu_update_p2m)(struct vcpu *v);
u64 hvm_scale_tsc(const struct domain *d, u64 tsc);
u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz);
-int hvm_set_mode(struct vcpu *v, int mode);
void hvm_init_guest_time(struct domain *d);
void hvm_set_guest_time(struct vcpu *v, u64 guest_time);
u64 hvm_get_guest_time_fixed(struct vcpu *v, u64 at_tsc);