     if ( is_hvm_domain(d) )
     {
         struct segment_register sreg;
+        unsigned long gs_shadow;
         c.nat->ctrlreg[0] = v->arch.hvm.guest_cr[0];
         c.nat->ctrlreg[2] = v->arch.hvm.guest_cr[2];
         c.nat->fs_base = sreg.base;
         hvm_get_segment_register(v, x86_seg_gs, &sreg);
         c.nat->user_regs.gs = sreg.sel;
+
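+        /* The shadow GS base is whichever GS base is currently inactive. */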
+        gs_shadow = hvm_get_reg(v, MSR_SHADOW_GS_BASE);
+
         if ( ring_0(&c.nat->user_regs) )
         {
             c.nat->gs_base_kernel = sreg.base;
-            c.nat->gs_base_user = hvm_get_shadow_gs_base(v);
+            c.nat->gs_base_user = gs_shadow;
         }
         else
         {
             c.nat->gs_base_user = sreg.base;
-            c.nat->gs_base_kernel = hvm_get_shadow_gs_base(v);
+            c.nat->gs_base_kernel = gs_shadow;
         }
     }
     else
     }
 }
-static unsigned long cf_check svm_get_shadow_gs_base(struct vcpu *v)
-{
-    return v->arch.hvm.svm.vmcb->kerngsbase;
-}
-
 static int cf_check svm_set_guest_pat(struct vcpu *v, u64 gpat)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 static uint64_t cf_check svm_get_reg(struct vcpu *v, unsigned int reg)
 {
+    struct vcpu *curr = current;
     const struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     struct domain *d = v->domain;
     case MSR_SPEC_CTRL:
         return vmcb->spec_ctrl;
+    case MSR_SHADOW_GS_BASE:
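+        /*
+         * KernelGSBase is only written back into the VMCB by VMSAVE, so
+         * sync the VMCB before reading the current vCPU's value.
+         */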
+        if ( v == curr )
+            svm_sync_vmcb(v, vmcb_in_sync);
+        return vmcb->kerngsbase;
+
     default:
         printk(XENLOG_G_ERR "%s(%pv, 0x%08x) Bad register\n",
                __func__, v, reg);
     .get_cpl = svm_get_cpl,
     .get_segment_register = svm_get_segment_register,
     .set_segment_register = svm_set_segment_register,
-    .get_shadow_gs_base = svm_get_shadow_gs_base,
     .update_guest_cr = svm_update_guest_cr,
     .update_guest_efer = svm_update_guest_efer,
     .cpuid_policy_changed = svm_cpuid_policy_changed,
     vmx_vmcs_exit(v);
 }
-static unsigned long cf_check vmx_get_shadow_gs_base(struct vcpu *v)
-{
-    return v->arch.hvm.vmx.shadow_gs;
-}
-
 static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
 {
     if ( !paging_mode_hap(v->domain) ||
 static uint64_t cf_check vmx_get_reg(struct vcpu *v, unsigned int reg)
 {
+    const struct vcpu *curr = current;
     struct domain *d = v->domain;
     uint64_t val = 0;
     int rc;
             domain_crash(d);
         }
         return val;
+
+    case MSR_SHADOW_GS_BASE:
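+        /*
+         * shadow_gs is only saved when v is scheduled out, so the current
+         * vCPU's value is live in the MSR itself.
+         */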
+        if ( v != curr )
+            return v->arch.hvm.vmx.shadow_gs;
+        rdmsrl(MSR_SHADOW_GS_BASE, val);
+        return val;
     }
     /* Logic which maybe requires remote VMCS acquisition. */
     .get_cpl = _vmx_get_cpl,
     .get_segment_register = vmx_get_segment_register,
     .set_segment_register = vmx_set_segment_register,
-    .get_shadow_gs_base = vmx_get_shadow_gs_base,
     .update_host_cr3 = vmx_update_host_cr3,
     .update_guest_cr = vmx_update_guest_cr,
     .update_guest_efer = vmx_update_guest_efer,
                                  struct segment_register *reg);
     void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
                                  struct segment_register *reg);
-    unsigned long (*get_shadow_gs_base)(struct vcpu *v);
     /*
      * Re-set the value of CR3 that Xen runs on when handling VM exits.
     return alternative_call(hvm_funcs.get_cpl, v);
 }
-static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
-{
-    return alternative_call(hvm_funcs.get_shadow_gs_base, v);
-}
-
 #define has_hvm_params(d) \
     ((d)->arch.hvm.params != NULL)
  * needed because DCE will kick in.
  */
 int hvm_guest_x86_mode(struct vcpu *v);
-unsigned long hvm_get_shadow_gs_base(struct vcpu *v);
 void hvm_cpuid_policy_changed(struct vcpu *v);
 void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, uint64_t at_tsc);
     hvm_get_segment_register(v, x86_seg_ss, &sreg);
     regs->ss = sreg.sel;
-    crs[7] = hvm_get_shadow_gs_base(v);
+    crs[7] = hvm_get_reg(v, MSR_SHADOW_GS_BASE);
 }
 static void _show_registers(