if ( has_hvm_container_vcpu(v) )
{
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
- if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
+ if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
goto out_err;
- if ( vmx_add_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
+ if ( vmx_add_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
goto out_err;
- vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, 0);
}
core2_vpmu_cxt = xzalloc_bytes(sizeof(*core2_vpmu_cxt) +
return -EINVAL;
if ( has_hvm_container_vcpu(v) )
- vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
+ vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
&core2_vpmu_cxt->global_ctrl);
else
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
return -EINVAL;
if ( has_hvm_container_vcpu(v) )
- vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
+ vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
&core2_vpmu_cxt->global_ctrl);
else
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
else
{
if ( has_hvm_container_vcpu(v) )
- vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+ vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
else
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
}
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
if ( has_hvm_container_vcpu(v) )
- vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+ vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
else
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
break;
return start;
}
-struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type)
+struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr,
+ enum vmx_msr_list_type type)
{
- struct vcpu *curr = current;
- struct arch_vmx_struct *vmx = &curr->arch.hvm_vmx;
+ const struct arch_vmx_struct *vmx = &v->arch.hvm_vmx;
struct vmx_msr_entry *start = NULL, *ent, *end;
unsigned int total;
+ ASSERT(v == current || !vcpu_runnable(v));
+
switch ( type )
{
case VMX_MSR_HOST:
return ((ent < end) && (ent->index == msr)) ? ent : NULL;
}
-int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type)
+int vmx_add_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type)
{
- struct vcpu *curr = current;
- struct arch_vmx_struct *vmx = &curr->arch.hvm_vmx;
+ struct arch_vmx_struct *vmx = &v->arch.hvm_vmx;
struct vmx_msr_entry **ptr, *start = NULL, *ent, *end;
unsigned int total;
+ int rc;
+
+ ASSERT(v == current || !vcpu_runnable(v));
switch ( type )
{
return -EINVAL;
}
+ vmx_vmcs_enter(v);
+
/* Allocate memory on first use. */
if ( unlikely(!*ptr) )
{
paddr_t addr;
if ( (*ptr = alloc_xenheap_page()) == NULL )
- return -ENOMEM;
+ {
+ rc = -ENOMEM;
+ goto out;
+ }
addr = virt_to_maddr(*ptr);
ent = locate_msr_entry(start, end, msr);
if ( (ent < end) && (ent->index == msr) )
- return 0;
+ {
+ rc = 0;
+ goto out;
+ }
if ( total == (PAGE_SIZE / sizeof(*ent)) )
- return -ENOSPC;
+ {
+ rc = -ENOSPC;
+ goto out;
+ }
memmove(ent + 1, ent, sizeof(*ent) * (end - ent));
break;
}
- return 0;
+ rc = 0;
+
+ out:
+ vmx_vmcs_exit(v);
+
+ return rc;
}
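
As a usage sketch (not part of the patch): the new ASSERT(v == current || !vcpu_runnable(v)), together with the comment added to struct arch_vmx_struct further down, means a remote caller is expected to hold the domctl lock and pause the vcpu before touching its MSR lists. The helper below is hypothetical and only illustrates that calling convention; vcpu_pause()/vcpu_unpause() are the standard Xen primitives, while example_read_remote_global_ctrl() is an invented name, not something this patch introduces.

/*
 * Hypothetical illustration, not part of this patch: reading an MSR from a
 * remote vcpu's guest load/save list.  Real callers would sit under the
 * domctl lock; pausing the vcpu ensures !vcpu_runnable(v), satisfying the
 * ASSERT() in vmx_find_msr()/vmx_add_msr().
 */
static int example_read_remote_global_ctrl(struct vcpu *v, uint64_t *val)
{
    int rc;

    vcpu_pause(v);
    rc = vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, val);
    vcpu_unpause(v);

    return rc; /* 0 on success, -ESRCH if the MSR is not on the list. */
}
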
void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector)
static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
{
+ struct vcpu *curr = current;
+
HVM_DBG_LOG(DBG_LEVEL_MSR, "ecx=%#x", msr);
switch ( msr )
goto done;
}
- if ( vmx_read_guest_msr(msr, msr_content) == 0 )
+ if ( vmx_read_guest_msr(curr, msr, msr_content) == 0 )
break;
if ( is_last_branch_msr(msr) )
for ( ; (rc == 0) && lbr->count; lbr++ )
for ( i = 0; (rc == 0) && (i < lbr->count); i++ )
- if ( (rc = vmx_add_guest_msr(lbr->base + i)) == 0 )
+ if ( (rc = vmx_add_guest_msr(v, lbr->base + i)) == 0 )
vmx_disable_intercept_for_msr(v, lbr->base + i, MSR_TYPE_R | MSR_TYPE_W);
}
if ( (rc < 0) ||
- (msr_content && (vmx_add_host_load_msr(msr) < 0)) )
+ (msr_content && (vmx_add_host_load_msr(v, msr) < 0)) )
hvm_inject_hw_exception(TRAP_machine_check, HVM_DELIVER_NO_ERROR_CODE);
else
__vmwrite(GUEST_IA32_DEBUGCTL, msr_content);
switch ( long_mode_do_msr_write(msr, msr_content) )
{
case HNDL_unhandled:
- if ( (vmx_write_guest_msr(msr, msr_content) != 0) &&
+ if ( (vmx_write_guest_msr(v, msr, msr_content) != 0) &&
!is_last_branch_msr(msr) )
switch ( wrmsr_hypervisor_regs(msr, msr_content) )
{
*
* Atomically reads the value of @v.
*/
-static inline int atomic_read(atomic_t *v)
+static inline int atomic_read(const atomic_t *v)
{
return read_atomic(&v->counter);
}
unsigned long cstar;
unsigned long *msr_bitmap;
- unsigned int msr_count;
+
+ /*
+ * Most accesses to the MSR host/guest load/save lists are in current
+ * context. However, the data can be modified by toolstack/migration
+ * actions. Remote access is only permitted for paused vcpus, and is
+ * protected under the domctl lock.
+ */
struct vmx_msr_entry *msr_area;
- unsigned int host_msr_count;
struct vmx_msr_entry *host_msr_area;
+ unsigned int msr_count;
+ unsigned int host_msr_count;
unsigned long eoi_exitmap_changed;
DECLARE_BITMAP(eoi_exit_bitmap, NR_VECTORS);
VMX_MSR_GUEST, /* MSRs saved on VMExit, loaded on VMEntry. */
};
-int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type);
+int vmx_add_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type);
-static inline int vmx_add_host_load_msr(uint32_t msr)
+static inline int vmx_add_guest_msr(struct vcpu *v, uint32_t msr)
{
- return vmx_add_msr(msr, VMX_MSR_HOST);
+ return vmx_add_msr(v, msr, VMX_MSR_GUEST);
}
-static inline int vmx_add_guest_msr(uint32_t msr)
+static inline int vmx_add_host_load_msr(struct vcpu *v, uint32_t msr)
{
- return vmx_add_msr(msr, VMX_MSR_GUEST);
+ return vmx_add_msr(v, msr, VMX_MSR_HOST);
}
-struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type);
+struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr,
+ enum vmx_msr_list_type type);
-static inline int vmx_read_guest_msr(uint32_t msr, uint64_t *val)
+static inline int vmx_read_guest_msr(const struct vcpu *v, uint32_t msr,
+ uint64_t *val)
{
- const struct vmx_msr_entry *ent = vmx_find_msr(msr, VMX_MSR_GUEST);
+ const struct vmx_msr_entry *ent = vmx_find_msr(v, msr, VMX_MSR_GUEST);
if ( !ent )
return -ESRCH;
return 0;
}
-static inline int vmx_write_guest_msr(uint32_t msr, uint64_t val)
+static inline int vmx_write_guest_msr(struct vcpu *v, uint32_t msr,
+ uint64_t val)
{
- struct vmx_msr_entry *ent = vmx_find_msr(msr, VMX_MSR_GUEST);
+ struct vmx_msr_entry *ent = vmx_find_msr(v, msr, VMX_MSR_GUEST);
if ( !ent )
return -ESRCH;
#define _VPF_in_reset 7
#define VPF_in_reset (1UL<<_VPF_in_reset)
-static inline int vcpu_runnable(struct vcpu *v)
+static inline int vcpu_runnable(const struct vcpu *v)
{
return !(v->pause_flags |
atomic_read(&v->pause_count) |