{
const struct arch_vmx_struct *vmx = &v->arch.hvm_vmx;
struct vmx_msr_entry *start = NULL, *ent, *end;
- unsigned int total;
+ unsigned int substart, subend, total;
ASSERT(v == current || !vcpu_runnable(v));
{
case VMX_MSR_HOST:
start = vmx->host_msr_area;
- total = vmx->host_msr_count;
+ substart = 0;
+ subend = vmx->host_msr_count;
+ total = subend;
break;
case VMX_MSR_GUEST:
start = vmx->msr_area;
- total = vmx->msr_count;
+ substart = 0;
+ subend = vmx->msr_save_count;
+ total = vmx->msr_load_count;
+ break;
+
+ case VMX_MSR_GUEST_LOADONLY:
+ start = vmx->msr_area;
+ substart = vmx->msr_save_count;
+ subend = vmx->msr_load_count;
+ total = subend;
break;
default:
return NULL;
end = start + total;
- ent = locate_msr_entry(start, end, msr);
+ ent = locate_msr_entry(start + substart, start + subend, msr);
return ((ent < end) && (ent->index == msr)) ? ent : NULL;
}
{
struct arch_vmx_struct *vmx = &v->arch.hvm_vmx;
struct vmx_msr_entry **ptr, *start = NULL, *ent, *end;
- unsigned int total;
+ unsigned int substart, subend, total;
int rc;
ASSERT(v == current || !vcpu_runnable(v));
{
case VMX_MSR_HOST:
ptr = &vmx->host_msr_area;
- total = vmx->host_msr_count;
+ substart = 0;
+ subend = vmx->host_msr_count;
+ total = subend;
break;
case VMX_MSR_GUEST:
ptr = &vmx->msr_area;
- total = vmx->msr_count;
+ substart = 0;
+ subend = vmx->msr_save_count;
+ total = vmx->msr_load_count;
+ break;
+
+ case VMX_MSR_GUEST_LOADONLY:
+ ptr = &vmx->msr_area;
+ substart = vmx->msr_save_count;
+ subend = vmx->msr_load_count;
+ total = subend;
break;
default:
break;
case VMX_MSR_GUEST:
+ case VMX_MSR_GUEST_LOADONLY:
__vmwrite(VM_EXIT_MSR_STORE_ADDR, addr);
__vmwrite(VM_ENTRY_MSR_LOAD_ADDR, addr);
break;
start = *ptr;
end = start + total;
- ent = locate_msr_entry(start, end, msr);
+ ent = locate_msr_entry(start + substart, start + subend, msr);
if ( (ent < end) && (ent->index == msr) )
{
break;
case VMX_MSR_GUEST:
+ __vmwrite(VM_EXIT_MSR_STORE_COUNT, ++vmx->msr_save_count);
+
+ /* Fallthrough */
+ case VMX_MSR_GUEST_LOADONLY:
ent->data = 0;
- __vmwrite(VM_EXIT_MSR_STORE_COUNT, ++vmx->msr_count);
- __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_count);
+ __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, ++vmx->msr_load_count);
break;
}
*/
struct vmx_msr_entry *msr_area;
struct vmx_msr_entry *host_msr_area;
- unsigned int msr_count;
+ unsigned int msr_load_count;
+ unsigned int msr_save_count;
unsigned int host_msr_count;
unsigned long eoi_exitmap_changed;
enum vmx_msr_list_type {
VMX_MSR_HOST, /* MSRs loaded on VMExit. */
VMX_MSR_GUEST, /* MSRs saved on VMExit, loaded on VMEntry. */
+ VMX_MSR_GUEST_LOADONLY, /* MSRs loaded on VMEntry only. */
};
+/**
+ * Add an MSR to an MSR list. No-op if the MSR already exists.
+ *
+ * It is undefined behaviour to try to insert the same MSR into both the
+ * GUEST and GUEST_LOADONLY lists.
+ *
+ * May fail if memory cannot be allocated for the list, or if the total
+ * number of entries would exceed the memory allocated.
+ */
int vmx_add_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type);
static inline int vmx_add_guest_msr(struct vcpu *v, uint32_t msr)