return rc;
}
-int vmx_read_guest_msr(u32 msr, u64 *val)
+struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type)
{
struct vcpu *curr = current;
- unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
- const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+ unsigned int msr_count;
+ struct vmx_msr_entry *msr_area = NULL;
+ unsigned int i;
- for ( i = 0; i < msr_count; i++ )
+ switch ( type )
{
- if ( msr_area[i].index == msr )
- {
- *val = msr_area[i].data;
- return 0;
- }
- }
+ case VMX_MSR_HOST:
+ msr_count = curr->arch.hvm_vmx.host_msr_count;
+ msr_area = curr->arch.hvm_vmx.host_msr_area;
+ break;
- return -ESRCH;
-}
+ case VMX_MSR_GUEST:
+ msr_count = curr->arch.hvm_vmx.msr_count;
+ msr_area = curr->arch.hvm_vmx.msr_area;
+ break;
-int vmx_write_guest_msr(u32 msr, u64 val)
-{
- struct vcpu *curr = current;
- unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
- struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+ default:
+ ASSERT_UNREACHABLE();
+ }
+
+ if ( msr_area == NULL )
+ return NULL;
for ( i = 0; i < msr_count; i++ )
{
if ( msr_area[i].index == msr )
- {
- msr_area[i].data = val;
- return 0;
- }
+ return &msr_area[i];
}
- return -ESRCH;
+ return NULL;
}
-int vmx_add_msr(u32 msr, int type)
+int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type)
{
struct vcpu *curr = current;
unsigned int idx, *msr_count;
struct vmx_msr_entry **msr_area, *msr_area_elem;
- if ( type == VMX_GUEST_MSR )
- {
- msr_count = &curr->arch.hvm_vmx.msr_count;
- msr_area = &curr->arch.hvm_vmx.msr_area;
- }
- else
+ switch ( type )
{
- ASSERT(type == VMX_HOST_MSR);
+ case VMX_MSR_HOST:
msr_count = &curr->arch.hvm_vmx.host_msr_count;
msr_area = &curr->arch.hvm_vmx.host_msr_area;
+ break;
+
+ case VMX_MSR_GUEST:
+ msr_count = &curr->arch.hvm_vmx.msr_count;
+ msr_area = &curr->arch.hvm_vmx.msr_area;
+ break;
+
+ default:
+ ASSERT_UNREACHABLE();
+ return -EINVAL;
}
if ( *msr_area == NULL )
{
if ( (*msr_area = alloc_xenheap_page()) == NULL )
return -ENOMEM;
- if ( type == VMX_GUEST_MSR )
+ switch ( type )
{
+ case VMX_MSR_HOST:
+ __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
+ break;
+
+ case VMX_MSR_GUEST:
__vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(*msr_area));
__vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
+ break;
}
- else
- __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
}
for ( idx = 0; idx < *msr_count; idx++ )
++*msr_count;
- if ( type == VMX_GUEST_MSR )
+ switch ( type )
{
+ case VMX_MSR_HOST:
+ rdmsrl(msr, msr_area_elem->data);
+ __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
+ break;
+
+ case VMX_MSR_GUEST:
msr_area_elem->data = 0;
__vmwrite(VM_EXIT_MSR_STORE_COUNT, *msr_count);
__vmwrite(VM_ENTRY_MSR_LOAD_COUNT, *msr_count);
- }
- else
- {
- rdmsrl(msr, msr_area_elem->data);
- __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
+ break;
}
return 0;
}
#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
-#define VMX_GUEST_MSR 0
-#define VMX_HOST_MSR 1
-
/* VM Instruction error numbers. */
#define VMX_INSN_INVALID_CONTROL_STATE 7
#define VMX_INSN_INVALID_HOST_STATE 8
+/* MSR load/save list infrastructure. */
+enum vmx_msr_list_type {
+ VMX_MSR_HOST, /* MSRs loaded on VMExit. */
+ VMX_MSR_GUEST, /* MSRs saved on VMExit, loaded on VMEntry. */
+};
+
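+/* Add an MSR to the given load/save list, allocating the list on first use. */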
+int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type);
+
+static inline int vmx_add_host_load_msr(uint32_t msr)
+{
+ return vmx_add_msr(msr, VMX_MSR_HOST);
+}
+
+static inline int vmx_add_guest_msr(uint32_t msr)
+{
+ return vmx_add_msr(msr, VMX_MSR_GUEST);
+}
+
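+/* Look up an MSR in the given load/save list; returns NULL if not present. */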
+struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type);
+
+static inline int vmx_read_guest_msr(uint32_t msr, uint64_t *val)
+{
+ const struct vmx_msr_entry *ent = vmx_find_msr(msr, VMX_MSR_GUEST);
+
+ if ( !ent )
+ return -ESRCH;
+
+ *val = ent->data;
+
+ return 0;
+}
+
+static inline int vmx_write_guest_msr(uint32_t msr, uint64_t val)
+{
+ struct vmx_msr_entry *ent = vmx_find_msr(msr, VMX_MSR_GUEST);
+
+ if ( !ent )
+ return -ESRCH;
+
+ ent->data = val;
+
+ return 0;
+}
+
void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
-int vmx_read_guest_msr(u32 msr, u64 *val);
-int vmx_write_guest_msr(u32 msr, u64 val);
-int vmx_add_msr(u32 msr, int type);
void vmx_vmcs_switch(paddr_t from, paddr_t to);
void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
u64 virtual_vmcs_vmread(const struct vcpu *, u32 encoding);
void virtual_vmcs_vmwrite(const struct vcpu *, u32 encoding, u64 val);
-static inline int vmx_add_guest_msr(u32 msr)
-{
- return vmx_add_msr(msr, VMX_GUEST_MSR);
-}
-static inline int vmx_add_host_load_msr(u32 msr)
-{
- return vmx_add_msr(msr, VMX_HOST_MSR);
-}
-
DECLARE_PER_CPU(bool_t, vmxon);
bool_t vmx_vcpu_pml_enabled(const struct vcpu *v);