         break;
     case EXIT_REASON_VMXOFF:
-        if ( nvmx_handle_vmxoff(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMXON:
-        if ( nvmx_handle_vmxon(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMCLEAR:
-        if ( nvmx_handle_vmclear(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMPTRLD:
-        if ( nvmx_handle_vmptrld(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMPTRST:
-        if ( nvmx_handle_vmptrst(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMREAD:
-        if ( nvmx_handle_vmread(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMWRITE:
-        if ( nvmx_handle_vmwrite(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMLAUNCH:
-        if ( nvmx_handle_vmlaunch(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_VMRESUME:
-        if ( nvmx_handle_vmresume(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_INVEPT:
-        if ( nvmx_handle_invept(regs) == X86EMUL_OKAY )
-            update_guest_eip();
-        break;
-
     case EXIT_REASON_INVVPID:
-        if ( nvmx_handle_invvpid(regs) == X86EMUL_OKAY )
+        if ( nvmx_handle_vmx_insn(regs, exit_reason) == X86EMUL_OKAY )
             update_guest_eip();
         break;
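
[ For reference, a sketch (not part of the diff) of how the switch reads
  once this hunk is applied, with every retained case label falling
  through to the single dispatch site:

    case EXIT_REASON_VMXOFF:
    case EXIT_REASON_VMXON:
    case EXIT_REASON_VMCLEAR:
    case EXIT_REASON_VMPTRLD:
    case EXIT_REASON_VMPTRST:
    case EXIT_REASON_VMREAD:
    case EXIT_REASON_VMWRITE:
    case EXIT_REASON_VMLAUNCH:
    case EXIT_REASON_VMRESUME:
    case EXIT_REASON_INVEPT:
    case EXIT_REASON_INVVPID:
        if ( nvmx_handle_vmx_insn(regs, exit_reason) == X86EMUL_OKAY )
            update_guest_eip();
        break;
]
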
  * VMX instructions handling
  */
-int nvmx_handle_vmxon(struct cpu_user_regs *regs)
+static int nvmx_handle_vmxon(struct cpu_user_regs *regs)
 {
     struct vcpu *v=current;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     return X86EMUL_OKAY;
 }
-int nvmx_handle_vmxoff(struct cpu_user_regs *regs)
+static int nvmx_handle_vmxoff(struct cpu_user_regs *regs)
 {
     struct vcpu *v=current;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     return X86EMUL_OKAY;
 }
-int nvmx_handle_vmresume(struct cpu_user_regs *regs)
+static int nvmx_handle_vmresume(struct cpu_user_regs *regs)
 {
     bool_t launched;
     struct vcpu *v = current;
     return nvmx_vmresume(v,regs);
 }
-int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
+static int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
 {
     bool_t launched;
     struct vcpu *v = current;
     return rc;
 }
-int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
+static int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     struct vmx_inst_decoded decode;
     return X86EMUL_OKAY;
 }
-int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
+static int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     struct vmx_inst_decoded decode;
     return X86EMUL_OKAY;
 }
-int nvmx_handle_vmclear(struct cpu_user_regs *regs)
+static int nvmx_handle_vmclear(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     struct vmx_inst_decoded decode;
     return X86EMUL_OKAY;
 }
-int nvmx_handle_vmread(struct cpu_user_regs *regs)
+static int nvmx_handle_vmread(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     struct vmx_inst_decoded decode;
     return X86EMUL_OKAY;
 }
-int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
+static int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     struct vmx_inst_decoded decode;
     return X86EMUL_OKAY;
 }
-int nvmx_handle_invept(struct cpu_user_regs *regs)
+static int nvmx_handle_invept(struct cpu_user_regs *regs)
 {
     struct vmx_inst_decoded decode;
     unsigned long eptp;
     return X86EMUL_OKAY;
 }
-int nvmx_handle_invvpid(struct cpu_user_regs *regs)
+static int nvmx_handle_invvpid(struct cpu_user_regs *regs)
 {
     struct vmx_inst_decoded decode;
     unsigned long vpid;
     return X86EMUL_OKAY;
 }
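+/*
+ * Common checks shared by all VMX instruction intercepts, performed once
+ * here before dispatching to the individual handlers above.
+ */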
+int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason)
+{
+    struct vcpu *curr = current;
+    int ret;
+
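+    /*
+     * #UD if the guest has CR4.VMXE clear, if nested virt isn't enabled
+     * for the domain, or if the guest is in real mode, virtual-8086 mode,
+     * or (when in long mode) compatibility mode.
+     */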
+    if ( !(curr->arch.hvm_vcpu.guest_cr[4] & X86_CR4_VMXE) ||
+         !nestedhvm_enabled(curr->domain) ||
+         (vmx_guest_x86_mode(curr) < (hvm_long_mode_active(curr) ? 8 : 2)) )
+    {
+        hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
+        return X86EMUL_EXCEPTION;
+    }
+
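+    /* VMX instructions require CPL 0 and raise #GP(0) otherwise. */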
+    if ( vmx_get_cpl() > 0 )
+    {
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
+        return X86EMUL_EXCEPTION;
+    }
+
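+    /* Common checks passed: dispatch on the specific instruction. */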
+    switch ( exit_reason )
+    {
+    case EXIT_REASON_VMXOFF:
+        ret = nvmx_handle_vmxoff(regs);
+        break;
+
+    case EXIT_REASON_VMXON:
+        ret = nvmx_handle_vmxon(regs);
+        break;
+
+    case EXIT_REASON_VMCLEAR:
+        ret = nvmx_handle_vmclear(regs);
+        break;
+
+    case EXIT_REASON_VMPTRLD:
+        ret = nvmx_handle_vmptrld(regs);
+        break;
+
+    case EXIT_REASON_VMPTRST:
+        ret = nvmx_handle_vmptrst(regs);
+        break;
+
+    case EXIT_REASON_VMREAD:
+        ret = nvmx_handle_vmread(regs);
+        break;
+
+    case EXIT_REASON_VMWRITE:
+        ret = nvmx_handle_vmwrite(regs);
+        break;
+
+    case EXIT_REASON_VMLAUNCH:
+        ret = nvmx_handle_vmlaunch(regs);
+        break;
+
+    case EXIT_REASON_VMRESUME:
+        ret = nvmx_handle_vmresume(regs);
+        break;
+
+    case EXIT_REASON_INVEPT:
+        ret = nvmx_handle_invept(regs);
+        break;
+
+    case EXIT_REASON_INVVPID:
+        ret = nvmx_handle_invvpid(regs);
+        break;
+
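+    /* Callers only pass VMX instruction exit reasons; anything else is a bug. */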
+    default:
+        ASSERT_UNREACHABLE();
+        domain_crash(curr->domain);
+        ret = X86EMUL_UNHANDLEABLE;
+        break;
+    }
+
+    return ret;
+}
+
 #define __emul_value(enable1, default1) \
     ((enable1 | default1) << 32 | (default1))
 bool_t nvmx_ept_enabled(struct vcpu *v);
-int nvmx_handle_vmxon(struct cpu_user_regs *regs);
-int nvmx_handle_vmxoff(struct cpu_user_regs *regs);
-
 #define EPT_TRANSLATE_SUCCEED 0
 #define EPT_TRANSLATE_VIOLATION 1
 #define EPT_TRANSLATE_MISCONFIG 2
 uint64_t get_shadow_eptp(struct vcpu *v);
 void nvmx_destroy_vmcs(struct vcpu *v);
-int nvmx_handle_vmptrld(struct cpu_user_regs *regs);
-int nvmx_handle_vmptrst(struct cpu_user_regs *regs);
-int nvmx_handle_vmclear(struct cpu_user_regs *regs);
-int nvmx_handle_vmread(struct cpu_user_regs *regs);
-int nvmx_handle_vmwrite(struct cpu_user_regs *regs);
-int nvmx_handle_vmresume(struct cpu_user_regs *regs);
-int nvmx_handle_vmlaunch(struct cpu_user_regs *regs);
-int nvmx_handle_invept(struct cpu_user_regs *regs);
-int nvmx_handle_invvpid(struct cpu_user_regs *regs);
+int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason);
 int nvmx_msr_read_intercept(unsigned int msr,
                             u64 *msr_content);