struct nestedvcpu *nv = &vcpu_nestedhvm(v);
if (nv->nv_vvmcx != NULL && nv->nv_vvmcxaddr != vmcbaddr) {
- ASSERT(nv->nv_vvmcxaddr != VMCX_EADDR);
+ ASSERT(nv->nv_vvmcxaddr != INVALID_PADDR);
hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
nv->nv_vvmcx = NULL;
- nv->nv_vvmcxaddr = VMCX_EADDR;
+ nv->nv_vvmcxaddr = INVALID_PADDR;
}
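/*
 * Note: VMCX_EADDR was a nested-HVM-local "no address" sentinel, while
 * INVALID_PADDR is the generic one. The substitution is only sound if
 * both expand to the same all-ones value; a hypothetical sanity check,
 * assuming VMCX_EADDR was (~0ULL):
 *
 *     BUILD_BUG_ON(INVALID_PADDR != ~0ULL);
 */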
if ( !nv->nv_vvmcx )
if (nv->nv_n2vmcx) {
free_vmcb(nv->nv_n2vmcx);
nv->nv_n2vmcx = NULL;
- nv->nv_n2vmcx_pa = VMCX_EADDR;
+ nv->nv_n2vmcx_pa = INVALID_PADDR;
}
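/*
 * Note: free_vmcb() pairs with the alloc_vmcb() done when the nested
 * vCPU was set up; clearing nv_n2vmcx and resetting nv_n2vmcx_pa to
 * INVALID_PADDR together keeps the virtual pointer and the cached
 * physical address consistent.
 */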
if (svm->ns_iomap)
svm->ns_iomap = NULL;
{
struct nestedsvm *svm = &vcpu_nestedsvm(v);
- svm->ns_msr_hsavepa = VMCX_EADDR;
- svm->ns_ovvmcb_pa = VMCX_EADDR;
+ svm->ns_msr_hsavepa = INVALID_PADDR;
+ svm->ns_ovvmcb_pa = INVALID_PADDR;
svm->ns_tscratio = DEFAULT_TSC_RATIO;
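/*
 * Note: with ns_ovvmcb_pa reset to INVALID_PADDR, any later "is the
 * cached VMCB still current?" test degrades safely to "no cache". A
 * sketch of that idiom (hypothetical helper, not part of the patch):
 *
 *     static inline bool_t nsvm_vmcb_cached(const struct nestedsvm *svm,
 *                                           paddr_t vmcbaddr)
 *     {
 *         return svm->ns_ovvmcb_pa != INVALID_PADDR &&
 *                svm->ns_ovvmcb_pa == vmcbaddr;
 *     }
 */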
/* Check if virtual VMCB cleanbits are valid */
vcleanbits_valid = 1;
- if (svm->ns_ovvmcb_pa == VMCX_EADDR)
+ if ( svm->ns_ovvmcb_pa == INVALID_PADDR )
vcleanbits_valid = 0;
if (svm->ns_ovvmcb_pa != nv->nv_vvmcxaddr)
vcleanbits_valid = 0;
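/*
 * Note: the two resets above are equivalent to computing the flag in a
 * single expression; shown only to make the condition explicit:
 *
 *     vcleanbits_valid = (svm->ns_ovvmcb_pa != INVALID_PADDR) &&
 *                        (svm->ns_ovvmcb_pa == nv->nv_vvmcxaddr);
 */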
ns_vmcb = nv->nv_vvmcx;
ASSERT(ns_vmcb != NULL);
ASSERT(nv->nv_n2vmcx != NULL);
- ASSERT(nv->nv_n2vmcx_pa != VMCX_EADDR);
+ ASSERT(nv->nv_n2vmcx_pa != INVALID_PADDR);
/* Save values for later use. Needed for Nested-on-Nested and
 * Shadow-on-Shadow paging.
 */
ASSERT(v->arch.hvm_svm.vmcb != NULL);
ASSERT(nv->nv_n1vmcx != NULL);
ASSERT(nv->nv_n2vmcx != NULL);
- ASSERT(nv->nv_n1vmcx_pa != VMCX_EADDR);
- ASSERT(nv->nv_n2vmcx_pa != VMCX_EADDR);
+ ASSERT(nv->nv_n1vmcx_pa != INVALID_PADDR);
+ ASSERT(nv->nv_n2vmcx_pa != INVALID_PADDR);
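/*
 * Note: both the host (n1) and shadow (n2) VMCBs must already be
 * allocated with known physical addresses at this point; the ASSERTs
 * document that invariant rather than handle a runtime failure.
 */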
if (nv->nv_vmexit_pending) {
vmexit:
nvmx->guest_vpid = 0;
nvmx->vmxon_region_pa = INVALID_PADDR;
nvcpu->nv_vvmcx = NULL;
- nvcpu->nv_vvmcxaddr = VMCX_EADDR;
+ nvcpu->nv_vvmcxaddr = INVALID_PADDR;
nvmx->intr.intr_info = 0;
nvmx->intr.error_code = 0;
nvmx->iobitmap[0] = NULL;
int i;
__clear_current_vvmcs(v);
- if ( nvcpu->nv_vvmcxaddr != VMCX_EADDR )
+ if ( nvcpu->nv_vvmcxaddr != INVALID_PADDR )
hvm_unmap_guest_frame(nvcpu->nv_vvmcx, 1);
nvcpu->nv_vvmcx = NULL;
- nvcpu->nv_vvmcxaddr = VMCX_EADDR;
+ nvcpu->nv_vvmcxaddr = INVALID_PADDR;
v->arch.hvm_vmx.vmcs_shadow_maddr = 0;
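/*
 * Note: the INVALID_PADDR test above gates the unmap, so a vCPU whose
 * vVMCS was never mapped (nv_vvmcxaddr still INVALID_PADDR) skips
 * hvm_unmap_guest_frame() entirely and only clears the bookkeeping.
 */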
for (i=0; i<2; i++) {
if ( nvmx->iobitmap[i] ) {
if ( nvmx_vcpu_in_vmx(v) )
{
vmreturn(regs,
- nvcpu->nv_vvmcxaddr != VMCX_EADDR ?
+ nvcpu->nv_vvmcxaddr != INVALID_PADDR ?
VMFAIL_VALID : VMFAIL_INVALID);
return X86EMUL_OKAY;
}
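/*
 * Note: per the SDM, VMfailValid (ZF set, error number stored in the
 * current VMCS) is only possible when a current VMCS exists; with
 * nv_vvmcxaddr == INVALID_PADDR there is none, so VMfailInvalid
 * (CF set) is raised instead.
 */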
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
/* check VMCS is valid and IO BITMAP is set */
- if ( (nvcpu->nv_vvmcxaddr != VMCX_EADDR) &&
+ if ( (nvcpu->nv_vvmcxaddr != INVALID_PADDR) &&
((nvmx->iobitmap[0] && nvmx->iobitmap[1]) ||
!(__n2_exec_control(v) & CPU_BASED_ACTIVATE_IO_BITMAP) ) )
nvcpu->nv_vmentry_pending = 1;
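/*
 * Note: i.e. the VMentry is only queued when a vVMCS is mapped and,
 * whenever CPU_BASED_ACTIVATE_IO_BITMAP is set in the virtual execution
 * controls, both I/O bitmap pages were successfully mapped as well.
 */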
if ( rc != X86EMUL_OKAY )
return rc;
- if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
+ if ( vcpu_nestedhvm(v).nv_vvmcxaddr == INVALID_PADDR )
{
vmreturn (regs, VMFAIL_INVALID);
return X86EMUL_OKAY;
if ( rc != X86EMUL_OKAY )
return rc;
- if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
+ if ( vcpu_nestedhvm(v).nv_vvmcxaddr == INVALID_PADDR )
{
vmreturn (regs, VMFAIL_INVALID);
return X86EMUL_OKAY;
if ( nvcpu->nv_vvmcxaddr != gpa )
nvmx_purge_vvmcs(v);
- if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
+ if ( nvcpu->nv_vvmcxaddr == INVALID_PADDR )
{
bool_t writable;
void *vvmcx = hvm_map_guest_frame_rw(paddr_to_pfn(gpa), 1, &writable);
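/*
 * Note: hvm_map_guest_frame_rw() can return NULL or map a frame that is
 * not writable (signalled through "writable"); the caller is expected to
 * check both before caching vvmcx and setting nv_vvmcxaddr to gpa.
 */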