shadow_cntrl = __get_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_CONTROLS);
shadow_cntrl &= ~(VM_EXIT_SAVE_DEBUG_CNTRLS
- | VM_EXIT_SAVE_GUEST_PAT
+ | VM_EXIT_LOAD_HOST_PAT
| VM_EXIT_SAVE_GUEST_EFER);
shadow_cntrl |= host_cntrl;
__vmwrite(VM_EXIT_CONTROLS, shadow_cntrl);

/* 64 BITS */
VMCS_LINK_POINTER,
GUEST_IA32_DEBUGCTL,
+ GUEST_PAT,
/* 32 BITS */
GUEST_ES_LIMIT,
GUEST_CS_LIMIT,

struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
void *vvmcs = nvcpu->nv_vvmcx;
int i;
+ u32 control;
/* vvmcs.gstate to shadow vmcs.gstate */
for ( i = 0; i < ARRAY_SIZE(vmcs_gstate_field); i++ )
hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4));
hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3));
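+ /* Honour L1's "load IA32_PAT" VM-entry control: load the L2 PAT from the vvmcs. */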
+ control = __get_vvmcs(vvmcs, VM_ENTRY_CONTROLS);
+ if ( control & VM_ENTRY_LOAD_GUEST_PAT )
+ hvm_set_guest_pat(v, __get_vvmcs(vvmcs, GUEST_PAT));
+
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
vvmcs_to_shadow(vvmcs, VM_ENTRY_INTR_INFO);

int i;
u64 r;
void *vvmcs = vcpu_nestedhvm(v).nv_vvmcx;
+ u32 control;
for ( i = 0; i < ARRAY_SIZE(vmcs_h2g_field); i++ )
{
hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4));
hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3));
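+ /* Honour L1's "load IA32_PAT" VM-exit control: restore L1's PAT from the vvmcs HOST_PAT field. */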
+ control = __get_vvmcs(vvmcs, VM_EXIT_CONTROLS);
+ if ( control & VM_EXIT_LOAD_HOST_PAT )
+ hvm_set_guest_pat(v, __get_vvmcs(vvmcs, HOST_PAT));
+
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
__set_vvmcs(vvmcs, VM_ENTRY_INTR_INFO, 0);

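/* bits 0-8, 10, 11, 13, 14, 16 and 17 must be 1 (refer to G4 of SDM) */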
tmp = 0x36dff;
data = VM_EXIT_ACK_INTR_ON_EXIT |
VM_EXIT_IA32E_MODE |
- VM_EXIT_SAVE_PREEMPT_TIMER;
+ VM_EXIT_SAVE_PREEMPT_TIMER |
+ VM_EXIT_SAVE_GUEST_PAT |
+ VM_EXIT_LOAD_HOST_PAT;
/* 0-settings */
data = ((data | tmp) << 32) | tmp;
break;
case MSR_IA32_VMX_ENTRY_CTLS:
/* bits 0-8 and bit 12 must be 1 (refer to G5 of SDM) */
- data = 0x11ff;
- data = (data << 32) | data;
+ tmp = 0x11ff;
+ data = VM_ENTRY_LOAD_GUEST_PAT;
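+ /* High dword: allowed 1-settings; low dword: allowed 0-settings (bits that must be 1). */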
+ data = ((data | tmp) << 32) | tmp;
break;
case IA32_FEATURE_CONTROL_MSR: