hvm_funcs.set_rdtsc_exiting(v, enable);
}
+/*
+ * Read the guest's PAT value.  Ask the vendor (SVM/VMX) hook first; if it
+ * declines (returns 0, e.g. when the value is not held in hardware), fall
+ * back to the software-maintained copy in v->arch.hvm_vcpu.pat_cr.
+ */
+void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat)
+{
+ if ( !hvm_funcs.get_guest_pat(v, guest_pat) )
+ *guest_pat = v->arch.hvm_vcpu.pat_cr;
+}
+
+/*
+ * Validate and install a new guest PAT value.
+ *
+ * Each of the eight byte-wide PAT entries must be a defined memory type
+ * encoding: 0, 1, 4, 5, 6 or 7 (per the SDM these are UC/WC/WT/WP/WB/UC-;
+ * 2 and 3 are reserved -- hence rejected here).
+ *
+ * Returns 0 if any entry is invalid (callers inject #GP in that case),
+ * 1 on success.  If the vendor hook does not accept the value (returns 0,
+ * e.g. no hardware PAT field available), keep it in the software copy
+ * v->arch.hvm_vcpu.pat_cr instead.
+ */
+int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat)
+{
+ int i;
+ uint8_t *value = (uint8_t *)&guest_pat;
+
+ /* Check every PAT entry for a reserved memory-type encoding. */
+ for ( i = 0; i < 8; i++ )
+ if ( unlikely(!(value[i] == 0 || value[i] == 1 ||
+ value[i] == 4 || value[i] == 5 ||
+ value[i] == 6 || value[i] == 7)) ) {
+ HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid guest PAT: %"PRIx64"\n",
+ guest_pat);
+ return 0;
+ }
+
+ if ( !hvm_funcs.set_guest_pat(v, guest_pat) )
+ v->arch.hvm_vcpu.pat_cr = guest_pat;
+ return 1;
+}
+
void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
{
uint64_t tsc;
break;
case MSR_IA32_CR_PAT:
- *msr_content = v->arch.hvm_vcpu.pat_cr;
+ hvm_get_guest_pat(v, msr_content);
break;
case MSR_MTRRcap:
break;
case MSR_IA32_CR_PAT:
- if ( !pat_msr_set(&v->arch.hvm_vcpu.pat_cr, msr_content) )
+ if ( !hvm_set_guest_pat(v, msr_content) )
goto gp_fault;
break;
return pat_type_2_pte_flags(pat_entry_value);
}
-/* Helper funtions for seting mtrr/pat */
-bool_t pat_msr_set(uint64_t *pat, uint64_t msr_content)
-{
- uint8_t *value = (uint8_t*)&msr_content;
- int32_t i;
-
- if ( *pat != msr_content )
- {
- for ( i = 0; i < 8; i++ )
- if ( unlikely(!(value[i] == 0 || value[i] == 1 ||
- value[i] == 4 || value[i] == 5 ||
- value[i] == 6 || value[i] == 7)) )
- return 0;
-
- *pat = msr_content;
- }
-
- return 1;
-}
-
bool_t mtrr_def_type_msr_set(struct mtrr_state *m, uint64_t msr_content)
{
uint8_t def_type = msr_content & 0xff;
{
mtrr_state = &v->arch.hvm_vcpu.mtrr;
- hw_mtrr.msr_pat_cr = v->arch.hvm_vcpu.pat_cr;
+ hvm_get_guest_pat(v, &hw_mtrr.msr_pat_cr);
hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
| (mtrr_state->enabled << 10);
mtrr_state = &v->arch.hvm_vcpu.mtrr;
- pat_msr_set(&v->arch.hvm_vcpu.pat_cr, hw_mtrr.msr_pat_cr);
+ hvm_set_guest_pat(v, hw_mtrr.msr_pat_cr);
mtrr_state->mtrr_cap = hw_mtrr.msr_mtrr_cap;
svm_vmload(vmcb);
}
+static int svm_set_guest_pat(struct vcpu *v, u64 gpat)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+ if ( !paging_mode_hap(v->domain) )
+ return 0;
+
+ vmcb_set_g_pat(vmcb, gpat);
+ return 1;
+}
+
+/*
+ * SVM hook: read the guest PAT from the VMCB's G_PAT field.
+ * Only valid with nested paging (HAP); otherwise return 0 so the generic
+ * code falls back to the software PAT copy.
+ */
+static int svm_get_guest_pat(struct vcpu *v, u64 *gpat)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+ if ( !paging_mode_hap(v->domain) )
+ return 0;
+
+ *gpat = vmcb_get_g_pat(vmcb);
+ return 1;
+}
+
static uint64_t svm_get_tsc_offset(uint64_t host_tsc, uint64_t guest_tsc,
uint64_t ratio)
{
.update_host_cr3 = svm_update_host_cr3,
.update_guest_cr = svm_update_guest_cr,
.update_guest_efer = svm_update_guest_efer,
+ .set_guest_pat = svm_set_guest_pat,
+ .get_guest_pat = svm_get_guest_pat,
.set_tsc_offset = svm_set_tsc_offset,
.inject_exception = svm_inject_exception,
.init_hypercall_page = svm_init_hypercall_page,
vmx_vmcs_exit(v);
}
+/*
+ * VMX hook: write the guest PAT into the VMCS GUEST_PAT field.
+ * Requires both the "load/save PAT" VMX capability and HAP (EPT);
+ * otherwise return 0 so the generic code uses the software PAT copy.
+ */
+static int vmx_set_guest_pat(struct vcpu *v, u64 gpat)
+{
+ if ( !cpu_has_vmx_pat || !paging_mode_hap(v->domain) )
+ return 0;
+
+ vmx_vmcs_enter(v);
+ __vmwrite(GUEST_PAT, gpat);
+#ifdef __i386__
+ /* On 32-bit, 64-bit VMCS fields are written as two 32-bit halves. */
+ __vmwrite(GUEST_PAT_HIGH, gpat >> 32);
+#endif
+ vmx_vmcs_exit(v);
+ return 1;
+}
+
+/*
+ * VMX hook: read the guest PAT from the VMCS GUEST_PAT field.
+ * Requires both the "load/save PAT" VMX capability and HAP (EPT);
+ * otherwise return 0 so the generic code falls back to the software copy.
+ */
+static int vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
+{
+ vmx_vmcs_enter(v);
+ *gpat = __vmread(GUEST_PAT);
+#ifdef __i386__
+ /* On 32-bit, 64-bit VMCS fields are read as two 32-bit halves. */
+ *gpat |= (u64)__vmread(GUEST_PAT_HIGH) << 32;
+#endif
+ vmx_vmcs_exit(v);
+ return 1;
+}
+
static void vmx_set_tsc_offset(struct vcpu *v, u64 offset)
{
vmx_vmcs_enter(v);
.update_host_cr3 = vmx_update_host_cr3,
.update_guest_cr = vmx_update_guest_cr,
.update_guest_efer = vmx_update_guest_efer,
+ .set_guest_pat = vmx_set_guest_pat,
+ .get_guest_pat = vmx_get_guest_pat,
.set_tsc_offset = vmx_set_tsc_offset,
.inject_exception = vmx_inject_exception,
.init_hypercall_page = vmx_init_hypercall_page,
void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
void (*update_guest_efer)(struct vcpu *v);
+ int (*get_guest_pat)(struct vcpu *v, u64 *);
+ int (*set_guest_pat)(struct vcpu *v, u64);
+
void (*set_tsc_offset)(struct vcpu *v, u64 offset);
void (*inject_exception)(unsigned int trapnr, int errcode,
bool_t hvm_send_assist_req(struct vcpu *v);
+void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
+int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
+
void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc);
u64 hvm_get_guest_tsc(struct vcpu *v);