#include <asm/hvm/trace.h>
#include <asm/hap.h>
+u32 svm_feature_flags;
+
#define set_segment_register(name, value) \
asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
setup_vmcb_dump();
+ svm_feature_flags = ((cpuid_eax(0x80000000) >= 0x8000000A) ?
+ cpuid_edx(0x8000000A) : 0);
+
#ifdef __x86_64__
/*
* Check CPUID for nested paging support. We support NPT only on 64-bit
* hosts since the phys-to-machine table is in host format. Hence 32-bit
* Xen could only support guests using NPT with up to a 4GB memory map.
*/
- svm_function_table.hap_supported = (cpuid_edx(0x8000000A) & 1);
+ svm_function_table.hap_supported = cpu_has_svm_npt;
#endif
hvm_enable(&svm_function_table);
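For reference, a minimal standalone sketch (not part of the patch) of the CPUID leaf 0x8000000A query whose EDX value svm_feature_flags caches above; the bit positions follow the AMD64 manual and match the SVM_FEATURE_* constants added to the header further down, and the helper name is made up for illustration:

/* CPUID Fn8000_000A EDX: bit 0 = NP, bit 1 = LBRV, bit 2 = SVML, bit 3 = NRIPS. */
static int sketch_host_has_lbrv(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* Leaf 0x8000000A is only defined when the maximum extended leaf reaches
     * it, which is what the cpuid_eax(0x80000000) check above guards. */
    asm volatile ( "cpuid"
                   : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                   : "0" (0x8000000A), "2" (0) );

    return (edx >> 1) & 1;   /* LBR virtualisation available on this host */
}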
msr_content = 0;
break;
+ case MSR_IA32_DEBUGCTLMSR:
+ msr_content = vmcb->debugctlmsr;
+ break;
+
+ case MSR_IA32_LASTBRANCHFROMIP:
+ msr_content = vmcb->lastbranchfromip;
+ break;
+
+ case MSR_IA32_LASTBRANCHTOIP:
+ msr_content = vmcb->lastbranchtoip;
+ break;
+
+ case MSR_IA32_LASTINTFROMIP:
+ msr_content = vmcb->lastintfromip;
+ break;
+
+ case MSR_IA32_LASTINTTOIP:
+ msr_content = vmcb->lastinttoip;
+ break;
+
default:
    if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
         rdmsr_safe(ecx, eax, edx) == 0 )
        msr_content = eax | ((u64)edx << 32);
    else
        svm_inject_exception(v, TRAP_gp_fault, 1, 0);
break;
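To make the new MSR cases concrete (the read cases above, the write cases below), here is a hedged guest-side sketch of the model being virtualised: the guest sets DebugCtl bit 0 to start last-branch recording, then reads the LASTBRANCH*/LASTINT* MSRs. The MSR numbers are the architectural ones; the helper names are illustrative only and not part of the patch:

#include <stdint.h>

#define DEBUGCTL_MSR          0x000001d9   /* DebugCtl: bit 0 enables LBR */
#define LASTBRANCHFROMIP_MSR  0x000001db

static inline void sketch_wrmsr(uint32_t msr, uint64_t val)
{
    asm volatile ( "wrmsr" : : "c" (msr), "a" ((uint32_t)val),
                   "d" ((uint32_t)(val >> 32)) );
}

static inline uint64_t sketch_rdmsr(uint32_t msr)
{
    uint32_t lo, hi;
    asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
    return ((uint64_t)hi << 32) | lo;
}

/* Turn on last-branch recording, then read the most recent branch source. */
static uint64_t sketch_sample_last_branch(void)
{
    sketch_wrmsr(DEBUGCTL_MSR, sketch_rdmsr(DEBUGCTL_MSR) | 1);
    return sketch_rdmsr(LASTBRANCHFROMIP_MSR);
}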
+ case MSR_IA32_DEBUGCTLMSR:
+ vmcb->debugctlmsr = msr_content;
+ if ( !msr_content || !cpu_has_svm_lbrv )
+ break;
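+ /* Guest has enabled LBR and the host supports LBR virtualisation:
+  * let the VMCB save/restore the LBR MSRs and stop intercepting
+  * guest accesses to them. */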
+ vmcb->lbr_control.fields.enable = 1;
+ svm_disable_intercept_for_msr(v, MSR_IA32_DEBUGCTLMSR);
+ svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHFROMIP);
+ svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHTOIP);
+ svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTFROMIP);
+ svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTTOIP);
+ break;
+
+ case MSR_IA32_LASTBRANCHFROMIP:
+ vmcb->lastbranchfromip = msr_content;
+ break;
+
+ case MSR_IA32_LASTBRANCHTOIP:
+ vmcb->lastbranchtoip = msr_content;
+ break;
+
+ case MSR_IA32_LASTINTFROMIP:
+ vmcb->lastintfromip = msr_content;
+ break;
+
+ case MSR_IA32_LASTINTTOIP:
+ vmcb->lastinttoip = msr_content;
+ break;
+
default:
switch ( long_mode_do_msr_write(regs) )
{
return hsa;
}
-static void disable_intercept_for_msr(char *msr_bitmap, u32 msr)
+void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr)
{
+ char *msr_bitmap = v->arch.hvm_svm.msrpm;
+
/*
* See AMD64 Programmer's Manual, Vol 2, Section 15.10 (MSR-Bitmap Address).
*/
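For readers without the manual to hand, a rough standalone sketch of the 2-bits-per-MSR permission-map indexing that section describes, with the three architectural MSR ranges at byte offsets 0x0, 0x800 and 0x1000 of the bitmap; the function name is hypothetical and this is not the patch's implementation:

static void sketch_clear_msr_intercept(unsigned char *msr_bitmap, unsigned int msr)
{
    unsigned char *base;

    if ( msr <= 0x1fff )
        base = msr_bitmap + 0x0;          /* MSRs 0x00000000-0x00001fff */
    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
    {
        base = msr_bitmap + 0x800;        /* MSRs 0xc0000000-0xc0001fff */
        msr &= 0x1fff;
    }
    else if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) )
    {
        base = msr_bitmap + 0x1000;       /* MSRs 0xc0010000-0xc0011fff */
        msr &= 0x1fff;
    }
    else
        return;                           /* not mapped: always intercepted */

    /* Two bits per MSR: bit 2n is the read intercept, bit 2n+1 the write. */
    base[(msr * 2) / 8] &= (unsigned char)~(3u << ((msr * 2) % 8));
}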
return -ENOMEM;
memset(arch_svm->msrpm, 0xff, MSRPM_SIZE);
- disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_FS_BASE);
- disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_GS_BASE);
- disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_SHADOW_GS_BASE);
- disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_CSTAR);
- disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_LSTAR);
- disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_STAR);
- disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_SYSCALL_MASK);
- disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_CS);
- disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_ESP);
- disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_EIP);
+ svm_disable_intercept_for_msr(v, MSR_FS_BASE);
+ svm_disable_intercept_for_msr(v, MSR_GS_BASE);
+ svm_disable_intercept_for_msr(v, MSR_SHADOW_GS_BASE);
+ svm_disable_intercept_for_msr(v, MSR_CSTAR);
+ svm_disable_intercept_for_msr(v, MSR_LSTAR);
+ svm_disable_intercept_for_msr(v, MSR_STAR);
+ svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
+ svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
+ svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
+ svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
vmcb->msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
vmcb->iopm_base_pa = (u64)virt_to_maddr(hvm_io_bitmap);
: : "a" (__pa(vmcb)) : "memory" );
}
+extern u32 svm_feature_flags;
+
+#define SVM_FEATURE_NPT 0
+#define SVM_FEATURE_LBRV 1
+#define SVM_FEATURE_SVML 2
+#define SVM_FEATURE_NRIPS 3
+
+#define cpu_has_svm_npt test_bit(SVM_FEATURE_NPT, &svm_feature_flags)
+#define cpu_has_svm_lbrv test_bit(SVM_FEATURE_LBRV, &svm_feature_flags)
+#define cpu_has_svm_svml test_bit(SVM_FEATURE_SVML, &svm_feature_flags)
+#define cpu_has_svm_nrips test_bit(SVM_FEATURE_NRIPS, &svm_feature_flags)
+
#endif /* __ASM_X86_HVM_SVM_H__ */
} fields;
} __attribute__ ((packed)) ioio_info_t;
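+/* VMCB LBR virtualisation control (offset 0xB8): setting bit 0 makes the
+ * processor save/restore DebugCtl and the LastBranch/LastInt MSRs on
+ * VMRUN/#VMEXIT. */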
+typedef union
+{
+ u64 bytes;
+ struct
+ {
+ u64 enable:1;
+ } fields;
+} __attribute__ ((packed)) lbrctrl_t;
+
struct vmcb_struct {
u32 cr_intercepts; /* offset 0x00 */
u32 dr_intercepts; /* offset 0x04 */
u64 res08[2];
eventinj_t eventinj; /* offset 0xA8 */
u64 h_cr3; /* offset 0xB0 */
- u64 res09[105]; /* offset 0xB8 pad to save area */
+ lbrctrl_t lbr_control; /* offset 0xB8 */
+ u64 res09[104]; /* offset 0xC0 pad to save area */
svm_segment_register_t es; /* offset 1024 */
svm_segment_register_t cs;
u64 pdpe2;
u64 pdpe3;
u64 g_pat;
- u64 res16[50];
- u64 res17[128];
- u64 res18[128];
+ u64 debugctlmsr;
+ u64 lastbranchfromip;
+ u64 lastbranchtoip;
+ u64 lastintfromip;
+ u64 lastinttoip;
+ u64 res16[301];
} __attribute__ ((packed));
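One way to sanity-check the reserved-array arithmetic in the hunks above is a pair of compile-time asserts against the offsets stated in the struct's comments; illustrative only, assuming the full packed struct vmcb_struct definition is in scope:

#include <stddef.h>   /* offsetof */

/* Compilation fails if the new LBR fields disturb the documented layout. */
typedef char assert_lbr_ctl_at_0xb8[
    (offsetof(struct vmcb_struct, lbr_control) == 0xB8) ? 1 : -1];
typedef char assert_save_area_at_1024[
    (offsetof(struct vmcb_struct, es) == 1024) ? 1 : -1];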
-
struct arch_svm_struct {
struct vmcb_struct *vmcb;
- u64 vmcb_pa;
- u64 asid_generation; /* ASID tracking, moved here to
- prevent cacheline misses. */
- u32 *msrpm;
- int launch_core;
- bool_t vmcb_in_sync; /* VMCB sync'ed with VMSAVE? */
+ u64 vmcb_pa;
+ u64 asid_generation; /* ASID tracking, moved here for cache locality. */
+ char *msrpm;
+ int launch_core;
+ bool_t vmcb_in_sync; /* VMCB sync'ed with VMSAVE? */
};
struct vmcb_struct *alloc_vmcb(void);
void setup_vmcb_dump(void);
+void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr);
+
#endif /* ASM_X86_HVM_SVM_VMCB_H__ */
/*