Factor the open-coded INVLPGA instruction bytes into a common
svm_invlpga() helper in the SVM header and use it from
svm_asid_g_invlpg().

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>

--- a/xen/arch/x86/hvm/svm/asid.c
+++ b/xen/arch/x86/hvm/svm/asid.c
@@ ... @@ void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
{
#if 0
    /* Optimization? */
-    asm volatile (".byte 0x0F,0x01,0xDF \n"
-                  : /* output */
-                  : /* input */
-                  "a" (g_vaddr), "c"(v->arch.hvm_svm.vmcb->guest_asid) );
+    svm_invlpga(g_vaddr, v->arch.hvm_svm.vmcb->guest_asid);
#endif

    /* Safe fallback. Take a new ASID. */
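
(INVLPGA invalidates just the TLB entry for the guest linear address in
rAX under the ASID in ECX; the safe fallback instead retires the vCPU's
whole ASID. As a minimal sketch, the function reads roughly as below
after this hunk; the enclosing function signature and the
svm_asid_inv_asid() fallback call are assumptions taken from the
surrounding code, not part of the hunk itself.)

void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
{
#if 0
    /* Optimization? */
    svm_invlpga(g_vaddr, v->arch.hvm_svm.vmcb->guest_asid);
#endif

    /* Safe fallback. Take a new ASID. */
    svm_asid_inv_asid(v);
}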
: : "a" (__pa(vmcb)) : "memory" );
}
+static inline void svm_invlpga(unsigned long vaddr, uint32_t asid)
+{
+    asm volatile (
+        ".byte 0x0f,0x01,0xdf" /* invlpga */
+        : /* output */
+        : /* input */
+        "a" (vaddr), "c" (asid));
+}
+
unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
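
For reference, 0x0f,0x01,0xdf is the AMD APM encoding of INVLPGA, which
takes the linear address in rAX and the ASID in ECX; the raw .byte form
avoids requiring assembler support for the SVM mnemonics. The unchanged
context lines at the top of the second hunk are the tail of the
neighbouring VMLOAD/VMSAVE wrappers, which follow the same pattern. A
sketch of that pattern, with the helper names assumed rather than quoted
from the header:

static inline void svm_vmload(void *vmcb)
{
    asm volatile (
        ".byte 0x0f,0x01,0xda" /* vmload */
        : : "a" (__pa(vmcb)) : "memory" );
}

static inline void svm_vmsave(void *vmcb)
{
    asm volatile (
        ".byte 0x0f,0x01,0xdb" /* vmsave */
        : : "a" (__pa(vmcb)) : "memory" );
}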