Keep separate ASID state for the L1 guest and for the nested (L2) guest context of a vCPU (n1asid and nv_n2asid), so that each context keeps a valid ASID across guest switches. This avoids TLB flushing on every L1/L2 transition.
Signed-off-by: Keir Fraser <keir@xen.org>
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
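
As background, here is a minimal standalone model (not part of the patch) of the generation-based allocation that the new per-context state plugs into: each context carries its own {generation, asid} pair, a zeroed generation forces re-allocation at the next VM entry, and being handed ASID 1 signals that a new generation has started and the whole TLB must be flushed once. The pool size and the main() driver are illustrative only; structure and function names mirror the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Per-context ASID state, as introduced by this patch. */
struct hvm_vcpu_asid {
    uint64_t generation;
    uint32_t asid;
};

/* Simplified per-core allocator state (models struct hvm_asid_data).
 * The pool size of 8 is illustrative; the real limit comes from hardware. */
static struct {
    uint64_t core_asid_generation;
    uint32_t next_asid;
    uint32_t max_asid;
} core = { .core_asid_generation = 1, .next_asid = 1, .max_asid = 8 };

/* Models hvm_asid_flush_vcpu_asid(): generation 0 never matches the core
 * generation, so the next VM entry re-allocates. */
static void flush_ctx(struct hvm_vcpu_asid *a)
{
    a->generation = 0;
}

/* Models hvm_asid_handle_vmenter(): returns true when the caller must
 * flush the whole TLB before entering the guest. */
static bool handle_vmenter(struct hvm_vcpu_asid *a)
{
    /* Still valid in the current generation: keep the ASID, no flush. */
    if ( a->generation == core.core_asid_generation )
        return false;

    /* Pool exhausted: start a new generation; every old ASID is stale. */
    if ( core.next_asid > core.max_asid )
    {
        core.core_asid_generation++;
        core.next_asid = 1;
    }

    a->asid = core.next_asid++;
    a->generation = core.core_asid_generation;

    /* Handing out ASID 1 means a new generation began: flush once. */
    return (a->asid == 1);
}

int main(void)
{
    struct hvm_vcpu_asid n1 = { 0, 0 }, n2 = { 0, 0 };

    /* L1 and L2 each get their own ASID; switching between them
     * afterwards never forces a flush. */
    printf("enter L1:   flush=%d asid=%u\n", handle_vmenter(&n1), n1.asid);
    printf("enter L2:   flush=%d asid=%u\n", handle_vmenter(&n2), n2.asid);
    printf("back to L1: flush=%d asid=%u\n", handle_vmenter(&n1), n1.asid);

    /* L1 changed the ASID in its nested VMCB: only the L2 allocation is
     * invalidated; L1 keeps its ASID. */
    flush_ctx(&n2);
    printf("re-enter L2: flush=%d asid=%u\n", handle_vmenter(&n2), n2.asid);
    return 0;
}

Once the L1 and the L2 context each hold an ASID of the current generation, moving between them only changes which ASID is written to the VMCB; a full TLB flush is needed again only when the pool is exhausted and a new generation starts.
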
data->next_asid = 1;
}
+void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid)
+{
+ asid->generation = 0;
+}
+
void hvm_asid_flush_vcpu(struct vcpu *v)
{
- v->arch.hvm_vcpu.asid_generation = 0;
+ hvm_asid_flush_vcpu_asid(&v->arch.hvm_vcpu.n1asid);
+ hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
}
void hvm_asid_flush_core(void)
data->disabled = 1;
}
-bool_t hvm_asid_handle_vmenter(void)
+bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid)
{
- struct vcpu *curr = current;
struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
/* On erratum #170 systems we must flush the TLB.
goto disabled;
/* Test if VCPU has valid ASID. */
- if ( curr->arch.hvm_vcpu.asid_generation == data->core_asid_generation )
+ if ( asid->generation == data->core_asid_generation )
return 0;
/* If there are no free ASIDs, need to go to a new generation */
}
/* Now guaranteed to be a free ASID. */
- curr->arch.hvm_vcpu.asid = data->next_asid++;
- curr->arch.hvm_vcpu.asid_generation = data->core_asid_generation;
+ asid->asid = data->next_asid++;
+ asid->generation = data->core_asid_generation;
/*
* When we assign ASID 1, flush all TLB entries as we are starting a new
* generation, and all old ASID allocations are now stale.
*/
- return (curr->arch.hvm_vcpu.asid == 1);
+ return (asid->asid == 1);
disabled:
- curr->arch.hvm_vcpu.asid = 0;
+ asid->asid = 0;
return 0;
}
#include <xen/perfc.h>
#include <asm/hvm/svm/asid.h>
#include <asm/amd.h>
+#include <asm/hvm/nestedhvm.h>
void svm_asid_init(struct cpuinfo_x86 *c)
{
{
struct vcpu *curr = current;
struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
- bool_t need_flush = hvm_asid_handle_vmenter();
+ struct hvm_vcpu_asid *p_asid =
+ nestedhvm_vcpu_in_guestmode(curr)
+ ? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm_vcpu.n1asid;
+ bool_t need_flush = hvm_asid_handle_vmenter(p_asid);
/* ASID 0 indicates that ASIDs are disabled. */
- if ( curr->arch.hvm_vcpu.asid == 0 )
+ if ( p_asid->asid == 0 )
{
vmcb_set_guest_asid(vmcb, 1);
vmcb->tlb_control = 1;
return;
}
- vmcb_set_guest_asid(vmcb, curr->arch.hvm_vcpu.asid);
+ vmcb_set_guest_asid(vmcb, p_asid->asid);
vmcb->tlb_control = need_flush;
}
/* Cleanbits */
n1vmcb->cleanbits.bytes = 0;
- hvm_asid_flush_vcpu(v);
-
return 0;
}
if (rc)
return rc;
- /* ASID */
- hvm_asid_flush_vcpu(v);
- /* n2vmcb->_guest_asid = ns_vmcb->_guest_asid; */
+ /* ASID - Emulation handled in hvm_asid_handle_vmenter() */
/* TLB control */
n2vmcb->tlb_control = n1vmcb->tlb_control | ns_vmcb->tlb_control;
svm->ns_vmcb_guestcr3 = ns_vmcb->_cr3;
svm->ns_vmcb_hostcr3 = ns_vmcb->_h_cr3;
- nv->nv_flushp2m = (ns_vmcb->tlb_control
- || (svm->ns_guest_asid != ns_vmcb->_guest_asid));
- svm->ns_guest_asid = ns_vmcb->_guest_asid;
+ nv->nv_flushp2m = ns_vmcb->tlb_control;
+ if ( svm->ns_guest_asid != ns_vmcb->_guest_asid )
+ {
+ nv->nv_flushp2m = 1;
+ hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
+ svm->ns_guest_asid = ns_vmcb->_guest_asid;
+ }
/* nested paging for the guest */
svm->ns_hap_enabled = (ns_vmcb->_np_enable) ? 1 : 0;
__update_guest_eip(regs, inst_len);
}
+static void svm_invlpga_intercept(
+ struct vcpu *v, unsigned long vaddr, uint32_t asid)
+{
+ /* The ASID operand is in the guest's own numbering: 0 names the L1
+ * guest itself, any other value one of its nested (L2) guests. Re-issue
+ * the flush with the host-allocated ASID backing that context. */
+ svm_invlpga(vaddr,
+ (asid == 0)
+ ? v->arch.hvm_vcpu.n1asid.asid
+ : vcpu_nestedhvm(v).nv_n2asid.asid);
+}
+
static void svm_invlpg_intercept(unsigned long vaddr)
{
struct vcpu *curr = current;
case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
case VMEXIT_INVLPG:
- case VMEXIT_INVLPGA:
if ( !handle_mmio() )
hvm_inject_exception(TRAP_gp_fault, 0, 0);
break;
+ case VMEXIT_INVLPGA:
+ svm_invlpga_intercept(v, regs->rax, regs->ecx);
+ break;
+
case VMEXIT_VMMCALL:
if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
break;
#endif
}
- if ( cpu_has_vmx_vpid )
- __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vcpu.asid);
-
if ( cpu_has_vmx_pat && paging_mode_hap(d) )
{
u64 host_pat, guest_pat;
{
struct vcpu *curr = current;
u32 new_asid, old_asid;
+ struct hvm_vcpu_asid *p_asid;
bool_t need_flush;
if ( !cpu_has_vmx_vpid )
goto out;
- old_asid = curr->arch.hvm_vcpu.asid;
- need_flush = hvm_asid_handle_vmenter();
- new_asid = curr->arch.hvm_vcpu.asid;
+ p_asid = &curr->arch.hvm_vcpu.n1asid;
+ old_asid = p_asid->asid;
+ need_flush = hvm_asid_handle_vmenter(p_asid);
+ new_asid = p_asid->asid;
if ( unlikely(new_asid != old_asid) )
{
#include <xen/config.h>
struct vcpu;
+struct hvm_vcpu_asid;
/* Initialise ASID management for the current physical CPU. */
void hvm_asid_init(int nasids);
-/* Invalidate a VCPU's current ASID allocation: forces re-allocation. */
+/* Invalidate a particular ASID allocation: forces re-allocation. */
+void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid);
+
+/* Invalidate all ASID allocations for specified VCPU: forces re-allocation. */
void hvm_asid_flush_vcpu(struct vcpu *v);
/* Flush all ASIDs on this processor core. */
/* Called before entry to guest context. Checks ASID allocation, returns a
* boolean indicating whether all ASIDs must be flushed. */
-bool_t hvm_asid_handle_vmenter(void);
+bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid);
#endif /* __ASM_X86_HVM_ASID_H__ */
HVMIO_completed
};
+struct hvm_vcpu_asid {
+ uint64_t generation;
+ uint32_t asid;
+};
+
#define VMCX_EADDR (~0ULL)
struct nestedvcpu {
bool_t nv_flushp2m; /* True, when p2m table must be flushed */
struct p2m_domain *nv_p2m; /* used p2m table for this vcpu */
+ struct hvm_vcpu_asid nv_n2asid;
+
bool_t nv_vmentry_pending;
bool_t nv_vmexit_pending;
bool_t nv_vmswitch_in_progress; /* true during vmentry/vmexit emulation */
bool_t hcall_preempted;
bool_t hcall_64bit;
- uint64_t asid_generation;
- uint32_t asid;
+ struct hvm_vcpu_asid n1asid;
u32 msr_tsc_aux;
type = INVVPID_ALL_CONTEXT;
execute_invvpid:
- __invvpid(type, v->arch.hvm_vcpu.asid, (u64)gva);
+ __invvpid(type, v->arch.hvm_vcpu.n1asid.asid, (u64)gva);
}
static inline void vpid_sync_all(void)