        rc = X86EMUL_OKAY;
    }

-    if ( rc == X86EMUL_OKAY && is_canonical_address(addr) )
-        hvm_funcs.invlpg_intercept(addr);
+    if ( rc == X86EMUL_OKAY )
+        paging_invlpg(current, addr);

    return rc;
}
static void svm_invlpg_intercept(unsigned long vaddr)
{
-    struct vcpu *curr = current;
    HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(vaddr));
-    if ( paging_invlpg(curr, vaddr) )
-        svm_asid_g_invlpg(curr, vaddr);
+    paging_invlpg(current, vaddr);
+}
+
+static void svm_invlpg(struct vcpu *v, unsigned long vaddr)
+{
+    svm_asid_g_invlpg(v, vaddr);
}
static struct hvm_function_table __initdata svm_function_table = {
    .inject_trap = svm_inject_trap,
    .init_hypercall_page = svm_init_hypercall_page,
    .event_pending = svm_event_pending,
+    .invlpg = svm_invlpg,
    .cpuid_intercept = svm_cpuid_intercept,
    .wbinvd_intercept = svm_wbinvd_intercept,
    .fpu_dirty_intercept = svm_fpu_dirty_intercept,
    .msr_read_intercept = svm_msr_read_intercept,
    .msr_write_intercept = svm_msr_write_intercept,
-    .invlpg_intercept = svm_invlpg_intercept,
    .set_rdtsc_exiting = svm_set_rdtsc_exiting,
    .get_insn_bytes = svm_get_insn_bytes,
static void vmx_fpu_dirty_intercept(void);
static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content);
-static void vmx_invlpg_intercept(unsigned long vaddr);
+static void vmx_invlpg(struct vcpu *v, unsigned long vaddr);
static int vmx_vmfunc_intercept(struct cpu_user_regs *regs);

struct vmx_pi_blocking_vcpu {
    .inject_trap = vmx_inject_trap,
    .init_hypercall_page = vmx_init_hypercall_page,
    .event_pending = vmx_event_pending,
+    .invlpg = vmx_invlpg,
    .cpu_up = vmx_cpu_up,
    .cpu_down = vmx_cpu_down,
    .cpuid_intercept = vmx_cpuid_intercept,
    .fpu_dirty_intercept = vmx_fpu_dirty_intercept,
    .msr_read_intercept = vmx_msr_read_intercept,
    .msr_write_intercept = vmx_msr_write_intercept,
-    .invlpg_intercept = vmx_invlpg_intercept,
    .vmfunc_intercept = vmx_vmfunc_intercept,
    .handle_cd = vmx_handle_cd,
    .set_info_guest = vmx_set_info_guest,
static void vmx_invlpg_intercept(unsigned long vaddr)
{
-    struct vcpu *curr = current;
    HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
-    if ( paging_invlpg(curr, vaddr) && cpu_has_vmx_vpid )
-        vpid_sync_vcpu_gva(curr, vaddr);
+    paging_invlpg(current, vaddr);
+}
+
+static void vmx_invlpg(struct vcpu *v, unsigned long vaddr)
+{
+    if ( cpu_has_vmx_vpid )
+        vpid_sync_vcpu_gva(v, vaddr);
}

static int vmx_vmfunc_intercept(struct cpu_user_regs *regs)
        case MMUEXT_INVLPG_LOCAL:
            if ( unlikely(d != pg_owner) )
                rc = -EPERM;
-            else if ( !paging_mode_enabled(d)
-                      ? __addr_ok(op.arg1.linear_addr)
-                      : paging_invlpg(curr, op.arg1.linear_addr) )
-                flush_tlb_one_local(op.arg1.linear_addr);
+            else
+                paging_invlpg(curr, op.arg1.linear_addr);
            break;

        case MMUEXT_TLB_FLUSH_MULTI:
        switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) )
        {
        case UVMF_LOCAL:
-            if ( !paging_mode_enabled(d) || paging_invlpg(v, va) )
-                flush_tlb_one_local(va);
+            paging_invlpg(v, va);
            break;
        case UVMF_ALL:
            flush_tlb_one_mask(d->domain_dirty_cpumask, va);
    return bad_pages;
}

+void paging_invlpg(struct vcpu *v, unsigned long va)
+{
+    if ( !is_canonical_address(va) )
+        return;
+
+    /*
+     * Ask the paging mode first: a zero return means the mode has
+     * already taken care of the flush (or it can safely be skipped).
+     */
+    if ( paging_mode_enabled(v->domain) &&
+         !paging_get_hostmode(v)->invlpg(v, va) )
+        return;
+
+    if ( is_pv_vcpu(v) )
+        flush_tlb_one_local(va);
+    else
+        hvm_funcs.invlpg(v, va);
+}
+
/*
 * Local variables:
 * mode: C
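
The new paging_invlpg() above centralises what used to be spread across its callers: the canonical-address check, the paging-mode hook, and the eventual hardware flush. To make the dispatch easy to follow, here is a standalone, runnable mock of the same control flow; every name below is hypothetical and stands in for the Xen construct named in its comment (this is a sketch, not Xen code):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the Xen structures used above. */
struct mock_vcpu {
    bool paging_enabled;                    /* paging_mode_enabled()     */
    bool is_pv;                             /* is_pv_vcpu()              */
    bool (*mode_invlpg)(unsigned long va);  /* paging mode ->invlpg hook */
};

/* 48-bit canonical check: bits 63:47 must sign-extend from bit 47
 * (assumes the usual arithmetic right shift on signed long). */
static bool canonical48(unsigned long va)
{
    return (long)(va << 16) >> 16 == (long)va;
}

static void mock_paging_invlpg(struct mock_vcpu *v, unsigned long va)
{
    if ( !canonical48(va) )
        return;                             /* silently ignored          */

    /* A zero return from the hook means the flush is already handled. */
    if ( v->paging_enabled && !v->mode_invlpg(va) )
        return;

    if ( v->is_pv )
        printf("flush_tlb_one_local(%#lx)\n", va);  /* PV: local flush  */
    else
        printf("vendor invlpg(%#lx)\n", va);        /* HVM: ASID/VPID   */
}

/* HAP-style hook: nothing to do in Xen's tables, ask for the HW flush. */
static bool hap_style(unsigned long va) { (void)va; return true; }

int main(void)
{
    struct mock_vcpu hvm = { .paging_enabled = true, .is_pv = false,
                             .mode_invlpg = hap_style };
    struct mock_vcpu pv  = { .paging_enabled = false, .is_pv = true };

    mock_paging_invlpg(&hvm, 0xffff800000001000UL); /* vendor invlpg()  */
    mock_paging_invlpg(&hvm, 0x8000000000000000UL); /* non-canonical    */
    mock_paging_invlpg(&pv,  0x00007fff00001000UL); /* local TLB flush  */
    return 0;
}

For a PV vcpu the call ends in flush_tlb_one_local(), which is why the MMUEXT_INVLPG_LOCAL and UVMF_LOCAL hunks above no longer need their explicit flush calls.
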
/*
 * HAP guests can handle invlpg without needing any action from Xen, so
- * should not be intercepting it.
+ * should not be intercepting it.  However, we need to correctly handle
+ * getting here from instruction emulation.
 */
static bool_t hap_invlpg(struct vcpu *v, unsigned long va)
{
-    if (nestedhvm_enabled(v->domain)) {
-        /* Emulate INVLPGA:
-         * Must perform the flush right now or an other vcpu may
-         * use it when we use the next VMRUN emulation, otherwise.
-         */
-        if ( vcpu_nestedhvm(v).nv_p2m )
-            p2m_flush(v, vcpu_nestedhvm(v).nv_p2m);
-        return 1;
-    }
+    /*
+     * Emulate INVLPGA:
+     * The flush must be performed right now; otherwise another vcpu
+     * may pick up the stale nested p2m before the next VMRUN emulation.
+     */
+    if ( nestedhvm_enabled(v->domain) && vcpu_nestedhvm(v).nv_p2m )
+        p2m_flush(v, vcpu_nestedhvm(v).nv_p2m);

-    HAP_ERROR("Intercepted a guest INVLPG (%pv) with HAP enabled\n", v);
-    domain_crash(v->domain);
-    return 0;
+    return 1;
}

static void hap_update_cr3(struct vcpu *v, int do_locking)
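
The comment in hap_invlpg() carries the key ordering constraint: with nested HVM, the flush has to happen during INVLPGA emulation itself, because another vcpu could reuse the shared nested p2m before the next VMRUN emulation runs. A self-contained sketch of that flow, with hypothetical names standing in for the vcpu_nestedhvm() state (illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical nested-HVM state; np2m mimics vcpu_nestedhvm(v).nv_p2m. */
struct mock_vcpu {
    bool nested_enabled;
    int *np2m;              /* shared nested p2m, NULL if none */
};

static void mock_p2m_flush(int *np2m)
{
    *np2m = 0;              /* stale translations gone immediately */
    printf("nested p2m flushed during emulation\n");
}

/* Mirrors the new hap_invlpg(): flush eagerly, never crash, and
 * return 1 so the generic layer still does the hardware flush. */
static bool mock_hap_invlpg(struct mock_vcpu *v)
{
    if ( v->nested_enabled && v->np2m )
        mock_p2m_flush(v->np2m);

    return true;
}

int main(void)
{
    int shared_np2m = 42;   /* pretend translations are cached here */
    struct mock_vcpu v = { .nested_enabled = true, .np2m = &shared_np2m };

    /*
     * If the flush were deferred to the next VMRUN emulation, another
     * vcpu sharing shared_np2m could still read the stale value 42 in
     * the meantime; flushing here closes that window.
     */
    mock_hap_invlpg(&v);
    printf("np2m now: %d\n", shared_np2m);
    return 0;
}

The unconditional return 1 (instead of the old domain_crash()) is what lets HAP guests reach this path from instruction emulation: paging_invlpg() then finishes the job via hvm_funcs.invlpg().
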
    void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
    int (*event_pending)(struct vcpu *v);
+    void (*invlpg)(struct vcpu *v, unsigned long vaddr);

    int (*cpu_up_prepare)(unsigned int cpu);
    void (*cpu_dead)(unsigned int cpu);

    void (*fpu_dirty_intercept)(void);
    int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content);
    int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content);
-    void (*invlpg_intercept)(unsigned long vaddr);
    int (*vmfunc_intercept)(struct cpu_user_regs *regs);
    void (*handle_cd)(struct vcpu *v, unsigned long value);
    void (*set_info_guest)(struct vcpu *v);
    return paging_get_hostmode(v)->page_fault(v, va, regs);
}

-/* Handle invlpg requests on vcpus.
- * Returns 1 if the invlpg instruction should be issued on the hardware,
- * or 0 if it's safe not to do so. */
-static inline bool_t paging_invlpg(struct vcpu *v, unsigned long va)
-{
-    return (paging_mode_external(v->domain) ? is_canonical_address(va)
-                                            : __addr_ok(va)) &&
-           paging_get_hostmode(v)->invlpg(v, va);
-}
+/* Handle invlpg requests on vcpus. */
+void paging_invlpg(struct vcpu *v, unsigned long va);

/* Translate a guest virtual address to the frame number that the
 * *guest* pagetables would map it to.  Returns INVALID_GFN if the guest