}
static void svm_invlpga_intercept(
- struct vcpu *v, unsigned long vaddr, uint32_t asid)
+ struct vcpu *v, unsigned long linear, uint32_t asid)
{
- svm_invlpga(vaddr,
+ svm_invlpga(linear,
(asid == 0)
? v->arch.hvm.n1asid.asid
: vcpu_nestedhvm(v).nv_n2asid.asid);
}
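
For background: INVLPGA flushes the TLB entry for one linear address under one
ASID, and hardware ASID 0 is reserved for the host. A guest INVLPGA naming
ASID 0 therefore means "my own address space", which Xen backs with the n1
ASID, while a non-zero guest ASID can only refer to a nested (L2) guest, which
Xen coalesces onto the per-vcpu n2 ASID. A minimal sketch of that selection,
with a hypothetical struct standing in for the vcpu fields used above:

    #include <stdint.h>

    /* Hypothetical stand-in for v->arch.hvm.n1asid.asid and
     * vcpu_nestedhvm(v).nv_n2asid.asid. */
    struct vcpu_asids {
        uint32_t n1_asid;   /* hardware ASID backing the L1 guest */
        uint32_t n2_asid;   /* hardware ASID backing the L2 guest */
    };

    static uint32_t effective_asid(const struct vcpu_asids *a,
                                   uint32_t guest_asid)
    {
        /* ASID 0 from the guest targets its own address space. */
        return guest_asid == 0 ? a->n1_asid : a->n2_asid;
    }
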
-static void svm_invlpg_intercept(unsigned long vaddr)
+static void svm_invlpg_intercept(unsigned long linear)
{
- HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(vaddr));
- paging_invlpg(current, vaddr);
+ HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(linear));
+ paging_invlpg(current, linear);
}
static bool is_invlpg(const struct x86_emulate_state *state,
(ext & 7) == 7;
}
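
The `(ext & 7) == 7` test keys off the instruction encoding: INVLPG is
0F 01 /7 with a memory operand, and the register forms (mod == 3) of the same
opcode group encode SWAPGS, RDTSCP and friends instead, so they must be
excluded. An illustrative version of the ModRM check, assuming a raw ModRM
byte rather than the emulator's decode state:

    #include <stdbool.h>
    #include <stdint.h>

    /* For opcode 0F 01, the ModRM reg field acts as an opcode
     * extension (the "/digit" in Intel syntax). */
    static bool modrm_selects_invlpg(uint8_t modrm)
    {
        uint8_t mod = modrm >> 6;        /* 3 means register operand */
        uint8_t ext = (modrm >> 3) & 7;  /* opcode extension digit   */

        return mod != 3 && ext == 7;     /* memory form of 0F 01 /7  */
    }
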
-static void svm_invlpg(struct vcpu *v, unsigned long vaddr)
+static void svm_invlpg(struct vcpu *v, unsigned long linear)
{
- svm_asid_g_invlpg(v, vaddr);
+ svm_asid_g_invlpg(v, linear);
}
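
svm_invlpg() funnels into svm_asid_g_invlpg() (see the asid.h hunk further
down), which deliberately avoids INVLPGA and instead invalidates the vcpu's
whole ASID: the next VMRUN then runs on a fresh ASID, making every stale TLB
entry unreachable. A sketch of that generation-bump scheme, using a
hypothetical struct loosely modelled on Xen's hvm_vcpu_asid:

    #include <stdint.h>

    struct vcpu_asid {
        uint64_t generation;  /* 0 is never a live generation */
        uint32_t asid;
    };

    /* Flush by invalidation: zapping the generation forces the ASID
     * allocator to hand out a new ASID on the next VMRUN, which acts
     * as a full TLB flush for this guest without touching any other
     * ASID or issuing per-page INVLPGA. */
    static void asid_flush_vcpu(struct vcpu_asid *a)
    {
        a->generation = 0;
    }
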
static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info)
static void vmx_fpu_dirty_intercept(void);
static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content);
-static void vmx_invlpg(struct vcpu *v, unsigned long vaddr);
+static void vmx_invlpg(struct vcpu *v, unsigned long linear);
/* Values for domain's ->arch.hvm_domain.pi_ops.flags. */
#define PI_CSW_FROM (1u << 0)
vmx_update_cpu_exec_control(v);
}
-static void vmx_invlpg_intercept(unsigned long vaddr)
+static void vmx_invlpg_intercept(unsigned long linear)
{
- HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
- paging_invlpg(current, vaddr);
+ HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(linear));
+ paging_invlpg(current, linear);
}
-static void vmx_invlpg(struct vcpu *v, unsigned long vaddr)
+static void vmx_invlpg(struct vcpu *v, unsigned long linear)
{
if ( cpu_has_vmx_vpid )
- vpid_sync_vcpu_gva(v, vaddr);
+ vpid_sync_vcpu_gva(v, linear);
}
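
With VPIDs enabled the TLB is tagged per virtual processor, so a guest INVLPG
only needs to drop entries carrying this vCPU's tag; vpid_sync_vcpu_gva() does
that with INVVPID, using the finest invalidation type the hardware advertises
and falling back to coarser flushes otherwise. For reference, the INVVPID
descriptor layout per the Intel SDM (the names here are illustrative, not
Xen's, and the bitfield packing assumes a little-endian LP64 compiler):

    #include <stdint.h>

    /* INVVPID takes an invalidation type in a register and a 128-bit
     * descriptor in memory. */
    struct invvpid_desc {
        uint64_t vpid:16;      /* which tag to invalidate             */
        uint64_t reserved:48;  /* must be zero                        */
        uint64_t linear;       /* used by the individual-address type */
    };

    enum invvpid_type {
        INVVPID_INDIVIDUAL_ADDR  = 0,  /* one address, one VPID        */
        INVVPID_SINGLE_CONTEXT   = 1,  /* everything for one VPID      */
        INVVPID_ALL_CONTEXT      = 2,  /* all VPIDs                    */
        INVVPID_SINGLE_NONGLOBAL = 3,  /* one VPID, keeping globals    */
    };
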
static int vmx_vmfunc_intercept(struct cpu_user_regs *regs)
return bad_pages;
}
-void paging_invlpg(struct vcpu *v, unsigned long va)
+void paging_invlpg(struct vcpu *v, unsigned long linear)
{
- if ( !is_canonical_address(va) )
+ if ( !is_canonical_address(linear) )
return;
if ( paging_mode_enabled(v->domain) &&
- !paging_get_hostmode(v)->invlpg(v, va) )
+ !paging_get_hostmode(v)->invlpg(v, linear) )
return;
if ( is_pv_vcpu(v) )
- flush_tlb_one_local(va);
+ flush_tlb_one_local(linear);
else
- hvm_invlpg(v, va);
+ hvm_invlpg(v, linear);
}
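
The canonical-address filter exists because the flush primitives are stricter
than the architectural instruction: a guest can legitimately execute INVLPG on
a non-canonical address (it must not fault), but INVVPID for one fails on real
hardware (this was the substance of XSA-168), so such requests have to be
dropped before reaching the flush paths. A sketch of the test for 48-bit
linear addresses, equivalent in spirit to Xen's is_canonical_address():

    #include <stdbool.h>
    #include <stdint.h>

    /* A 48-bit linear address is canonical iff bits 63:47 all match,
     * i.e. bit 47 is sign-extended through bit 63. */
    static bool is_canonical_48(uint64_t addr)
    {
        return ((int64_t)addr >> 47) == ((int64_t)addr >> 63);
    }
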
/* Build a 32bit PSE page table using 4MB pages. */
* should not be intercepting it. However, we need to correctly handle
* getting here from instruction emulation.
*/
-static bool_t hap_invlpg(struct vcpu *v, unsigned long va)
+static bool_t hap_invlpg(struct vcpu *v, unsigned long linear)
{
/*
* Emulate INVLPGA:
* instruction should be issued on the hardware, or false if it's safe not
* to do so.
*/
-static bool sh_invlpg(struct vcpu *v, unsigned long va)
+static bool sh_invlpg(struct vcpu *v, unsigned long linear)
{
mfn_t sl1mfn;
shadow_l2e_t sl2e;
{
shadow_l3e_t sl3e;
if ( !(shadow_l4e_get_flags(
- sh_linear_l4_table(v)[shadow_l4_linear_offset(va)])
+ sh_linear_l4_table(v)[shadow_l4_linear_offset(linear)])
& _PAGE_PRESENT) )
return false;
/* This must still be a copy-from-user because we don't have the
* paging lock, and the higher-level shadows might disappear
* under our feet. */
if ( __copy_from_user(&sl3e, (sh_linear_l3_table(v)
- + shadow_l3_linear_offset(va)),
+ + shadow_l3_linear_offset(linear)),
sizeof (sl3e)) != 0 )
{
perfc_incr(shadow_invlpg_fault);
return false;
}
#else /* SHADOW_PAGING_LEVELS == 3 */
- if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(va)])
+ if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(linear)])
& _PAGE_PRESENT) )
// no need to flush anything if there's no SL2...
return false;
/* This must still be a copy-from-user because we don't have the shadow
* lock, and the higher-level shadows might disappear under our feet. */
if ( __copy_from_user(&sl2e,
- sh_linear_l2_table(v) + shadow_l2_linear_offset(va),
+ sh_linear_l2_table(v) + shadow_l2_linear_offset(linear),
sizeof (sl2e)) != 0 )
{
perfc_incr(shadow_invlpg_fault);
* feet. */
if ( __copy_from_user(&sl2e,
sh_linear_l2_table(v)
- + shadow_l2_linear_offset(va),
+ + shadow_l2_linear_offset(linear),
sizeof (sl2e)) != 0 )
{
perfc_incr(shadow_invlpg_fault);
&& page_is_out_of_sync(pg) ) )
{
shadow_l1e_t *sl1;
- sl1 = sh_linear_l1_table(v) + shadow_l1_linear_offset(va);
+ sl1 = sh_linear_l1_table(v) + shadow_l1_linear_offset(linear);
- /* Remove the shadow entry that maps this VA */
+ /* Remove the shadow entry that maps this linear address */
(void) shadow_set_l1e(d, sl1, shadow_l1e_empty(),
p2m_invalid, sl1mfn);
return 0;
}
-static bool _invlpg(struct vcpu *v, unsigned long va)
+static bool _invlpg(struct vcpu *v, unsigned long linear)
{
ASSERT_UNREACHABLE();
return true;
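
For readers unfamiliar with the shadow hunks above: the
sh_linear_lN_table()/shadow_lN_linear_offset() pairs address the shadow page
tables through Xen's linear (recursive) mapping, in which every table of a
given level appears as one contiguous virtual array. The arithmetic underneath
is just the 9-bits-per-level split of a 48-bit linear address; a generic
sketch with hypothetical names:

    #include <stdint.h>

    /* 4-level paging: 4k pages, nine index bits per level. */
    #define PT_SHIFT(level)  (12 + 9 * ((level) - 1))

    /* Index into a single level-N table (512 entries). */
    static unsigned int pt_index(uint64_t linear, unsigned int level)
    {
        return (linear >> PT_SHIFT(level)) & 0x1ff;
    }

    /* Offset into the linear mapping of *all* level-N tables: the
     * whole shifted value, unmasked, which is why sh_invlpg() can
     * index sh_linear_l2_table() directly with
     * shadow_l2_linear_offset(). */
    static uint64_t pt_linear_offset(uint64_t linear, unsigned int level)
    {
        return (linear & ((1ULL << 48) - 1)) >> PT_SHIFT(level);
    }
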
int (*event_pending)(struct vcpu *v);
bool (*get_pending_event)(struct vcpu *v, struct x86_event *info);
- void (*invlpg)(struct vcpu *v, unsigned long vaddr);
+ void (*invlpg)(struct vcpu *v, unsigned long linear);
int (*cpu_up_prepare)(unsigned int cpu);
void (*cpu_dead)(unsigned int cpu);
return hvm_funcs.event_pending(v);
}
-static inline void hvm_invlpg(struct vcpu *v, unsigned long va)
+static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)
{
- hvm_funcs.invlpg(v, va);
+ hvm_funcs.invlpg(v, linear);
}
/* These bits in CR4 are owned by the host. */
void svm_asid_init(const struct cpuinfo_x86 *c);
void svm_asid_handle_vmrun(void);
-static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
+static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_linear)
{
#if 0
/* Optimization? */
- svm_invlpga(g_vaddr, v->arch.hvm.svm.vmcb->guest_asid);
+ svm_invlpga(g_linear, v->arch.hvm.svm.vmcb->guest_asid);
#endif
/* Safe fallback. Take a new ASID. */
: : "a" (vmcb) : "memory" );
}
-static inline void svm_invlpga(unsigned long vaddr, uint32_t asid)
+static inline void svm_invlpga(unsigned long linear, uint32_t asid)
{
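    /* 0F 01 DF is INVLPGA.  Its operands are implicit: the linear
     * address in rAX and the ASID in ECX, hence the "a" and "c"
     * constraints below. */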
asm volatile (
".byte 0x0f,0x01,0xdf"
: /* output */
: /* input */
- "a" (vaddr), "c" (asid));
+ "a" (linear), "c" (asid));
}
unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
struct paging_mode {
int (*page_fault )(struct vcpu *v, unsigned long va,
struct cpu_user_regs *regs);
- bool (*invlpg )(struct vcpu *v, unsigned long va);
+ bool (*invlpg )(struct vcpu *v,
+ unsigned long linear);
unsigned long (*gva_to_gfn )(struct vcpu *v,
struct p2m_domain *p2m,
unsigned long va,