struct vcpu *v = current;
struct page_info *page;
unsigned long old = v->arch.hvm_vcpu.guest_cr[3];
+ bool noflush = false;
if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
                           monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) )
}
}
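+ /*
+  * With CR4.PCIDE set, the guest may set bit 63 of the value written to
+  * CR3 to ask that cached translations (TLB and paging-structure cache
+  * entries) be preserved across the load.  The bit is not part of the
+  * address, so note the request and strip it before using the value.
+  */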
+ if ( hvm_pcid_enabled(v) ) /* Clear the noflush bit. */
+ {
+ noflush = value & X86_CR3_NOFLUSH;
+ value &= ~X86_CR3_NOFLUSH;
+ }
+
if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
     (value != v->arch.hvm_vcpu.guest_cr[3]) )
{
}
v->arch.hvm_vcpu.guest_cr[3] = value;
- paging_update_cr3(v);
+ paging_update_cr3(v, noflush);
return X86EMUL_OKAY;
bad_cr3:
/* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
for_each_vcpu ( d, v )
- paging_update_cr3(v);
+ paging_update_cr3(v, false);
/* Flush all dirty TLBs. */
flush_tlb_mask(d->dirty_cpumask);
domain_pause(d);
d->arch.hvm_domain.params[a.index] = a.value;
for_each_vcpu ( d, v )
- paging_update_cr3(v);
+ paging_update_cr3(v, false);
domain_unpause(d);
domctl_lock_release();
struct arch_domain *ad = &curr->domain->arch;
unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index);
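+
+ /*
+  * Bit 63 (NOFLUSH) is not part of the CR3 value proper, so strip it here
+  * too: otherwise the on-change-only comparison below would be tripped by
+  * the bit alone.
+  */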
+ if ( index == VM_EVENT_X86_CR3 && hvm_pcid_enabled(curr) )
+     value &= ~X86_CR3_NOFLUSH; /* Clear the noflush bit. */
+
if ( (ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) &&
     (!(ad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) ||
      value != old) &&
v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
- svm_update_guest_cr(v, 0);
- svm_update_guest_cr(v, 2);
- svm_update_guest_cr(v, 4);
+ svm_update_guest_cr(v, 0, 0);
+ svm_update_guest_cr(v, 2, 0);
+ svm_update_guest_cr(v, 4, 0);
/* Load sysenter MSRs into both VMCB save area and VCPU fields. */
vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs;
return likely(vmcb->cs.db) ? 4 : 2;
}
-void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
+void svm_update_guest_cr(struct vcpu *v, unsigned int cr, unsigned int flags)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
uint64_t value;
case 3:
    vmcb_set_cr3(vmcb, v->arch.hvm_vcpu.hw_cr[3]);
    if ( !nestedhvm_enabled(v->domain) )
-        hvm_asid_flush_vcpu(v);
+    {
+        if ( !(flags & HVM_UPDATE_GUEST_CR3_NOFLUSH) )
+            hvm_asid_flush_vcpu(v);
+    }
    else if ( nestedhvm_vmswitch_in_progress(v) )
        ; /* CR3 switches during VMRUN/VMEXIT do not flush the TLB. */
-    else
+    else if ( !(flags & HVM_UPDATE_GUEST_CR3_NOFLUSH) )
        hvm_asid_flush_vcpu_asid(
            nestedhvm_vcpu_in_guestmode(v)
            ? &vcpu_nestedhvm(v).nv_n2asid : &v->arch.hvm_vcpu.n1asid);
static int vmx_alloc_vlapic_mapping(struct domain *d);
static void vmx_free_vlapic_mapping(struct domain *d);
static void vmx_install_vlapic_mapping(struct vcpu *v);
-static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr);
+static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
+                                unsigned int flags);
static void vmx_update_guest_efer(struct vcpu *v);
static void vmx_wbinvd_intercept(void);
static void vmx_fpu_dirty_intercept(void);
v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
- vmx_update_guest_cr(v, 0);
- vmx_update_guest_cr(v, 2);
- vmx_update_guest_cr(v, 4);
+ vmx_update_guest_cr(v, 0, 0);
+ vmx_update_guest_cr(v, 2, 0);
+ vmx_update_guest_cr(v, 4, 0);
v->arch.hvm_vcpu.guest_efer = c->msr_efer;
vmx_update_guest_efer(v);
vmx_vmcs_exit(v);
}
-static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
+static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
+                                unsigned int flags)
{
vmx_vmcs_enter(v);
}
__vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
- hvm_asid_flush_vcpu(v);
+
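+ /* Skip the flush when the guest asked for a no-flush CR3 write;
+  * otherwise invalidate the vCPU's ASID/VPID as before. */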
+ if ( !(flags & HVM_UPDATE_GUEST_CR3_NOFLUSH) )
+     hvm_asid_flush_vcpu(v);
break;
default:
*/
hvm_monitor_crX(CR0, value, old);
curr->arch.hvm_vcpu.guest_cr[0] = value;
- vmx_update_guest_cr(curr, 0);
+ vmx_update_guest_cr(curr, 0, 0);
HVMTRACE_0D(CLTS);
break;
}
if ( paging_mode_enabled(v->domain) )
{
- paging_update_cr3(v);
+ paging_update_cr3(v, false);
return;
}
return 1;
}
-static void hap_update_cr3(struct vcpu *v, int do_locking)
+static void hap_update_cr3(struct vcpu *v, int do_locking, bool noflush)
{
v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3];
- hvm_update_guest_cr(v, 3);
+ hvm_update_guest_cr3(v, noflush);
}
const struct paging_mode *
}
/* CR3 is effectively updated by a mode change. Flush ASIDs, etc. */
- hap_update_cr3(v, 0);
+ hap_update_cr3(v, 0, false);
paging_unlock(d);
put_gfn(d, cr3_gfn);
}
#endif /* OOS */
- v->arch.paging.mode->update_cr3(v, 0);
+ v->arch.paging.mode->update_cr3(v, 0, false);
}
void shadow_update_paging_modes(struct vcpu *v)
* In any case, in the PAE case, the ASSERT is not true; it can
* happen because of actions the guest is taking. */
#if GUEST_PAGING_LEVELS == 3
- v->arch.paging.mode->update_cr3(v, 0);
+ v->arch.paging.mode->update_cr3(v, 0, false);
#else
ASSERT(d->is_shutting_down);
#endif
static void
-sh_update_cr3(struct vcpu *v, int do_locking)
+sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
/* Updates vcpu->arch.cr3 after the guest has changed CR3.
* Paravirtual guests should set v->arch.guest_table (and guest_table_user,
* if appropriate).
v->arch.hvm_vcpu.hw_cr[3] =
    pagetable_get_paddr(v->arch.shadow_table[0]);
#endif
- hvm_update_guest_cr(v, 3);
+ hvm_update_guest_cr3(v, noflush);
}
/* Fix up the linear pagetable mappings */
return gfn_x(INVALID_GFN);
}
-static void _update_cr3(struct vcpu *v, int do_locking)
+static void _update_cr3(struct vcpu *v, int do_locking, bool noflush)
{
ASSERT_UNREACHABLE();
}
#define HVM_EVENT_VECTOR_UNSET (-1)
#define HVM_EVENT_VECTOR_UPDATING (-2)
+/* update_guest_cr() flags. */
+#define HVM_UPDATE_GUEST_CR3_NOFLUSH 0x00000001
+
/*
* The hardware virtual machine (HVM) interface abstracts away from the
* x86/x86_64 CPU virtualization assist specifics. Currently this interface
/*
* Called to inform HVM layer that a guest CRn or EFER has changed.
*/
- void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
+ void (*update_guest_cr)(struct vcpu *v, unsigned int cr,
+                           unsigned int flags);
void (*update_guest_efer)(struct vcpu *v);
void (*cpuid_policy_changed)(struct vcpu *v);
static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
- hvm_funcs.update_guest_cr(v, cr);
+ hvm_funcs.update_guest_cr(v, cr, 0);
+}
+
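+/*
+ * Shorthand for a guest CR3 update: "noflush" asks the SVM/VMX code to
+ * skip the TLB/ASID flush it would otherwise perform for a CR3 load.
+ */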
+static inline void hvm_update_guest_cr3(struct vcpu *v, bool noflush)
+{
+ unsigned int flags = noflush ? HVM_UPDATE_GUEST_CR3_NOFLUSH : 0;
+
+ hvm_funcs.update_guest_cr(v, 3, flags);
}
static inline void hvm_update_guest_efer(struct vcpu *v)
unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
-void svm_update_guest_cr(struct vcpu *, unsigned int cr);
+void svm_update_guest_cr(struct vcpu *, unsigned int cr, unsigned int flags);
extern u32 svm_feature_flags;
unsigned long cr3,
paddr_t ga, uint32_t *pfec,
unsigned int *page_order);
- void (*update_cr3 )(struct vcpu *v, int do_locking);
+ void (*update_cr3 )(struct vcpu *v, int do_locking,
+                      bool noflush);
void (*update_paging_modes )(struct vcpu *v);
void (*write_p2m_entry )(struct domain *d, unsigned long gfn,
l1_pgentry_t *p, l1_pgentry_t new,
/* Update all the things that are derived from the guest's CR3.
* Called when the guest changes CR3; the caller can then use v->arch.cr3
* as the value to load into the host CR3 to schedule this vcpu */
-static inline void paging_update_cr3(struct vcpu *v)
+static inline void paging_update_cr3(struct vcpu *v, bool noflush)
{
- paging_get_hostmode(v)->update_cr3(v, 1);
+ paging_get_hostmode(v)->update_cr3(v, 1, noflush);
}
/* Update all the things that are derived from the guest's CR0/CR3/CR4.
#define X86_CR0_CD 0x40000000 /* Cache Disable (RW) */
#define X86_CR0_PG 0x80000000 /* Paging (RW) */
+/*
+ * Intel CPU flags in CR3
+ */
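+/* Bit 63: when CR4.PCIDE is set, requests that the CR3 write does not
+ * invalidate cached translations. */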
+#define X86_CR3_NOFLUSH (_AC(1, ULL) << 63)
+
/*
* Intel CPU features in CR4
*/