* (used by p2m and log-dirty code for their tries) */
struct page_info * (*alloc_page)(struct domain *d);
void (*free_page)(struct domain *d, struct page_info *pg);
+
+ void (*update_paging_modes)(struct vcpu *v);
};
struct paging_vcpu {
#endif
void (*update_cr3 )(struct vcpu *v, int do_locking,
bool noflush);
- void (*update_paging_modes )(struct vcpu *v);
bool (*flush_tlb )(const unsigned long *vcpu_bitmap);
unsigned int guest_levels;
* has changed, and when bringing up a VCPU for the first time. */
static inline void paging_update_paging_modes(struct vcpu *v)
{
- paging_get_hostmode(v)->update_paging_modes(v);
+ v->domain->arch.paging.update_paging_modes(v);
}
#ifdef CONFIG_PV
/************************************************/
/* HAP DOMAIN LEVEL FUNCTIONS */
/************************************************/
+
+/* Forward declaration: installed below as d->arch.paging.update_paging_modes. */
+static void cf_check hap_update_paging_modes(struct vcpu *v);
+
void hap_domain_init(struct domain *d)
{
static const struct log_dirty_ops hap_ops = {
/* Use HAP logdirty mechanism. */
paging_log_dirty_init(d, &hap_ops);
+
+ d->arch.paging.update_paging_modes = hap_update_paging_modes;
}
/* return 0 for success, -errno for failure */
.gva_to_gfn = hap_gva_to_gfn_real_mode,
.p2m_ga_to_gfn = hap_p2m_ga_to_gfn_real_mode,
.update_cr3 = hap_update_cr3,
- .update_paging_modes = hap_update_paging_modes,
.flush_tlb = flush_tlb,
.guest_levels = 1
};
.gva_to_gfn = hap_gva_to_gfn_2_levels,
.p2m_ga_to_gfn = hap_p2m_ga_to_gfn_2_levels,
.update_cr3 = hap_update_cr3,
- .update_paging_modes = hap_update_paging_modes,
.flush_tlb = flush_tlb,
.guest_levels = 2
};
.gva_to_gfn = hap_gva_to_gfn_3_levels,
.p2m_ga_to_gfn = hap_p2m_ga_to_gfn_3_levels,
.update_cr3 = hap_update_cr3,
- .update_paging_modes = hap_update_paging_modes,
.flush_tlb = flush_tlb,
.guest_levels = 3
};
.gva_to_gfn = hap_gva_to_gfn_4_levels,
.p2m_ga_to_gfn = hap_p2m_ga_to_gfn_4_levels,
.update_cr3 = hap_update_cr3,
- .update_paging_modes = hap_update_paging_modes,
.flush_tlb = flush_tlb,
.guest_levels = 4
};
static int cf_check sh_disable_log_dirty(struct domain *);
static void cf_check sh_clean_dirty_bitmap(struct domain *);
+/* Installed as d->arch.paging.update_paging_modes in shadow_domain_init(). */
+static void cf_check shadow_update_paging_modes(struct vcpu *);
+
/* Set up the shadow-specific parts of a domain struct at start of day.
* Called for every domain from arch_domain_create() */
int shadow_domain_init(struct domain *d)
/* Use shadow pagetables for log-dirty support */
paging_log_dirty_init(d, &sh_ops);
+ d->arch.paging.update_paging_modes = shadow_update_paging_modes;
+
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
d->arch.paging.shadow.oos_active = 0;
#endif
v->arch.paging.mode->update_cr3(v, 0, false);
}
-void cf_check shadow_update_paging_modes(struct vcpu *v)
+/*
+ * Update all the things that are derived from the guest's CR0/CR3/CR4.
+ * Called to initialize paging structures if the paging mode has changed,
+ * and when bringing up a VCPU for the first time.
+ */
+static void cf_check shadow_update_paging_modes(struct vcpu *v)
{
paging_lock(v->domain);
sh_update_paging_modes(v);
.gva_to_gfn = sh_gva_to_gfn,
#endif
.update_cr3 = sh_update_cr3,
- .update_paging_modes = shadow_update_paging_modes,
.flush_tlb = shadow_flush_tlb,
.guest_levels = GUEST_PAGING_LEVELS,
#ifdef CONFIG_PV
ASSERT(is_pv_domain(d));
}
+/*
+ * Safety stub for the "shadow none" configuration: shadow_domain_init()
+ * installs this pointer but then fails HVM domain creation (-EOPNOTSUPP),
+ * so no vCPU of such a domain should ever have its paging mode updated.
+ */
+static void cf_check _update_paging_modes(struct vcpu *v)
+{
+    ASSERT_UNREACHABLE();
+}
+
int shadow_domain_init(struct domain *d)
{
+ /* For HVM set up pointers for safety, then fail. */
static const struct log_dirty_ops sh_none_ops = {
.enable = _enable_log_dirty,
.disable = _disable_log_dirty,
};
paging_log_dirty_init(d, &sh_none_ops);
+
+ d->arch.paging.update_paging_modes = _update_paging_modes;
+
return is_hvm_domain(d) ? -EOPNOTSUPP : 0;
}
ASSERT_UNREACHABLE();
}
-static void cf_check _update_paging_modes(struct vcpu *v)
-{
- ASSERT_UNREACHABLE();
-}
-
static const struct paging_mode sh_paging_none = {
.page_fault = _page_fault,
.invlpg = _invlpg,
.gva_to_gfn = _gva_to_gfn,
#endif
.update_cr3 = _update_cr3,
- .update_paging_modes = _update_paging_modes,
};
void shadow_vcpu_init(struct vcpu *v)
intpte_t cf_check sh_cmpxchg_guest_entry(
struct vcpu *v, intpte_t *p, intpte_t old, intpte_t new, mfn_t gmfn);
-/* Update all the things that are derived from the guest's CR0/CR3/CR4.
- * Called to initialize paging structures if the paging mode
- * has changed, and when bringing up a VCPU for the first time. */
-void cf_check shadow_update_paging_modes(struct vcpu *v);
-
/* Unhook the non-Xen mappings in this top-level shadow mfn.
* With user_only == 1, unhooks only the user-mode mappings. */
void shadow_unhook_mappings(struct domain *d, mfn_t smfn, int user_only);