}
static void
-hap_write_p2m_entry(struct domain *d, unsigned long gfn, l1_pgentry_t *p,
+hap_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, l1_pgentry_t *p,
l1_pgentry_t new, unsigned int level)
{
+ struct domain *d = p2m->domain;
uint32_t old_flags;
bool_t flush_nestedp2m = 0;
if ( v->domain != d )
v = d->vcpu ? d->vcpu[0] : NULL;
if ( likely(v && paging_mode_enabled(d) && paging_get_hostmode(v) != NULL) )
- paging_get_hostmode(v)->write_p2m_entry(d, gfn, p, new, level);
+ paging_get_hostmode(v)->write_p2m_entry(p2m, gfn, p, new, level);
else
safe_write_pte(p, new);
}
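
For illustration only, not part of the patch: a minimal handler written against the new hook prototype. The domain is no longer passed in and has to be recovered from the p2m, just as the converted handlers above do; the name example_write_p2m_entry is hypothetical.

static void example_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
                                    l1_pgentry_t *p, l1_pgentry_t new,
                                    unsigned int level)
{
    struct domain *d = p2m->domain;   /* recover the owning domain from the p2m */

    paging_lock(d);
    safe_write_pte(p, new);           /* atomically install the new entry */
    paging_unlock(d);
}
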
}
void
-shadow_write_p2m_entry(struct domain *d, unsigned long gfn,
+shadow_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
l1_pgentry_t *p, l1_pgentry_t new,
unsigned int level)
{
+ struct domain *d = p2m->domain;
+
paging_lock(d);
/* If there are any shadows, update them. But if shadow_teardown()
ASSERT_UNREACHABLE();
}
-static void _write_p2m_entry(struct domain *d, unsigned long gfn,
+static void _write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
l1_pgentry_t *p, l1_pgentry_t new,
unsigned int level)
{
unsigned long fault_addr);
/* Functions that atomically write PT/P2M entries and update state */
-void shadow_write_p2m_entry(struct domain *d, unsigned long gfn,
+void shadow_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
l1_pgentry_t *p, l1_pgentry_t new,
unsigned int level);
void (*update_cr3 )(struct vcpu *v, int do_locking,
bool noflush);
void (*update_paging_modes )(struct vcpu *v);
- void (*write_p2m_entry )(struct domain *d, unsigned long gfn,
+ void (*write_p2m_entry )(struct p2m_domain *p2m,
+ unsigned long gfn,
l1_pgentry_t *p, l1_pgentry_t new,
unsigned int level);
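
In short, the hook type change in struct paging_mode means callers hand the p2m itself to the per-mode handler rather than the domain. A sketch of the calling convention before and after (illustrative only; "mode" is assumed to be a const struct paging_mode * already in scope):

    /* Before: mode->write_p2m_entry(d,   gfn, p, new, level);   d == p2m->domain      */
    /* After:  mode->write_p2m_entry(p2m, gfn, p, new, level);   handlers that still
               need the domain derive it via p2m->domain         */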