     struct domain *d = p2m->domain;
     uint32_t old_flags;
     bool_t flush_nestedp2m = 0;
+    int rc;
 
     /* We know we always use the host p2m here, regardless of whether
      * the vcpu is in host or guest mode. The vcpu can be in guest mode by
                              && perms_strictly_increased(old_flags, l1e_get_flags(new)) );
     }
 
-    p2m_entry_modify(p2m, p2m_flags_to_type(l1e_get_flags(new)),
-                     p2m_flags_to_type(old_flags), level);
+    rc = p2m_entry_modify(p2m, p2m_flags_to_type(l1e_get_flags(new)),
+                          p2m_flags_to_type(old_flags), l1e_get_mfn(new),
+                          l1e_get_mfn(*p), level);
+    if ( rc )
+    {
+        paging_unlock(d);
+        return rc;
+    }
 
     safe_write_pte(p, new);
 
     if ( old_flags & _PAGE_PRESENT )
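Both write_p2m_entry implementations touched by this change (HAP above, shadow further down) now follow the same discipline: let p2m_entry_modify() take whatever page reference the new entry needs first, and only publish the PTE once that has succeeded. A minimal sketch of the invariant, with a hypothetical caller name and assuming the patched helper:

static int example_write_entry(struct p2m_domain *p2m, l1_pgentry_t *p,
                               l1_pgentry_t new, unsigned int level)
{
    /* Acquire/release foreign-page references for this type transition. */
    int rc = p2m_entry_modify(p2m, p2m_flags_to_type(l1e_get_flags(new)),
                              p2m_flags_to_type(l1e_get_flags(*p)),
                              l1e_get_mfn(new), l1e_get_mfn(*p), level);

    if ( rc )
        return rc;          /* no reference taken, old entry left intact */

    safe_write_pte(p, new); /* publish only after refcounting succeeded */

    return 0;
}

On failure the real callers additionally drop the paging lock they hold, as the hunk above shows.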
                                  ept_entry_t *entryptr, ept_entry_t new,
                                  int level)
 {
-    int rc;
-    unsigned long oldmfn = mfn_x(INVALID_MFN);
-    bool_t check_foreign = (new.mfn != entryptr->mfn ||
-                            new.sa_p2mt != entryptr->sa_p2mt);
-
-    if ( level )
-    {
-        ASSERT(!is_epte_superpage(&new) || !p2m_is_foreign(new.sa_p2mt));
-        write_atomic(&entryptr->epte, new.epte);
-        return 0;
-    }
-
-    if ( unlikely(p2m_is_foreign(new.sa_p2mt)) )
-    {
-        rc = -EINVAL;
-        if ( !is_epte_present(&new) )
-            goto out;
-
-        if ( check_foreign )
-        {
-            struct domain *fdom;
-
-            if ( !mfn_valid(_mfn(new.mfn)) )
-                goto out;
-
-            rc = -ESRCH;
-            fdom = page_get_owner(mfn_to_page(_mfn(new.mfn)));
-            if ( fdom == NULL )
-                goto out;
-
-            /* get refcount on the page */
-            rc = -EBUSY;
-            if ( !get_page(mfn_to_page(_mfn(new.mfn)), fdom) )
-                goto out;
-        }
-    }
-
-    if ( unlikely(p2m_is_foreign(entryptr->sa_p2mt)) && check_foreign )
-        oldmfn = entryptr->mfn;
-
-    p2m_entry_modify(p2m, new.sa_p2mt, entryptr->sa_p2mt, level);
+    int rc = p2m_entry_modify(p2m, new.sa_p2mt, entryptr->sa_p2mt,
+                              _mfn(new.mfn), _mfn(entryptr->mfn), level);
+
+    if ( rc )
+        return rc;
 
     write_atomic(&entryptr->epte, new.epte);
 
-    if ( unlikely(oldmfn != mfn_x(INVALID_MFN)) )
-        put_page(mfn_to_page(_mfn(oldmfn)));
-
-    rc = 0;
-
- out:
-    if ( rc )
-        gdprintk(XENLOG_ERR, "epte o:%"PRIx64" n:%"PRIx64" rc:%d\n",
-                 entryptr->epte, new.epte, rc);
-    return rc;
+    return 0;
 }
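The removed EPT-only handling resolved the page owner and took the reference in two separate steps; p2m_entry_modify() instead relies on page_get_owner_and_reference(), which combines owner lookup and refcount acquisition in a single call that returns NULL on failure. A rough sketch of the two styles (hypothetical wrapper names, assuming Xen's mm primitives):

/* Removed EPT path: look the owner up, then reference against it. */
static bool take_ref_two_step(mfn_t mfn)
{
    struct page_info *pg = mfn_to_page(mfn);
    struct domain *owner = page_get_owner(pg);

    return owner && get_page(pg, owner);
}

/* Common helper path: owner lookup and refcount bump in one call. */
static bool take_ref_one_step(mfn_t mfn)
{
    return page_get_owner_and_reference(mfn_to_page(mfn)) != NULL;
}

The out label and its gdprintk() go away together with the open-coded checks; callers now simply see the -EINVAL/-EBUSY value propagated from the helper.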
 static void ept_p2m_type_to_flags(struct p2m_domain *p2m, ept_entry_t *entry,
         __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
     }
 
-    if ( unlikely(p2m_is_foreign(p2mt)) )
-    {
-        /* hvm fixme: foreign types are only supported on ept at present */
-        gdprintk(XENLOG_WARNING, "Unimplemented foreign p2m type.\n");
-        return -EINVAL;
-    }
-
     /* Carry out any earlier changes that may still be pending. */
     rc = do_recalc(p2m, gfn);
     if ( rc < 0 )
                        unsigned int level)
 {
     struct domain *d = p2m->domain;
+    int rc;
 
     paging_lock(d);
 
     if ( likely(d->arch.paging.shadow.total_pages != 0) )
         sh_unshadow_for_p2m_change(d, gfn, p, new, level);
 
-    p2m_entry_modify(p2m, p2m_flags_to_type(l1e_get_flags(new)),
-                     p2m_flags_to_type(l1e_get_flags(*p)), level);
+    rc = p2m_entry_modify(p2m, p2m_flags_to_type(l1e_get_flags(new)),
+                          p2m_flags_to_type(l1e_get_flags(*p)),
+                          l1e_get_mfn(new), l1e_get_mfn(*p), level);
+    if ( rc )
+    {
+        paging_unlock(d);
+        return rc;
+    }
 
     /* Update the entry with new content */
     safe_write_pte(p, new);
 struct hvm_ioreq_server *p2m_get_ioreq_server(struct domain *d,
                                               unsigned int *flags);
-static inline void p2m_entry_modify(struct p2m_domain *p2m, p2m_type_t nt,
-                                    p2m_type_t ot, unsigned int level)
+static inline int p2m_entry_modify(struct p2m_domain *p2m, p2m_type_t nt,
+                                   p2m_type_t ot, mfn_t nfn, mfn_t ofn,
+                                   unsigned int level)
 {
-    if ( level != 1 || nt == ot )
-        return;
+    BUG_ON(level > 1 && (nt == p2m_ioreq_server || nt == p2m_map_foreign));
+
+    if ( level != 1 || (nt == ot && mfn_eq(nfn, ofn)) )
+        return 0;
 
     switch ( nt )
     {
         p2m->ioreq.entry_count++;
         break;
 
+    case p2m_map_foreign:
+        if ( !mfn_valid(nfn) )
+        {
+            ASSERT_UNREACHABLE();
+            return -EINVAL;
+        }
+
+        if ( !page_get_owner_and_reference(mfn_to_page(nfn)) )
+            return -EBUSY;
+
+        break;
+
     default:
         break;
     }
         p2m->ioreq.entry_count--;
         break;
 
+    case p2m_map_foreign:
+        if ( !mfn_valid(ofn) )
+        {
+            ASSERT_UNREACHABLE();
+            return -EINVAL;
+        }
+
+        put_page(mfn_to_page(ofn));
+        break;
+
     default:
         break;
     }
+
+    return 0;
 }
 
 #endif /* _XEN_ASM_X86_P2M_H */
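Taken together, the helper now pairs the reference operations across an L1 entry's lifetime: installing a p2m_map_foreign entry takes a reference on the new page, and replacing or removing it drops the reference on the old one. A simplified sketch of that pairing (hypothetical caller, level-1 entries only, assuming the helper above):

static int example_foreign_cycle(struct p2m_domain *p2m, mfn_t mfn)
{
    /* Install: nt == p2m_map_foreign, so a reference is taken on mfn. */
    int rc = p2m_entry_modify(p2m, p2m_map_foreign, p2m_invalid,
                              mfn, INVALID_MFN, 1);

    if ( rc )
        return rc;

    /* Remove: ot == p2m_map_foreign, so the reference on mfn is dropped. */
    return p2m_entry_modify(p2m, p2m_invalid, p2m_map_foreign,
                            INVALID_MFN, mfn, 1);
}

Note that the acquisition (the switch on nt) runs before the release (the switch on ot), so a foreign-to-foreign change to a different page takes the new reference before dropping the old one.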