     return e;
 }
 
+/* Generate table entry with correct attributes. */
+static lpae_t page_to_p2m_table(struct page_info *page)
+{
+    /*
+     * The access value does not matter because the hardware will ignore
+     * the permission fields for table entry.
+     */
+    return mfn_to_p2m_entry(page_to_mfn(page), p2m_invalid, p2m_access_rwx);
+}
+
 static inline void p2m_write_pte(lpae_t *p, lpae_t pte, bool clean_pte)
 {
     write_pte(p, pte);
 {
     struct page_info *page;
     lpae_t *p;
-    lpae_t pte;
 
     ASSERT(!lpae_is_valid(*entry));
     unmap_domain_page(p);
 
-    /*
-     * The access value does not matter because the hardware will ignore
-     * the permission fields for table entry.
-     */
-    pte = mfn_to_p2m_entry(page_to_mfn(page), p2m_invalid,
-                           p2m->default_access);
-
-    p2m_write_pte(entry, pte, p2m->clean_pte);
+    p2m_write_pte(entry, page_to_p2m_table(page), p2m->clean_pte);
 
     return 0;
 }
     unmap_domain_page(table);
 
-    pte = mfn_to_p2m_entry(page_to_mfn(page), p2m_invalid,
-                           p2m->default_access);
-
     /*
      * Even if we failed, we should install the newly allocated LPAE
      * entry. The caller will be in charge to free the sub-tree.
      */
-    p2m_write_pte(entry, pte, p2m->clean_pte);
+    p2m_write_pte(entry, page_to_p2m_table(page), p2m->clean_pte);
 
     return rv;
 }
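
For context, here is a rough sketch of how the first modified call site reads once the helper is in place. It assumes the surrounding function is p2m_create_table() in xen/arch/arm/p2m.c and that the page allocation path (alloc_domheap_page(), page_list_add(), clear_page()) is untouched by this patch; those lines are elided from the hunks above and are reproduced here only as an illustration, not as part of the change.

/*
 * Sketch only: the allocation details below are assumed from
 * xen/arch/arm/p2m.c and are not introduced by this patch.
 */
static int p2m_create_table(struct p2m_domain *p2m, lpae_t *entry)
{
    struct page_info *page;
    lpae_t *p;

    ASSERT(!lpae_is_valid(*entry));

    page = alloc_domheap_page(NULL, 0);
    if ( page == NULL )
        return -ENOMEM;

    page_list_add(page, &p2m->pages);

    /* Zero the fresh table so every entry starts out invalid. */
    p = __map_domain_page(page);
    clear_page(p);
    unmap_domain_page(p);

    /* The helper builds the table entry; its permission bits are ignored. */
    p2m_write_pte(entry, page_to_p2m_table(page), p2m->clean_pte);

    return 0;
}

Centralising the entry construction also means both call sites now use p2m_access_rwx rather than p2m->default_access; that is harmless here because, as the comment in the helper notes, the hardware ignores the permission fields of a table entry.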