extern bool opt_hap_1gb, opt_hap_2mb;
/*
- * The upper levels of the p2m pagetable always contain full rights; all
+ * The upper levels of the p2m pagetable always contain full rights; all
* variation in the access control bits is made in the level-1 PTEs.
- *
+ *
* In addition to the phys-to-machine translation, each p2m PTE contains
* *type* information about the gfn it translates, helping Xen to decide
* on the correct course of action when handling a page-fault to that
*/
/*
- * AMD IOMMU: When we share p2m table with iommu, bit 52 -bit 58 in pte
- * cannot be non-zero, otherwise, hardware generates io page faults when
+ * AMD IOMMU: When we share p2m table with iommu, bit 52 to bit 58 in pte
+ * cannot be non-zero, otherwise, hardware generates io page faults when
* device access those pages. Therefore, p2m_ram_rw has to be defined as 0.
*/
typedef enum {
unsigned int defer_flush;
bool need_flush;
- /* If true, and an access fault comes in and there is no vm_event listener,
- * pause domain. Otherwise, remove access restrictions. */
+ /*
+ * If true, and an access fault comes in and there is no vm_event
+ * listener, pause domain. Otherwise, remove access restrictions.
+ */
bool access_required;
/* Highest guest frame that's ever been mapped in the p2m */
unsigned long min_remapped_gfn;
unsigned long max_remapped_gfn;
- /* Populate-on-demand variables
+ /*
+ * Populate-on-demand variables
* All variables are protected with the pod lock. We cannot rely on
* the p2m lock if it's turned into a fine-grained lock.
- * We only use the domain page_alloc lock for additions and
+ * We only use the domain page_alloc lock for additions and
* deletions to the domain's page list. Because we use it nested
* within the PoD lock, we enforce it's ordering (by remembering
- * the unlock level in the arch_domain sub struct). */
+ * the unlock level in the arch_domain sub struct).
+ */
struct {
struct page_list_head super, /* List of superpages */
single; /* Non-super lists */
return p2m_get_gfn_type_access(p2m, gfn, t, a, q, page_order, locked);
}
-/* Read a particular P2M table, mapping pages as we go. Most callers
+/*
+ * Read a particular P2M table, mapping pages as we go. Most callers
* should _not_ call this directly; use the other get_gfn* functions
* below unless you know you want to walk a p2m that isn't a domain's
* main one.
- * If the lookup succeeds, the return value is != INVALID_MFN and
+ * If the lookup succeeds, the return value is != INVALID_MFN and
* *page_order is filled in with the order of the superpage (if any) that
- * the entry was found in. */
+ * the entry was found in.
+ */
static inline mfn_t __nonnull(3, 4) get_gfn_type_access(
struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
p2m_access_t *a, p2m_query_t q, unsigned int *page_order)
p2m_put_gfn(p2m_get_hostp2m(d), _gfn(gfn));
}
-/* The intent of the "unlocked" accessor is to have the caller not worry about
- * put_gfn. They apply to very specific situations: debug printk's, dumps
- * during a domain crash, or to peek at a p2m entry/type. Caller is not
- * holding the p2m entry exclusively during or after calling this.
+/*
+ * The intent of the "unlocked" accessors is to have the caller not worry
+ * about put_gfn. They apply to very specific situations: debug printk's,
+ * dumps during a domain crash, or to peek at a p2m entry/type. The caller
+ * is not holding the p2m entry exclusively during or after calling this.
*
* This is also used in the shadow code whenever the paging lock is
* held -- in those cases, the caller is protected against concurrent
* Any other type of query can cause a change in the p2m and may need to
* perform locking.
*/
-static inline mfn_t get_gfn_query_unlocked(struct domain *d,
- unsigned long gfn,
+static inline mfn_t get_gfn_query_unlocked(struct domain *d,
+ unsigned long gfn,
p2m_type_t *t)
{
p2m_access_t a;
NULL, 0);
}
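/*
 * Illustrative sketch, not part of this patch: how the "unlocked" query is
 * meant to be used for the debug-dump situations described above.  The
 * function name and printk format are made up for the example; only
 * get_gfn_query_unlocked() and mfn_x() are taken from the surrounding code.
 */
static void dump_one_gfn_example(struct domain *d, unsigned long gfn)
{
    p2m_type_t t;
    mfn_t mfn = get_gfn_query_unlocked(d, gfn, &t);

    /* No put_gfn() is needed: the unlocked accessor takes no lock or ref. */
    printk("d%d: gfn %#lx -> mfn %#lx, type %u\n",
           d->domain_id, gfn, mfn_x(mfn), (unsigned int)t);
}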
-/* Atomically look up a GFN and take a reference count on the backing page.
+/*
+ * Atomically look up a GFN and take a reference count on the backing page.
* This makes sure the page doesn't get freed (or shared) underfoot,
* and should be used by any path that intends to write to the backing page.
* Returns NULL if the page is not backed by RAM.
- * The caller is responsible for calling put_page() afterwards. */
+ * The caller is responsible for calling put_page() afterwards.
+ */
struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn,
p2m_type_t *t, p2m_access_t *a,
p2m_query_t q);
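/*
 * Illustrative sketch, not part of this patch: the lookup/put_page()
 * pairing that the comment above requires.  The wrapper name is made up;
 * p2m_get_hostp2m(), P2M_ALLOC and put_page() are existing interfaces.
 */
static int write_backed_gfn_example(struct domain *d, gfn_t gfn)
{
    p2m_type_t t;
    p2m_access_t a;
    struct page_info *page =
        p2m_get_page_from_gfn(p2m_get_hostp2m(d), gfn, &t, &a, P2M_ALLOC);

    if ( !page )
        return -EINVAL; /* gfn is not backed by RAM. */

    /* ... safe to write to the backing page here: it cannot go away ... */

    put_page(page); /* Drop the reference taken by the lookup. */

    return 0;
}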
/* Init the datastructures for later use by the p2m code */
int p2m_init(struct domain *d);
-/* Allocate a new p2m table for a domain.
- *
- * Returns 0 for success or -errno. */
+/* Allocate a new p2m table for a domain. Returns 0 for success or -errno. */
int p2m_alloc_table(struct p2m_domain *p2m);
/* Return all the p2m resources to Xen. */
#endif
/* Change types across all p2m entries in a domain */
-void p2m_change_entry_type_global(struct domain *d,
+void p2m_change_entry_type_global(struct domain *d,
p2m_type_t ot, p2m_type_t nt);
/* Change types across a range of p2m entries (start ... end-1) */
-void p2m_change_type_range(struct domain *d,
+void p2m_change_type_range(struct domain *d,
unsigned long start, unsigned long end,
p2m_type_t ot, p2m_type_t nt);
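/*
 * Illustrative sketch, not part of this patch: the range is half-open, so
 * converting guest frames [start, end) from one RAM type to another looks
 * like this (the frame numbers and wrapper name are hypothetical).
 */
static void make_range_logdirty_example(struct domain *d)
{
    /* Frames 0x100000 up to, but not including, 0x110000. */
    p2m_change_type_range(d, 0x100000, 0x110000,
                          p2m_ram_rw, p2m_ram_logdirty);
}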
p2m_access_t p2ma, unsigned int flag);
int p2m_remove_identity_entry(struct domain *d, unsigned long gfn_l);
-/*
+/*
* Populate-on-demand
*/
struct vm_event_st;
void p2m_mem_paging_resume(struct domain *d, struct vm_event_st *rsp);
-/*
+/*
* Internal functions, only called by other p2m code
*/
/* Extract the type from the PTE flags that store it */
static inline p2m_type_t p2m_flags_to_type(unsigned int flags)
{
- /* For AMD IOMMUs we need to use type 0 for plain RAM, but we need
- * to make sure that an entirely empty PTE doesn't have RAM type */
- if ( flags == 0 )
+ /*
+ * For AMD IOMMUs we need to use type 0 for plain RAM, but we need
+ * to make sure that an entirely empty PTE doesn't have RAM type.
+ */
+ if ( flags == 0 )
return p2m_invalid;
- /* AMD IOMMUs use bits 9-11 to encode next io page level and bits
- * 59-62 for iommu flags so we can't use them to store p2m type info. */
+
+ /*
+ * AMD IOMMUs use bits 9-11 to encode next io page level and bits
+ * 59-62 for iommu flags so we can't use them to store p2m type info.
+ */
return (flags >> 12) & 0x7f;
}
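/*
 * Illustrative sketch, not part of this patch: given the decoding above,
 * the p2m type lives in bits 12-18 of the flags value, so any non-zero
 * encoding round-trips, while an all-zero PTE must decode as p2m_invalid
 * rather than as p2m_ram_rw (which is 0).  The self-check function is
 * hypothetical.
 */
static void p2m_flags_to_type_selfcheck(void)
{
    ASSERT(p2m_flags_to_type(p2m_ram_logdirty << 12) == p2m_ram_logdirty);
    ASSERT(p2m_flags_to_type(0) == p2m_invalid);
}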
int p2m_pt_handle_deferred_changes(uint64_t gpa);
/*
- * Nested p2m: shadow p2m tables used for nested HVM virtualization
+ * Nested p2m: shadow p2m tables used for nested HVM virtualization
*/
/* Flushes specified p2m table */
*/
void p2m_tlb_flush_sync(struct p2m_domain *p2m)
{
- if ( p2m->need_flush ) {
+ if ( p2m->need_flush )
+ {
p2m->need_flush = 0;
p2m->tlb_flush(p2m);
}
*/
void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m)
{
- if ( p2m->need_flush ) {
+ if ( p2m->need_flush )
+ {
p2m->need_flush = 0;
mm_write_unlock(&p2m->lock);
p2m->tlb_flush(p2m);
mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);
}
- if (unlikely((p2m_is_broken(*t))))
+ if ( unlikely(p2m_is_broken(*t)) )
{
/* Return invalid_mfn to avoid caller's access */
mfn = INVALID_MFN;
mfn_x(omfn), ot, a,
mfn_x(mfn) + i, t, p2m->default_access);
domain_crash(d);
+
return -EPERM;
}
else if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
* Resets the access permissions.
*/
int p2m_change_type_one(struct domain *d, unsigned long gfn_l,
- p2m_type_t ot, p2m_type_t nt)
+ p2m_type_t ot, p2m_type_t nt)
{
p2m_access_t a;
p2m_type_t pt;
unsigned int i;
for ( i = 0; i < MAX_ALTP2M; i++ )
+ {
if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
{
struct p2m_domain *altp2m = d->arch.altp2m_p2m[i];
change_type_range(altp2m, start, end, ot, nt);
p2m_unlock(altp2m);
}
+ }
}
hostp2m->defer_nested_flush = false;
if ( nestedhvm_enabled(d) )
unsigned int i;
for ( i = 0; i < MAX_ALTP2M; i++ )
+ {
if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
{
struct p2m_domain *altp2m = d->arch.altp2m_p2m[i];
if ( rc < 0 )
goto out;
}
+ }
}
out:
p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
{
struct list_head *lru_list = &p2m_get_hostp2m(d)->np2m_list;
-
+
ASSERT(!list_empty(lru_list));
if ( p2m == NULL )
/* Mask out low bits; this avoids collisions with P2M_BASE_EADDR */
np2m_base &= ~(0xfffULL);
- if (nv->nv_flushp2m && nv->nv_p2m) {
+ if ( nv->nv_flushp2m && nv->nv_p2m )
nv->nv_p2m = NULL;
- }
nestedp2m_lock(d);
p2m = nv->nv_p2m;
- if ( p2m )
+ if ( p2m )
{
p2m_lock(p2m);
if ( p2m->np2m_base == np2m_base )
struct p2m_domain *
p2m_get_p2m(struct vcpu *v)
{
- if (!nestedhvm_is_n2(v))
+ if ( !nestedhvm_is_n2(v) )
return p2m_get_hostp2m(v->domain);
return p2m_get_nestedp2m(v);
#if P2M_AUDIT
void audit_p2m(struct domain *d,
uint64_t *orphans,
- uint64_t *m2p_bad,
- uint64_t *p2m_bad)
+ uint64_t *m2p_bad,
+ uint64_t *p2m_bad)
{
struct page_info *page;
struct domain *od;
p2m_lock(p2m);
pod_lock(p2m);
- if (p2m->audit_p2m)
+ if ( p2m->audit_p2m )
pmbad = p2m->audit_p2m(p2m);
/* Audit part two: walk the domain's page allocation list, checking
{
orphans_count++;
P2M_PRINTK("orphaned guest page: mfn=%#lx has invalid gfn\n",
- mfn);
+ mfn);
continue;
}
if ( SHARED_M2P(gfn) )
{
P2M_PRINTK("shared mfn (%lx) on domain page list!\n",
- mfn);
+ mfn);
continue;
}
p2m_put_gfn(p2m, _gfn(gfn));
P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx\n",
- mfn, gfn, mfn_x(p2mfn));
+ mfn, gfn, mfn_x(p2mfn));
}
spin_unlock(&d->page_alloc_lock);
pod_unlock(p2m);
p2m_unlock(p2m);
-
+
P2M_PRINTK("p2m audit complete\n");
if ( orphans_count | mpbad | pmbad )
P2M_PRINTK("p2m audit found %lu orphans\n", orphans_count);