{
return pte.p2m.valid;
}
-/* These two can only be used on L0..L2 ptes because L3 mappings set
+/*
+ * These two can only be used on L0..L2 ptes because L3 mappings set
* the table bit and therefore these would return the opposite to what
- * you would expect. */
+ * you would expect.
+ */
static bool_t p2m_table(lpae_t pte)
{
return p2m_valid(pte) && pte.p2m.table;
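The "two" helpers the comment refers to are the table and mapping predicates; the second one is not in this excerpt. A minimal sketch, mirroring p2m_valid() and p2m_table() above:

    /* True for a valid block/page mapping, i.e. valid but not a table entry. */
    static bool_t p2m_mapping(lpae_t pte)
    {
        return p2m_valid(pte) && !pte.p2m.table;
    }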
{
unsigned long flags = 0;
- /* Update the VTTBR if necessary with the domain d. In this case,
+ /*
+ * Update the VTTBR if necessary with the domain d. In this case,
* it's only necessary to flush TLBs on every CPU with the current VMID
* (our domain).
*/
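/*
 * Illustrative sketch (not part of this patch) of the pattern the comment
 * above describes, assuming the p2m_load_VTTBR(), flush_tlb() and
 * local_irq_save()/local_irq_restore() helpers: temporarily switch to d's
 * VTTBR so the flush runs under d's VMID, then switch back.
 */
if ( d != current->domain )
{
    local_irq_save(flags);
    p2m_load_VTTBR(d);
}

flush_tlb();

if ( d != current->domain )
{
    p2m_load_VTTBR(current->domain);
    local_irq_restore(flags);
}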
p2m_type_t t, p2m_access_t a)
{
paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
- /* sh, xn and write bit will be defined in the following switches
- * based on mattr and t. */
+ /*
+ * sh, xn and write bit will be defined in the following switches
+ * based on mattr and t.
+ */
lpae_t e = (lpae_t) {
.p2m.af = 1,
.p2m.read = 1,
MEMACCESS,
};
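As an illustration of the two switches the earlier comment mentions, a sketch only; MATTR_*, LPAE_SH_* and the p2m_ram_* types are the existing Xen ARM definitions, but the real code handles more cases than shown here:

    switch ( mattr )
    {
    case MATTR_MEM:
        e.p2m.sh = LPAE_SH_INNER;
        break;
    case MATTR_DEV:
        e.p2m.sh = LPAE_SH_OUTER;
        break;
    default:
        BUG();
    }

    switch ( t )
    {
    case p2m_ram_rw:
        e.p2m.xn = 0;
        e.p2m.write = 1;
        break;
    case p2m_ram_ro:
        e.p2m.xn = 0;
        e.p2m.write = 0;
        break;
    default:
        /* MMIO, grant and foreign mappings set sh/xn/write per type. */
        break;
    }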
-/* Put any references on the single 4K page referenced by pte. TODO:
- * Handle superpages, for now we only take special references for leaf
+/*
+ * Put any references held on the single 4K page referenced by pte.
+ * TODO: Handle superpages; for now we only take special references for leaf
* pages (specifically foreign ones, which can't be super mapped today).
*/
static void p2m_put_l3_page(const lpae_t pte)
{
ASSERT(p2m_valid(pte));
- /* TODO: Handle other p2m types
+ /*
+ * TODO: Handle other p2m types
*
* It's safe to do the put_page here because page_alloc will
* flush the TLBs if the page is reallocated before the end of
PAGE_LIST_HEAD(free_pages);
struct page_info *pg;
- /* Some IOMMU don't support coherent PT walk. When the p2m is
+ /*
+ * Some IOMMUs don't support coherent PT walk. When the p2m is
* shared with the CPU, Xen has to make sure that the PT changes have
* reached the memory
*/
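A sketch of how a PTE write is typically followed by a clean to memory when the walk is not coherent; write_pte() is the existing helper, while the dcache-clean helper name is an assumption here:

    static inline void p2m_write_pte(lpae_t *p, lpae_t pte, bool_t flush_cache)
    {
        write_pte(p, pte);
        if ( flush_cache )
            clean_xen_dcache(*p);
    }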
d->arch.vttbr = page_to_maddr(p2m->root)
| ((uint64_t)p2m->vmid&0xff)<<48;
- /* Make sure that all TLBs corresponding to the new VMID are flushed
+ /*
+ * Make sure that all TLBs corresponding to the new VMID are flushed
* before using it
*/
flush_tlb_domain(d);
static spinlock_t vmid_alloc_lock = SPIN_LOCK_UNLOCKED;
-/* VTTBR_EL2 VMID field is 8 bits. Using a bitmap here limits us to
- * 256 concurrent domains. */
+/*
+ * VTTBR_EL2 VMID field is 8 bits. Using a bitmap here limits us to
+ * 256 concurrent domains.
+ */
static DECLARE_BITMAP(vmid_mask, MAX_VMID);
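To illustrate how the bitmap is used, a sketch assuming the allocator follows Xen's usual bitmap idiom with find_first_zero_bit()/set_bit()/clear_bit() under vmid_alloc_lock:

    /* Reserve a free VMID for domain d, or fail when all 256 are in use. */
    static int p2m_alloc_vmid(struct domain *d)
    {
        struct p2m_domain *p2m = &d->arch.p2m;
        int nr, rc = 0;

        spin_lock(&vmid_alloc_lock);

        nr = find_first_zero_bit(vmid_mask, MAX_VMID);
        if ( nr == MAX_VMID )
            rc = -EBUSY;
        else
        {
            set_bit(nr, vmid_mask);
            p2m->vmid = nr;
        }

        spin_unlock(&vmid_alloc_lock);

        return rc;
    }

    /* Release the VMID on teardown so it can be reused. */
    static void p2m_free_vmid(struct domain *d)
    {
        spin_lock(&vmid_alloc_lock);
        clear_bit(d->arch.p2m.vmid, vmid_mask);
        spin_unlock(&vmid_alloc_lock);
    }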
void p2m_vmid_allocator_init(void)
/* Current VMID in use */
uint8_t vmid;
- /* Highest guest frame that's ever been mapped in the p2m
+ /*
+ * Highest guest frame that's ever been mapped in the p2m
* Only takes into account ram and foreign mappings
*/
gfn_t max_mapped_gfn;
- /* Lowest mapped gfn in the p2m. When releasing mapped gfn's in a
+ /*
+ * Lowest mapped gfn in the p2m. When releasing mapped gfns in a
* preemptible manner this is updated to track where to resume
* the search. Apart from during teardown this can only
* decrease.
*/
unsigned long shattered[4];
} stats;
- /* If true, and an access fault comes in and there is no vm_event listener,
- * pause domain. Otherwise, remove access restrictions. */
+ /*
+ * If true, and an access fault comes in and there is no vm_event listener,
+ * pause the domain. Otherwise, remove access restrictions.
+ */
bool_t access_required;
/* Defines if mem_access is in use for the domain. */
bool_t mem_access_enabled;
- /* Default P2M access type for each page in the the domain: new pages,
+ /*
+ * Default P2M access type for each page in the domain: new pages,
* swapped in pages, cleared pages, and pages that are ambiguously
- * retyped get this access type. See definition of p2m_access_t. */
+ * retyped get this access type. See definition of p2m_access_t.
+ */
p2m_access_t default_access;
- /* Radix tree to store the p2m_access_t settings as the pte's don't have
- * enough available bits to store this information. */
+ /*
+ * Radix tree to store the p2m_access_t settings as the ptes don't have
+ * enough available bits to store this information.
+ */
struct radix_tree_root mem_access_settings;
};
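For example, the per-page settings in mem_access_settings are keyed by gfn, falling back to default_access when no explicit entry exists. A sketch using Xen's radix_tree_lookup() and radix_tree_ptr_to_int() helpers:

    /* Return the access type recorded for gfn, or the domain default. */
    static p2m_access_t p2m_mem_access_radix_get(struct p2m_domain *p2m,
                                                 unsigned long gfn)
    {
        void *v;

        if ( !p2m->mem_access_enabled )
            return p2m->default_access;

        v = radix_tree_lookup(&p2m->mem_access_settings, gfn);

        return v ? radix_tree_ptr_to_int(v) : p2m->default_access;
    }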
-/* List of possible type for each page in the p2m entry.
+/*
+ * List of possible types for each page in the p2m entry.
* The number of bits available per page in the pte for this purpose is 4,
* so only 16 values are possible. If we run out of values in the
* future, it's possible to use higher values as pseudo-types and not store
/* Return all the p2m resources to Xen. */
void p2m_teardown(struct domain *d);
-/* Remove mapping refcount on each mapping page in the p2m
+/*
+ * Remove the mapping refcount on each mapped page in the p2m
*
* TODO: For the moment only foreign mappings are handled
*/
int relinquish_p2m_mapping(struct domain *d);
-/* Allocate a new p2m table for a domain.
+/*
+ * Allocate a new p2m table for a domain.
*
* Returns 0 for success or -errno.
*/
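Given the documented contract, a caller only needs to propagate the error code. An illustrative (hypothetical) call site:

    rc = p2m_alloc_table(d);
    if ( rc != 0 )
        goto fail;   /* -errno from the allocator; p2m_teardown() cleans up */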
* Populate-on-demand
*/
-/* Call when decreasing memory reservation to handle PoD entries properly.
- * Will return '1' if all entries were handled and nothing more need be done.*/
+/*
+ * Call when decreasing memory reservation to handle PoD entries properly.
+ * Will return '1' if all entries were handled and nothing more need be done.
+ */
int
p2m_pod_decrease_reservation(struct domain *d,
xen_pfn_t gpfn,
return NULL;
page = mfn_to_page(mfn);
- /* get_page won't work on foreign mapping because the page doesn't
+ /*
+ * get_page won't work on a foreign mapping because the page doesn't
* belong to the current domain.
*/
if ( p2mt == p2m_map_foreign )
return 1;
}
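Because get_page() cannot be used for a foreign mapping, the reference is instead taken against the page's real owner. A sketch of that branch, assuming the enclosing function's domain parameter is d and using the existing page_get_owner_and_reference() helper:

    if ( p2mt == p2m_map_foreign )
    {
        struct domain *fdom = page_get_owner_and_reference(page);

        /* A foreign page is always owned by some other domain. */
        ASSERT(fdom != NULL);
        ASSERT(fdom != d);

        return page;
    }

    /* Otherwise take the reference against the current domain as usual. */
    if ( !get_page(page, d) )
        return NULL;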
-/* Send mem event based on the access. Boolean return value indicates if trap
- * needs to be injected into guest. */
+/*
+ * Send a mem event based on the access. The boolean return value indicates
+ * whether a trap needs to be injected into the guest.
+ */
bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec);
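To illustrate the return-value contract, a simplified (hypothetical) caller in the guest permission-fault path would look like:

    /* false: the fault was consumed by mem_access, nothing to inject. */
    if ( !p2m_mem_access_check(gpa, gla, npfec) )
        return;

    /* true: fall through and inject the abort into the guest. */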
#endif /* _XEN_P2M_H */