...for some uses of get_page_from_gfn().
There are many occurrences of the following pattern in the code:
q = <readonly look-up> ? P2M_ALLOC : P2M_UNSHARE;
page = get_page_from_gfn(d, gfn, &p2mt, q);
if ( p2m_is_paging(p2mt) )
{
if ( page )
put_page(page);
p2m_mem_paging_populate(d, gfn);
return <-EAGAIN or equivalent>;
}
if ( (q & P2M_UNSHARE) && p2m_is_shared(p2mt) )
{
if ( page )
put_page(page);
return <-EAGAIN or equivalent>;
}
if ( !page )
return <-EINVAL or equivalent>;
There are some small differences in the exact way the occurrences
are coded, but the desired semantics are the same.
This patch introduces a new common implementation of this code in
check_get_page_from_gfn() and then converts the various open-coded patterns
into calls to this new function.
NOTE: A forward declaration of p2m_type_t enum has been introduced in
p2m-common.h so that it is possible to declare
check_get_page_from_gfn() there rather than having to add
duplicate declarations in the per-architecture p2m headers.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Roger Pau Monne <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Julien Grall <julien.grall@arm.com>
struct domain *curr_d = current->domain;
p2m_type_t p2mt;
- *page = get_page_from_gfn(curr_d, gmfn, &p2mt, P2M_UNSHARE);
-
- if ( *page == NULL )
- return X86EMUL_UNHANDLEABLE;
-
- if ( p2m_is_paging(p2mt) )
+ switch ( check_get_page_from_gfn(curr_d, _gfn(gmfn), false, &p2mt,
+ page) )
{
- put_page(*page);
- p2m_mem_paging_populate(curr_d, gmfn);
- return X86EMUL_RETRY;
- }
+ case 0:
+ break;
- if ( p2m_is_shared(p2mt) )
- {
- put_page(*page);
+ case -EAGAIN:
return X86EMUL_RETRY;
+
+ default:
+ ASSERT_UNREACHABLE();
+ /* Fallthrough */
+
+ case -EINVAL:
+ return X86EMUL_UNHANDLEABLE;
}
/* This code should not be reached if the gmfn is not RAM */
struct page_info *page;
struct domain *d = current->domain;
- page = get_page_from_gfn(d, gfn, &p2mt,
- writable ? P2M_UNSHARE : P2M_ALLOC);
- if ( (p2m_is_shared(p2mt) && writable) || !page )
- {
- if ( page )
- put_page(page);
- return NULL;
- }
- if ( p2m_is_paging(p2mt) )
- {
- put_page(page);
- p2m_mem_paging_populate(d, gfn);
+ if ( check_get_page_from_gfn(d, _gfn(gfn), !writable, &p2mt, &page) )
return NULL;
- }
if ( writable )
{
struct page_info **page, bool readonly,
struct domain *rd)
{
- int rc = GNTST_okay;
p2m_type_t p2mt;
+ int rc;
- *mfn = INVALID_MFN;
- *page = get_page_from_gfn(rd, gfn, &p2mt,
- readonly ? P2M_ALLOC : P2M_UNSHARE);
- if ( !*page )
+ rc = check_get_page_from_gfn(rd, _gfn(gfn), readonly, &p2mt, page);
+ switch ( rc )
{
-#ifdef P2M_SHARED_TYPES
- if ( p2m_is_shared(p2mt) )
- return GNTST_eagain;
-#endif
-#ifdef P2M_PAGES_TYPES
- if ( p2m_is_paging(p2mt) )
- {
- p2m_mem_paging_populate(rd, gfn);
- return GNTST_eagain;
- }
-#endif
+ case 0:
+ break;
+
+ case -EAGAIN:
+ return GNTST_eagain;
+
+ default:
+ ASSERT_UNREACHABLE();
+ /* Fallthrough */
+
+ case -EINVAL:
return GNTST_bad_page;
}
*mfn = page_to_mfn(*page);
- return rc;
+ return GNTST_okay;
}
static inline void
}
}
-int prepare_ring_for_helper(
- struct domain *d, unsigned long gmfn, struct page_info **_page,
- void **_va)
+/*
+ * Acquire a pointer to struct page_info for a specified domain and GFN,
+ * checking whether the page has been paged out, or needs unsharing.
+ * If the function succeeds then zero is returned, page_p is written
+ * with a pointer to the struct page_info with a reference taken, and
+ * p2mt_p is written with the P2M type of the page. The caller is
+ * responsible for dropping the reference.
+ * If the function fails then an appropriate errno is returned and the
+ * values referenced by page_p and p2mt_p are undefined.
+ */
+int check_get_page_from_gfn(struct domain *d, gfn_t gfn, bool readonly,
+ p2m_type_t *p2mt_p, struct page_info **page_p)
{
- struct page_info *page;
+ p2m_query_t q = readonly ? P2M_ALLOC : P2M_UNSHARE;
p2m_type_t p2mt;
- void *va;
+ struct page_info *page;
- page = get_page_from_gfn(d, gmfn, &p2mt, P2M_UNSHARE);
+ page = get_page_from_gfn(d, gfn_x(gfn), &p2mt, q);
#ifdef CONFIG_HAS_MEM_PAGING
if ( p2m_is_paging(p2mt) )
{
if ( page )
put_page(page);
- p2m_mem_paging_populate(d, gmfn);
- return -ENOENT;
+
+ p2m_mem_paging_populate(d, gfn_x(gfn));
+ return -EAGAIN;
}
#endif
#ifdef CONFIG_HAS_MEM_SHARING
- if ( p2m_is_shared(p2mt) )
+ if ( (q & P2M_UNSHARE) && p2m_is_shared(p2mt) )
{
if ( page )
put_page(page);
- return -ENOENT;
+
+ return -EAGAIN;
}
#endif
if ( !page )
return -EINVAL;
+ *p2mt_p = p2mt;
+ *page_p = page;
+ return 0;
+}
+
+int prepare_ring_for_helper(
+ struct domain *d, unsigned long gmfn, struct page_info **_page,
+ void **_va)
+{
+ p2m_type_t p2mt;
+ struct page_info *page;
+ void *va;
+ int rc;
+
+ rc = check_get_page_from_gfn(d, _gfn(gmfn), false, &p2mt, &page);
+ if ( rc )
+ return (rc == -EAGAIN) ? -ENOENT : rc;
+
if ( !get_page_type(page, PGT_writable_page) )
{
put_page(page);
* future, it's possible to use higher value for pseudo-type and don't store
* them in the p2m entry.
*/
-typedef enum {
+enum p2m_type {
p2m_invalid = 0, /* Nothing mapped here */
p2m_ram_rw, /* Normal read/write guest RAM */
p2m_ram_ro, /* Read-only; writes are silently dropped */
p2m_iommu_map_rw, /* Read/write iommu mapping */
p2m_iommu_map_ro, /* Read-only iommu mapping */
p2m_max_real_type, /* Types after this won't be store in the p2m */
-} p2m_type_t;
+};
/* We use bitmaps and mask to handle groups of types */
#define p2m_to_mask(_t) (1UL << (_t))
* cannot be non-zero, otherwise, hardware generates io page faults when
* device access those pages. Therefore, p2m_ram_rw has to be defined as 0.
*/
-typedef enum {
+enum p2m_type {
p2m_ram_rw = 0, /* Normal read/write guest RAM */
p2m_invalid = 1, /* Nothing mapped here */
p2m_ram_logdirty = 2, /* Temporarily read-only for log-dirty */
p2m_ram_broken = 13, /* Broken page, access cause domain crash */
p2m_map_foreign = 14, /* ram pages from foreign domain */
p2m_ioreq_server = 15,
-} p2m_type_t;
+};
/* Modifiers to the query */
typedef unsigned int p2m_query_t;
return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL;
}
-
/* General conversion function from mfn to gfn */
static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
{
p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
unsigned int order);
+typedef enum p2m_type p2m_type_t;
+
+int __must_check check_get_page_from_gfn(struct domain *d, gfn_t gfn,
+ bool readonly, p2m_type_t *p2mt_p,
+ struct page_info **page_p);
+
#endif /* _XEN_P2M_COMMON_H */