return 0;
}
-/* If the map is non-NULL, we leave this function having
- * acquired an extra ref on mfn_to_page(*mfn) */
-void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
- p2m_type_t *p2mt, p2m_query_t q, uint32_t *rc)
-{
- struct page_info *page;
- void *map;
-
- /* Translate the gfn, unsharing if shared */
- page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), p2mt, NULL,
- q);
- if ( p2m_is_paging(*p2mt) )
- {
- ASSERT(p2m_is_hostp2m(p2m));
- if ( page )
- put_page(page);
- p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
- *rc = _PAGE_PAGED;
- return NULL;
- }
- if ( p2m_is_shared(*p2mt) )
- {
- if ( page )
- put_page(page);
- *rc = _PAGE_SHARED;
- return NULL;
- }
- if ( !page )
- {
- *rc |= _PAGE_PRESENT;
- return NULL;
- }
- *mfn = _mfn(page_to_mfn(page));
- ASSERT(mfn_valid(mfn_x(*mfn)));
-
- map = map_domain_page(*mfn);
- return map;
-}
-
-
/* Walk the guest pagetables, after the manner of a hardware walker. */
/* Because the walk is essentially random, it can cause a deadlock
* warning in the p2m locking code. Highly unlikely this is an actual
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vvmx.h>
-/* EPT always use 4-level paging structure */
-#define GUEST_PAGING_LEVELS 4
-#include <asm/guest_pt.h>
-
/* Bits that must be reserved in all level entries */
#define EPT_MUST_RSV_BITS (((1ull << PADDR_BITS) - 1) & \
~((1ull << paddr_bits) - 1))
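
(For review context, a worked example of what this mask covers: on a host whose CPUID reports paddr_bits == 46, with the architectural PADDR_BITS being 52, EPT_MUST_RSV_BITS evaluates to 0x000fc00000000000, i.e. address bits 46-51, which any valid EPT entry must leave clear. The specific paddr_bits value is illustrative.)
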
return hostmode->gva_to_gfn(v, hostp2m, va, pfec);
}
+/*
+ * If the map is non-NULL, we leave this function having
+ * acquired an extra ref on mfn_to_page(*mfn).
+ */
+void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
+ p2m_type_t *p2mt, p2m_query_t q, uint32_t *rc)
+{
+ struct page_info *page;
+
+ /* Translate the gfn, unsharing if shared. */
+ page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), p2mt, NULL, q);
+ if ( p2m_is_paging(*p2mt) )
+ {
+ ASSERT(p2m_is_hostp2m(p2m));
+ if ( page )
+ put_page(page);
+ p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
+ *rc = _PAGE_PAGED;
+ return NULL;
+ }
+ if ( p2m_is_shared(*p2mt) )
+ {
+ if ( page )
+ put_page(page);
+ *rc = _PAGE_SHARED;
+ return NULL;
+ }
+ if ( !page )
+ {
+ *rc |= _PAGE_PRESENT;
+ return NULL;
+ }
+ *mfn = page_to_mfn(page);
+ ASSERT(mfn_valid(*mfn));
+
+ return map_domain_page(*mfn);
+}
+
int map_mmio_regions(struct domain *d,
unsigned long start_gfn,
unsigned long nr,
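
Not part of the patch, just for review context: a minimal sketch of the caller
contract described by the comment on the consolidated function above. The
surrounding function, the P2M_ALLOC | P2M_UNSHARE query flags, and the use of
gfn/p2m from the caller's context are all hypothetical, and mfn_to_page() is
assumed to still take a raw MFN (hence the mfn_x() unwrap):

    p2m_type_t p2mt;
    uint32_t rc = 0;
    mfn_t mfn;
    void *ptr;

    /* Map one guest frame; on success we also hold a ref on its page. */
    ptr = map_domain_gfn(p2m, gfn, &mfn, &p2mt, P2M_ALLOC | P2M_UNSHARE, &rc);
    if ( ptr == NULL )
        return; /* rc now carries _PAGE_PAGED, _PAGE_SHARED or _PAGE_PRESENT */

    /* ... read or write the mapped frame ... */

    unmap_domain_page(ptr);              /* drop the mapping */
    put_page(mfn_to_page(mfn_x(mfn)));   /* drop the extra page reference */
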
#define GPT_RENAME2(_n, _l) _n ## _ ## _l ## _levels
#define GPT_RENAME(_n, _l) GPT_RENAME2(_n, _l)
#define guest_walk_tables GPT_RENAME(guest_walk_tables, GUEST_PAGING_LEVELS)
-#define map_domain_gfn GPT_RENAME(map_domain_gfn, GUEST_PAGING_LEVELS)
-
-void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
- p2m_type_t *p2mt, p2m_query_t q, uint32_t *rc);
extern uint32_t
guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, unsigned long va,
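
(For illustration: GPT_RENAME() pastes the level count onto the symbol, so
with GUEST_PAGING_LEVELS == 3 the wrapper effectively yields

    #define guest_walk_tables guest_walk_tables_3_levels

map_domain_gfn no longer needs this per-level renaming because the single
instance in p2m.c above now serves every walker.)
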
/* Set up function pointers for PT implementation: only for use by p2m code */
extern void p2m_pt_init(struct p2m_domain *p2m);
+void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
+ p2m_type_t *p2mt, p2m_query_t q, uint32_t *rc);
+
/* Debugging and auditing of the P2M code? */
#ifndef NDEBUG
#define P2M_AUDIT 1