Populate-on-demand is HVM only.
Provide stubs for the PoD routines used by common p2m code, and guard the one
invocation of guest_physmap_mark_populate_on_demand() with is_hvm_domain().
Put the relevant fields in struct p2m_domain, and the code which touches them,
under CONFIG_HVM.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tamas K Lengyel <tamas@tklengyel.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
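
The stub approach in a nutshell: the real PoD helpers are declared only under
CONFIG_HVM, while !CONFIG_HVM builds get inline stubs with the same signatures,
so callers outside the PoD code compile unchanged and never touch the guarded
pod fields directly. Below is a minimal standalone sketch of that pattern;
CONFIG_FOO, struct foo_domain and foo_entry_count() are made-up stand-ins for
illustration only, not Xen code; the real declarations are in the p2m.h hunk
further down.

    #include <stdio.h>

    struct foo_domain {
    #ifdef CONFIG_FOO
        long entry_count;   /* field exists only when the feature is built in */
    #endif
        int id;
    };

    #ifdef CONFIG_FOO
    /* Real accessor: allowed to touch the guarded field. */
    static inline long foo_entry_count(const struct foo_domain *d)
    {
        return d->entry_count;
    }
    #else
    /* Stub for builds without the feature: same signature, trivial answer. */
    static inline long foo_entry_count(const struct foo_domain *d)
    {
        return 0;
    }
    #endif

    int main(void)
    {
        struct foo_domain d = { .id = 1 };

        /* Callers use the accessor unconditionally; no #ifdef at the call site. */
        printf("entry count: %ld\n", foo_entry_count(&d));

        return 0;
    }
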
ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
break;
-#if P2M_AUDIT
+#if P2M_AUDIT && defined(CONFIG_HVM)
case XEN_DOMCTL_audit_p2m:
if ( d == currd )
ret = -EPERM;
return 0;
}
+#ifdef CONFIG_HVM
case XENMEM_set_pod_target:
case XENMEM_get_pod_target:
{
rcu_unlock_domain(d);
return rc;
}
+#endif
default:
return subarch_memory_op(cmd, arg);
return rc;
}
+void p2m_pod_init(struct p2m_domain *p2m)
+{
+    unsigned int i;
+
+    mm_lock_init(&p2m->pod.lock);
+    INIT_PAGE_LIST_HEAD(&p2m->pod.super);
+    INIT_PAGE_LIST_HEAD(&p2m->pod.single);
+
+    for ( i = 0; i < ARRAY_SIZE(p2m->pod.mrp.list); ++i )
+        p2m->pod.mrp.list[i] = gfn_x(INVALID_GFN);
+}
return err;
}
-#if P2M_AUDIT
+#if P2M_AUDIT && defined(CONFIG_HVM)
long p2m_pt_audit_p2m(struct p2m_domain *p2m)
{
unsigned long entry_count = 0, pmbad = 0;
return pmbad;
}
+#else
+# define p2m_pt_audit_p2m NULL
#endif /* P2M_AUDIT */
/* Set up the p2m function pointers for pagetable format */
/* Init the datastructures for later use by the p2m code */
static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
{
- unsigned int i;
int ret = 0;
mm_rwlock_init(&p2m->lock);
- mm_lock_init(&p2m->pod.lock);
INIT_LIST_HEAD(&p2m->np2m_list);
INIT_PAGE_LIST_HEAD(&p2m->pages);
- INIT_PAGE_LIST_HEAD(&p2m->pod.super);
- INIT_PAGE_LIST_HEAD(&p2m->pod.single);
p2m->domain = d;
p2m->default_access = p2m_access_rwx;
p2m->np2m_base = P2M_BASE_EADDR;
p2m->np2m_generation = 0;
- for ( i = 0; i < ARRAY_SIZE(p2m->pod.mrp.list); ++i )
- p2m->pod.mrp.list[i] = gfn_x(INVALID_GFN);
+ p2m_pod_init(p2m);
if ( hap_enabled(d) && cpu_has_vmx )
ret = ept_p2m_init(p2m);
gfn_x(gfn), mfn_x(mfn));
rc = p2m_set_entry(p2m, gfn, INVALID_MFN, page_order,
p2m_invalid, p2m->default_access);
+#ifdef CONFIG_HVM
if ( rc == 0 )
{
pod_lock(p2m);
BUG_ON(p2m->pod.entry_count < 0);
pod_unlock(p2m);
}
+#endif
}
out:
if ( rc )
gdprintk(XENLOG_ERR, "p2m_set_entry: %#lx:%u -> %d (0x%"PRI_mfn")\n",
gfn_l, order, rc, mfn_x(mfn));
+#ifdef CONFIG_HVM
else if ( p2m_is_pod(ot) )
{
pod_lock(p2m);
BUG_ON(p2m->pod.entry_count < 0);
pod_unlock(p2m);
}
+#endif
gfn_unlock(p2m, gfn, order);
return rc;
* when discarding them.
*/
ASSERT(!p2m_is_hostp2m(p2m));
+#ifdef CONFIG_HVM
/* Nested p2m's do not do pod, hence the asserts (and no pod lock)*/
ASSERT(page_list_empty(&p2m->pod.super));
ASSERT(page_list_empty(&p2m->pod.single));
+#endif
/* No need to flush if it's already empty */
if ( p2m_is_nestedp2m(p2m) && p2m->np2m_base == P2M_BASE_EADDR )
/*** Audit ***/
-#if P2M_AUDIT
+#if P2M_AUDIT && defined(CONFIG_HVM)
void audit_p2m(struct domain *d,
uint64_t *orphans,
uint64_t *m2p_bad,
if ( d == curr_d )
goto out;
- if ( guest_physmap_mark_populate_on_demand(d, gpfn,
+ if ( !is_hvm_domain(d) ||
+ guest_physmap_mark_populate_on_demand(d, gpfn,
a->extent_order) < 0 )
goto out;
}
{
case XEN_VM_EVENT_ENABLE:
{
- struct p2m_domain *p2m = p2m_get_hostp2m(d);
-
rc = -EOPNOTSUPP;
/* hvm fixme: p2m_is_foreign types need addressing */
if ( is_hvm_domain(hardware_domain) )
rc = -EXDEV;
/* Disallow paging in a PoD guest */
- if ( p2m->pod.entry_count )
+ if ( p2m_pod_entry_count(p2m_get_hostp2m(d)) )
break;
/* domain_pause() not required here, see XSA-99 */
* to resume the search */
unsigned long next_shared_gfn_to_relinquish;
+#ifdef CONFIG_HVM
/* Populate-on-demand variables
* All variables are protected with the pod lock. We cannot rely on
* the p2m lock if it's turned into a fine-grained lock.
mm_lock_t lock; /* Locking of private pod structs, *
* not relying on the p2m lock. */
} pod;
+#endif
+
union {
struct ept_data ept;
/* NPT-equivalent structure could be added here. */
/* Dump PoD information about the domain */
void p2m_pod_dump_data(struct domain *d);
+#ifdef CONFIG_HVM
+
+/* Called by p2m code when demand-populating a PoD page */
+bool
+p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order);
+
/* Move all pages from the populate-on-demand cache to the domain page_list
* (usually in preparation for domain destruction) */
int p2m_pod_empty_cache(struct domain *d);
void
p2m_pod_offline_or_broken_replace(struct page_info *p);
+static inline long p2m_pod_entry_count(const struct p2m_domain *p2m)
+{
+ return p2m->pod.entry_count;
+}
+
+void p2m_pod_init(struct p2m_domain *p2m);
+
+#else
+
+static inline bool
+p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order)
+{
+ return false;
+}
+
+static inline int p2m_pod_empty_cache(struct domain *d)
+{
+ return 0;
+}
+
+static inline int p2m_pod_offline_or_broken_hit(struct page_info *p)
+{
+ return 0;
+}
+
+static inline void p2m_pod_offline_or_broken_replace(struct page_info *p)
+{
+ ASSERT_UNREACHABLE();
+}
+
+static inline long p2m_pod_entry_count(const struct p2m_domain *p2m)
+{
+ return 0;
+}
+
+static inline void p2m_pod_init(struct p2m_domain *p2m) {}
+
+#endif
+
/*
* Paging to disk and page-sharing
#define P2M_DEBUG(f, a...) do { (void)(f); } while(0)
#endif
-/* Called by p2m code when demand-populating a PoD page */
-bool
-p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order);
-
/*
* Functions specific to the p2m-pt implementation
*/
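
The audit path uses a second variant of the same idea: p2m_pt_audit_p2m() is
consumed through the p2m audit function pointer rather than called directly,
which is why a plain NULL define works as the fallback when the audit code is
compiled out. A standalone sketch of that variant, using made-up names
(FOO_AUDIT, struct foo_ops, foo_audit()) and a NULL check at the call site
purely for illustration:

    #include <stddef.h>
    #include <stdio.h>

    struct foo_ops {
        long (*audit)(void);            /* optional hook, may be NULL */
    };

    #ifdef FOO_AUDIT
    static long foo_audit(void)
    {
        return 0;                       /* stands in for the real audit walk */
    }
    #else
    # define foo_audit NULL             /* consumers check the pointer instead */
    #endif

    int main(void)
    {
        struct foo_ops ops = { .audit = foo_audit };

        if ( ops.audit )
            printf("audit result: %ld\n", ops.audit());
        else
            printf("audit hook not built in\n");

        return 0;
    }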