- On x86:
- HVM PIRQs are disabled by default.
- Reduce IOMMU setup time for hardware domain.
+ - Allow HVM/PVH domains to map foreign pages.
- xl/libxl configures vkb=[] for HVM domains with priority over vkb_device.
- Increase the maximum number of CPUs Xen can be built for from 4095 to
16383.
/* Ordered progress markers for the preemptable domain relinquish path. */
enum {
PROG_iommu_pagetables = 1,
PROG_shared,
+ /* New stage: tear down foreign p2m mappings ahead of paging teardown. */
+ PROG_mappings,
PROG_paging,
PROG_vcpu_pagetables,
PROG_xen,
}
#endif
+ PROGRESS(mappings):
+ ret = relinquish_p2m_mapping(d);
+ if ( ret )
+ return ret;
+
PROGRESS(paging):
/* Tear down paging-assistance stuff. */
/* Number of foreign mappings. */
unsigned long nr_foreign;
+ /* Cursor for iterating over the p2m on teardown. */
+ unsigned long teardown_gfn;
#endif /* CONFIG_HVM */
};
#endif
#include <xen/p2m-common.h>
-static inline bool arch_acquire_resource_check(struct domain *d)
-{
- /*
- * FIXME: Until foreign pages inserted into the P2M are properly
- * reference counted, it is unsafe to allow mapping of
- * resource pages unless the caller is the hardware domain
- * (see set_foreign_p2m_entry()).
- */
- return !paging_mode_translate(d) || is_hardware_domain(d);
-}
+bool arch_acquire_resource_check(const struct domain *d);
/*
* Updates vCPU's n2pm to match its np2m_base in VMCx12 and returns that np2m.
void
p2m_pod_offline_or_broken_replace(struct page_info *p);
+/* Perform cleanup of p2m mappings ahead of teardown. */
+int
+relinquish_p2m_mapping(struct domain *d);
+
#else
static inline bool
ASSERT_UNREACHABLE();
}
+/*
+ * !CONFIG_HVM stub: without HVM support no foreign p2m mappings can exist,
+ * so there is nothing to relinquish and teardown trivially succeeds.
+ */
+static inline int relinquish_p2m_mapping(struct domain *d)
+{
+ return 0;
+}
+
#endif
break;
case p2m_map_foreign:
- if ( !mfn_valid(nfn) )
+ if ( !mfn_valid(nfn) || p2m != p2m_get_hostp2m(p2m->domain) )
{
ASSERT_UNREACHABLE();
return -EINVAL;
break;
case p2m_map_foreign:
- if ( !mfn_valid(ofn) )
+ if ( !mfn_valid(ofn) || p2m != p2m_get_hostp2m(p2m->domain) )
{
ASSERT_UNREACHABLE();
return -EINVAL;
#include <xen/event.h>
#include <xen/types.h>
+#include <asm/altp2m.h>
+#include <asm/hvm/nestedhvm.h>
#include <asm/p2m.h>
#include "mm-locks.h"
#include "p2m.h"
p2m_teardown_hostp2m(d);
}
+/*
+ * Whether domain @d may acquire (map) foreign resource pages.  PV domains
+ * are always allowed; translated (HVM/PVH) domains are allowed only when
+ * neither altp2m nor nested HVM is enabled, for the reasons below.
+ */
+bool arch_acquire_resource_check(const struct domain *d)
+{
+ /*
+ * altp2m is not supported as we would otherwise also need to walk the
+ * altp2m tables and drop any foreign map entries in order to drop the page
+ * reference.
+ *
+ * The same applies to nestedhvm nested p2m tables, as the type from the L0
+ * p2m is replicated into the L1 p2m, and there's no filtering that
+ * prevents foreign mappings from being created in nestedp2m.
+ */
+ return is_pv_domain(d) ||
+ (d->arch.hvm.params[HVM_PARAM_ALTP2M] == XEN_ALTP2M_disabled &&
+ !nestedhvm_enabled(d));
+}
+
/*
* Local variables:
* mode: C
int rc;
struct domain *fdom;
- /*
- * hvm fixme: until support is added to p2m teardown code to cleanup any
- * foreign entries, limit this to hardware domain only.
- */
if ( !arch_acquire_resource_check(tdom) )
return -EPERM;
return rc;
}
+/*
+ * Remove foreign mappings from the p2m, as that drops the page reference taken
+ * when mapped.
+ *
+ * Preemptable: returns -ERESTART if a continuation is required, in which case
+ * p2m->teardown_gfn records where to resume.  Returns 0 on completion, or a
+ * p2m_set_entry() error code on failure.
+ */
+int relinquish_p2m_mapping(struct domain *d)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ unsigned long gfn, count = 0;
+ int rc = 0;
+
+ /* PV (non-translated) domains have no p2m to walk. */
+ if ( !paging_mode_translate(d) )
+ return 0;
+
+ /* Only valid as part of domain destruction. */
+ BUG_ON(!d->is_dying);
+
+ p2m_lock(p2m);
+
+ /* Resume from the cursor left by a previously preempted invocation. */
+ gfn = p2m->teardown_gfn;
+
+ /* Iterate over the whole p2m on debug builds to ensure correctness. */
+ while ( gfn <= p2m->max_mapped_pfn &&
+ (IS_ENABLED(CONFIG_DEBUG) || p2m->nr_foreign) )
+ {
+ unsigned int order;
+ p2m_type_t t;
+ p2m_access_t a;
+
+ _get_gfn_type_access(p2m, _gfn(gfn), &t, &a, 0, &order, 0);
+ ASSERT(IS_ALIGNED(gfn, 1UL << order));
+
+ if ( t == p2m_map_foreign )
+ {
+ /* Foreign maps are expected to be single (4K) pages only. */
+ ASSERT(p2m->nr_foreign);
+ ASSERT(order == 0);
+
+ /* Replacing the entry drops the foreign page reference. */
+ rc = p2m_set_entry(p2m, _gfn(gfn), INVALID_MFN, order, p2m_invalid,
+ p2m->default_access);
+ if ( rc )
+ {
+ printk(XENLOG_ERR
+ "%pd: failed to unmap foreign page %" PRI_gfn " order %u error %d\n",
+ d, gfn, order, rc);
+ ASSERT_UNREACHABLE();
+ break;
+ }
+ }
+
+ gfn += 1UL << order;
+
+ /* Check for preemption once every 256 processed entries. */
+ if ( !(++count & 0xff) && hypercall_preempt_check() )
+ {
+ rc = -ERESTART;
+ break;
+ }
+ }
+
+ /* If the walk finished early, all foreign maps must already be gone. */
+ ASSERT(gfn <= p2m->max_mapped_pfn || !p2m->nr_foreign);
+ /* Save the cursor so a continuation picks up where we stopped. */
+ p2m->teardown_gfn = gfn;
+
+ p2m_unlock(p2m);
+
+ return rc;
+}
+
/*
* Local variables:
* mode: C