int xenmem_add_to_physmap_one(
struct domain *d,
unsigned int space,
- union xen_add_to_physmap_batch_extra extra,
+ union add_to_physmap_extra extra,
unsigned long idx,
gfn_t gfn)
{
break;
}
case XENMAPSPACE_dev_mmio:
- /* extra should be 0. Reserved for future use. */
- if ( extra.res0 )
- return -EOPNOTSUPP;
-
rc = map_dev_mmio_region(d, gfn, 1, _mfn(idx));
return rc;
int xenmem_add_to_physmap_one(
struct domain *d,
unsigned int space,
- union xen_add_to_physmap_batch_extra extra,
+ union add_to_physmap_extra extra,
unsigned long idx,
gfn_t gpfn)
{
rc = guest_physmap_add_page(d, gpfn, mfn, PAGE_ORDER_4K);
put_both:
- /* In the XENMAPSPACE_gmfn, we took a ref of the gfn at the top */
+ /*
+ * In the XENMAPSPACE_gmfn case, we took a ref of the gfn at the top.
+ * We also may need to transfer ownership of the page reference to our
+ * caller.
+ */
if ( space == XENMAPSPACE_gmfn || space == XENMAPSPACE_gmfn_range )
+ {
put_gfn(d, gfn);
+ if ( !rc && extra.ppage )
+ {
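+ /* Hand the reference to the caller; clearing page skips the put_page() below. */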
+ *extra.ppage = page;
+ page = NULL;
+ }
+ }
if ( page )
put_page(page);
{
unsigned int done = 0;
long rc = 0;
- union xen_add_to_physmap_batch_extra extra;
+ union add_to_physmap_extra extra = {};
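+ /* Page refs kept here until after the deferred IOMMU TLB flush below. */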
+ struct page_info *pages[16];
- if ( xatp->space != XENMAPSPACE_gmfn_foreign )
- extra.res0 = 0;
- else
+ if ( xatp->space == XENMAPSPACE_gmfn_foreign )
extra.foreign_domid = DOMID_INVALID;
if ( xatp->space != XENMAPSPACE_gmfn_range )
#ifdef CONFIG_HAS_PASSTHROUGH
if ( need_iommu(d) )
+ {
this_cpu(iommu_dont_flush_iotlb) = 1;
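+ /* Ask xenmem_add_to_physmap_one() to hand back page references. */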
+ extra.ppage = &pages[0];
+ }
#endif
while ( xatp->size > done )
xatp->idx++;
xatp->gpfn++;
+ if ( extra.ppage )
+ ++extra.ppage;
+
/* Check for continuation if it's not the last iteration. */
- if ( xatp->size > ++done && hypercall_preempt_check() )
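+ /* Also stop once pages[] is full, so the held references can be dropped. */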
+ if ( (++done >= ARRAY_SIZE(pages) && extra.ppage) ||
+ (xatp->size > done && hypercall_preempt_check()) )
{
rc = start + done;
break;
if ( need_iommu(d) )
{
int ret;
+ unsigned int i;
this_cpu(iommu_dont_flush_iotlb) = 0;
ret = iommu_iotlb_flush(d, xatp->idx - done, done);
if ( unlikely(ret) && rc >= 0 )
rc = ret;
+ /*
+ * Now that the IOMMU TLB flush was done for the original GFN, drop
+ * the page references. The 2nd flush below is fine to make later, as
+ * whoever removes the page again from its new GFN will have to do
+ * another flush anyway.
+ */
+ for ( i = 0; i < done; ++i )
+ put_page(pages[i]);
+
ret = iommu_iotlb_flush(d, xatp->gpfn - done, done);
if ( unlikely(ret) && rc >= 0 )
rc = ret;
{
unsigned int done = 0;
int rc;
+ union add_to_physmap_extra extra = {};
if ( xatpb->size < start )
return -EILSEQ;
!guest_handle_okay(xatpb->errs, xatpb->size) )
return -EFAULT;
+ switch ( xatpb->space )
+ {
+ case XENMAPSPACE_dev_mmio:
+ /* res0 is reserved for future use. */
+ if ( xatpb->u.res0 )
+ return -EOPNOTSUPP;
+ break;
+
+ case XENMAPSPACE_gmfn_foreign:
+ extra.foreign_domid = xatpb->u.foreign_domid;
+ break;
+ }
+
while ( xatpb->size > done )
{
xen_ulong_t idx;
goto out;
}
- rc = xenmem_add_to_physmap_one(d, xatpb->space,
- xatpb->u,
+ rc = xenmem_add_to_physmap_one(d, xatpb->space, extra,
idx, _gfn(gpfn));
if ( unlikely(__copy_to_guest_offset(xatpb->errs, 0, &rc, 1)) )
&(d)->xenpage_list : &(d)->page_list)
#endif
+union add_to_physmap_extra {
+ /*
+ * XENMAPSPACE_gmfn: When deferring TLB flushes, a page reference needs
+ * to be kept until after the flush, so the page can't get removed from
+ * the domain (and re-used for another purpose) beforehand. By passing
+ * non-NULL, the caller of xenmem_add_to_physmap_one() indicates it wants
+ * to have ownership of such a reference transferred in the success case.
+ */
+ struct page_info **ppage;
+
+ /* XENMAPSPACE_gmfn_foreign */
+ domid_t foreign_domid;
+};
+
int xenmem_add_to_physmap_one(struct domain *d, unsigned int space,
- union xen_add_to_physmap_batch_extra extra,
+ union add_to_physmap_extra extra,
unsigned long idx, gfn_t gfn);
/* Return 0 on success, or negative on error. */