}
int xc_memshr_fork(xc_interface *xch, uint32_t pdomid, uint32_t domid,
- bool allow_with_iommu, bool block_interrupts)
+ bool allow_with_iommu, bool block_interrupts,
+ bool empty_p2m)
{
xen_mem_sharing_op_t mso;
memset(&mso, 0, sizeof(mso));
mso.op = XENMEM_sharing_op_fork;
mso.u.fork.parent_domain = pdomid;
if ( allow_with_iommu )
mso.u.fork.flags |= XENMEM_FORK_WITH_IOMMU_ALLOWED;
if ( block_interrupts )
mso.u.fork.flags |= XENMEM_FORK_BLOCK_INTERRUPTS;
+ if ( empty_p2m )
+ mso.u.fork.flags |= XENMEM_FORK_EMPTY_P2M;
return xc_memshr_memop(xch, domid, &mso);
}
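For illustration only, a minimal sketch of how a toolstack caller might use the extended libxc wrapper; the helper name and domain IDs are placeholders, and the fork domain is assumed to have been created and paused beforehand:

#include <xenctrl.h>

/* Hypothetical helper, not part of the patch: fork "fork_domid" off a
 * paused parent, requesting an initially empty p2m so entries are only
 * populated on demand. */
static int fork_with_empty_p2m(uint32_t parent_domid, uint32_t fork_domid)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    int rc;

    if ( !xch )
        return -1;

    rc = xc_memshr_fork(xch, parent_domid, fork_domid,
                        false, /* allow_with_iommu */
                        true,  /* block_interrupts */
                        true); /* empty_p2m */

    xc_interface_close(xch);
    return rc;
}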
return 0;
}
-static int copy_vcpu_settings(struct domain *cd, const struct domain *d)
+static int copy_vcpu_settings(struct domain *cd, const struct domain *d,
+ bool empty_p2m)
{
unsigned int i;
struct p2m_domain *p2m = p2m_get_hostp2m(cd);
/* Copy & map in the vcpu_info page if the guest uses one */
vcpu_info_mfn = d_vcpu->vcpu_info_mfn;
- if ( !mfn_eq(vcpu_info_mfn, INVALID_MFN) )
+ if ( !empty_p2m && !mfn_eq(vcpu_info_mfn, INVALID_MFN) )
{
mfn_t new_vcpu_info_mfn = cd_vcpu->vcpu_info_mfn;
return 0;
}
-static int copy_settings(struct domain *cd, struct domain *d)
+static int copy_settings(struct domain *cd, struct domain *d,
+ bool empty_p2m)
{
int rc;
- if ( (rc = copy_vcpu_settings(cd, d)) )
+ if ( (rc = copy_vcpu_settings(cd, d, empty_p2m)) )
return rc;
if ( (rc = hvm_copy_context_and_params(cd, d)) )
return rc;
- if ( (rc = copy_special_pages(cd, d)) )
+ if ( !empty_p2m && (rc = copy_special_pages(cd, d)) )
return rc;
copy_tsc(cd, d);
return rc;
}
-static int fork(struct domain *cd, struct domain *d)
+static int fork(struct domain *cd, struct domain *d, uint16_t flags)
{
int rc = -EBUSY;
+ bool block_interrupts = flags & XENMEM_FORK_BLOCK_INTERRUPTS;
+ bool empty_p2m = flags & XENMEM_FORK_EMPTY_P2M;
if ( !cd->controller_pause_count )
return rc;
if ( (rc = bring_up_vcpus(cd, d)) )
goto done;
- rc = copy_settings(cd, d);
+ if ( !(rc = copy_settings(cd, d, empty_p2m)) )
+ {
+ cd->arch.hvm.mem_sharing.block_interrupts = block_interrupts;
+
+ if ( (cd->arch.hvm.mem_sharing.empty_p2m = empty_p2m) )
+ ASSERT(page_list_empty(&cd->page_list));
+ }
done:
if ( rc && rc != -ERESTART )
}
spin_unlock_recursive(&d->page_alloc_lock);
- rc = copy_settings(d, pd);
+ rc = copy_settings(d, pd, d->arch.hvm.mem_sharing.empty_p2m);
domain_unpause(d);
if ( mso.u.fork.pad )
goto out;
if ( mso.u.fork.flags &
- ~(XENMEM_FORK_WITH_IOMMU_ALLOWED | XENMEM_FORK_BLOCK_INTERRUPTS) )
+ ~(XENMEM_FORK_WITH_IOMMU_ALLOWED | XENMEM_FORK_BLOCK_INTERRUPTS |
+ XENMEM_FORK_EMPTY_P2M) )
goto out;
rc = rcu_lock_live_remote_domain_by_id(mso.u.fork.parent_domain,
                                       &pd);
if ( rc )
goto out;
}
- rc = fork(d, pd);
+ rc = fork(d, pd, mso.u.fork.flags);
if ( rc == -ERESTART )
rc = hypercall_create_continuation(__HYPERVISOR_memory_op,
"lh", XENMEM_sharing_op,
arg);
- else if ( !rc && (mso.u.fork.flags & XENMEM_FORK_BLOCK_INTERRUPTS) )
- d->arch.hvm.mem_sharing.block_interrupts = true;
rcu_unlock_domain(pd);
break;
} debug;
struct mem_sharing_op_fork { /* OP_FORK */
domid_t parent_domain; /* IN: parent's domain id */
-/* Only makes sense for short-lived forks */
+/* These flags only make sense for short-lived forks */
#define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0)
-/* Only makes sense for short-lived forks */
#define XENMEM_FORK_BLOCK_INTERRUPTS (1u << 1)
+#define XENMEM_FORK_EMPTY_P2M (1u << 2)
uint16_t flags; /* IN: optional settings */
uint32_t pad; /* Must be set to 0 */
} fork;
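At the hypercall level the new flag is just another bit in mem_sharing_op_fork.flags. A rough sketch of how a raw XENMEM_sharing_op caller could fill the request; the struct layout and constants come from the public header above, while the helper itself is illustrative:

#include <string.h>
#include <xenctrl.h>

/* Illustrative only: build an OP_FORK request asking for an empty initial
 * p2m. The pad field must stay zero, and any flag bit outside the three
 * defined above is rejected by the hypervisor with -EINVAL. */
static void prepare_fork_request(xen_mem_sharing_op_t *mso, domid_t parent)
{
    memset(mso, 0, sizeof(*mso));
    mso->op = XENMEM_sharing_op_fork;
    mso->u.fork.parent_domain = parent;
    mso->u.fork.flags = XENMEM_FORK_EMPTY_P2M | XENMEM_FORK_BLOCK_INTERRUPTS;
}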