*
* With VMs that have a lot of memory this call may block for a long time.
*/
-int xc_memshr_fork_reset(xc_interface *xch, uint32_t forked_domain);
+int xc_memshr_fork_reset(xc_interface *xch, uint32_t forked_domain,
+ bool reset_state, bool reset_memory);
/* Debug calls: return the number of pages referencing the shared frame backing
* the input argument. Should be one or greater.
return xc_memshr_memop(xch, domid, &mso);
}
-int xc_memshr_fork_reset(xc_interface *xch, uint32_t domid)
+int xc_memshr_fork_reset(xc_interface *xch, uint32_t domid, bool reset_state,
+ bool reset_memory)
{
xen_mem_sharing_op_t mso;
memset(&mso, 0, sizeof(mso));
mso.op = XENMEM_sharing_op_fork_reset;
+ if ( reset_state )
+ mso.u.fork.flags |= XENMEM_FORK_RESET_STATE;
+ if ( reset_memory )
+ mso.u.fork.flags |= XENMEM_FORK_RESET_MEMORY;
return xc_memshr_memop(xch, domid, &mso);
}
int mem_sharing_fork_page(struct domain *d, gfn_t gfn,
bool unsharing);
+int mem_sharing_fork_reset(struct domain *d, bool reset_state,
+ bool reset_memory);
+
/*
* If called by a foreign domain, possible errors are
* -EBUSY -> ring full
return -EOPNOTSUPP;
}
+static inline int mem_sharing_fork_reset(struct domain *d, bool reset_state,
+ bool reset_memory)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
#endif /* __MEM_SHARING_H__ */
* footprints the hypercall continuation should be implemented (or if this
* feature needs to be become "stable").
*/
-static int mem_sharing_fork_reset(struct domain *d)
+int mem_sharing_fork_reset(struct domain *d, bool reset_state,
+ bool reset_memory)
{
- int rc;
+ int rc = 0;
struct domain *pd = d->parent;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
struct page_info *page, *tmp;
domain_pause(d);
+ if ( !reset_memory )
+ goto state;
+
/* need recursive lock because we will free pages */
spin_lock_recursive(&d->page_alloc_lock);
page_list_for_each_safe(page, tmp, &d->page_list)
}
spin_unlock_recursive(&d->page_alloc_lock);
- rc = copy_settings(d, pd, d->arch.hvm.mem_sharing.empty_p2m);
+ state:
+ if ( reset_state )
+ rc = copy_settings(d, pd, d->arch.hvm.mem_sharing.empty_p2m);
domain_unpause(d);
case XENMEM_sharing_op_fork_reset:
{
+ bool reset_state = mso.u.fork.flags & XENMEM_FORK_RESET_STATE;
+ bool reset_memory = mso.u.fork.flags & XENMEM_FORK_RESET_MEMORY;
+
rc = -EINVAL;
- if ( mso.u.fork.pad || mso.u.fork.flags )
+ if ( mso.u.fork.pad || (!reset_state && !reset_memory) )
+ goto out;
+ if ( mso.u.fork.flags &
+ ~(XENMEM_FORK_RESET_STATE | XENMEM_FORK_RESET_MEMORY) )
goto out;
rc = -ENOSYS;
if ( !d->parent )
goto out;
- rc = mem_sharing_fork_reset(d);
+ rc = mem_sharing_fork_reset(d, reset_state, reset_memory);
break;
}
#include <asm/p2m.h>
#include <asm/monitor.h>
#include <asm/vm_event.h>
+
+#ifdef CONFIG_MEM_SHARING
+#include <asm/mem_sharing.h>
+#endif
+
#include <xsm/xsm.h>
#include <public/hvm/params.h>
if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
p2m_mem_paging_resume(d, &rsp);
#endif
+#ifdef CONFIG_MEM_SHARING
+ do {
+ bool reset_state = rsp.flags & VM_EVENT_FLAG_RESET_FORK_STATE;
+ bool reset_mem = rsp.flags & VM_EVENT_FLAG_RESET_FORK_MEMORY;
+
+ if ( reset_state || reset_mem )
+ mem_sharing_fork_reset(d, reset_state, reset_mem);
+ } while(0);
+#endif
/*
* Check emulation flags in the arch-specific handler only, as it
uint32_t gref; /* IN: gref to debug */
} u;
} debug;
- struct mem_sharing_op_fork { /* OP_FORK */
+ struct mem_sharing_op_fork { /* OP_FORK/_RESET */
domid_t parent_domain; /* IN: parent's domain id */
/* These flags only makes sense for short-lived forks */
#define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0)
#define XENMEM_FORK_BLOCK_INTERRUPTS (1u << 1)
#define XENMEM_FORK_EMPTY_P2M (1u << 2)
+#define XENMEM_FORK_RESET_STATE (1u << 3)
+#define XENMEM_FORK_RESET_MEMORY (1u << 4)
uint16_t flags; /* IN: optional settings */
uint32_t pad; /* Must be set to 0 */
} fork;
* Reset the vmtrace buffer (if vmtrace is enabled)
*/
#define VM_EVENT_FLAG_RESET_VMTRACE (1 << 13)
+/*
+ * Reset the VM state (if the VM is a fork)
+ */
+#define VM_EVENT_FLAG_RESET_FORK_STATE (1 << 14)
+/*
+ * Remove unshared entries from the physmap (if the VM is a fork)
+ */
+#define VM_EVENT_FLAG_RESET_FORK_MEMORY (1 << 15)
/*
* Reasons for the vm event request