}
}
- d->arch.relmem = RELMEM_xen;
+ d->arch.relmem = RELMEM_shared;
/* fallthrough */
- /* Relinquish every page of memory. */
+ case RELMEM_shared:
+
+ if ( is_hvm_domain(d) )
+ {
+            /* If the domain has shared pages, relinquish them,
+             * allowing for preemption. */
+ ret = relinquish_shared_pages(d);
+ if ( ret )
+ return ret;
+ }
+
+ d->arch.relmem = RELMEM_xen;
+ /* Fallthrough. Relinquish every page of memory. */
case RELMEM_xen:
ret = relinquish_memory(d, &d->xenpage_list, ~0UL);
if ( ret )
#include <asm/mem_event.h>
#include <asm/atomic.h>
#include <xen/rcupdate.h>
+#include <asm/event.h>
#include "mm-locks.h"
return 0;
}
+int relinquish_shared_pages(struct domain *d)
+{
+ int rc = 0;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ unsigned long gfn, count = 0;
+
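+    /* Without a host p2m there is nothing to scan or relinquish. */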
+ if ( p2m == NULL )
+ return 0;
+
+ p2m_lock(p2m);
+    for ( gfn = p2m->next_shared_gfn_to_relinquish;
+          gfn < p2m->max_mapped_pfn; gfn++ )
+ {
+ p2m_access_t a;
+ p2m_type_t t;
+ mfn_t mfn;
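+        /* Once the domain holds no more shared pages, the rest of
+         * the scan can be skipped. */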
+ if ( atomic_read(&d->shr_pages) == 0 )
+ break;
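+        /* Fetch the entry for this gfn; only p2m_ram_shared entries
+         * are torn down here. */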
+ mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
+ if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
+ {
+ /* Does not fail with ENOMEM given the DESTROY flag */
+ BUG_ON(__mem_sharing_unshare_page(d, gfn,
+ MEM_SHARING_DESTROY_GFN));
+            /* Clear out the p2m entry so no one else may try to
+             * unshare it. */
+ p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
+ p2m_invalid, p2m_access_rwx);
+ count++;
+ }
+
+        /* Preempt every 2MiB worth of shared pages torn down. The
+         * interval is arbitrary. */
+ if ( (count == 512) && hypercall_preempt_check() )
+ {
+ p2m->next_shared_gfn_to_relinquish = gfn + 1;
+ rc = -EAGAIN;
+ break;
+ }
+ }
+
+ p2m_unlock(p2m);
+ return rc;
+}
+
int mem_sharing_memop(struct domain *d, xen_mem_sharing_op_t *mec)
{
int rc = 0;
p2m_lock(p2m);
#ifdef __x86_64__
+    /* Try to unshare any remaining shared p2m entries. This is a
+     * safeguard; relinquish_shared_pages() should already have done
+     * the work. */
for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
{
p2m_access_t a;
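+        /* All shared pages for this domain are already gone; stop early. */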
+ if ( atomic_read(&d->shr_pages) == 0 )
+ break;
mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
{
static inline void put_gfn(struct domain *d, unsigned long gfn) {}
static inline void mem_event_cleanup(struct domain *d) {}
+static inline int relinquish_shared_pages(struct domain *d)
+{
+ return 0;
+}
#define INVALID_MFN (~0UL)
/* Continuable domain_relinquish_resources(). */
enum {
RELMEM_not_started,
+ RELMEM_shared,
RELMEM_xen,
RELMEM_l4,
RELMEM_l3,
int mem_sharing_audit(void);
void mem_sharing_init(void);
+/* Scans the p2m and relinquishes any shared pages, destroying
+ * those for which this domain holds the final reference.
+ * Preemptible.
+ */
+int relinquish_shared_pages(struct domain *d);
+
#else
#define mem_sharing_init() do { } while (0)
+static inline int relinquish_shared_pages(struct domain *d)
+{
+ return 0;
+}
#endif /* __x86_64__ */
/* Highest guest frame that's ever been mapped in the p2m */
unsigned long max_mapped_pfn;
+    /* When relinquishing shared gfns in a preemptible manner, record
+     * where to resume the search. */
+ unsigned long next_shared_gfn_to_relinquish;
+
/* Populate-on-demand variables
* All variables are protected with the pod lock. We cannot rely on
* the p2m lock if it's turned into a fine-grained lock.