#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/trace.h>
+#include <asm/hvm/nestedhvm.h>
#include <asm/page.h>
#include <asm/paging.h>
#include <asm/p2m.h>
static int
p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn);
+/*
+ * Drop the PoD lock, end the nested-flush deferral window, and perform
+ * the deferred flush of all nested p2ms (when nested HVM is enabled for
+ * the domain).
+ *
+ * The flag is cleared before calling p2m_flush_nestedp2m(), presumably so
+ * that any flush requested from this point on takes effect immediately
+ * rather than being deferred again — NOTE(review): confirm against the
+ * readers of defer_nested_flush, which are not visible in this hunk.
+ * The flush itself is intentionally done outside the PoD lock.
+ */
+static void pod_unlock_and_flush(struct p2m_domain *p2m)
+{
+ pod_unlock(p2m);
+ p2m->defer_nested_flush = false;
+ if ( nestedhvm_enabled(p2m->domain) )
+ p2m_flush_nestedp2m(p2m->domain);
+}
/*
* This function is needed for two reasons:
gfn_lock(p2m, gfn, order);
pod_lock(p2m);
+ p2m->defer_nested_flush = true;
/*
* If we don't have any outstanding PoD entries, let things take their
}
out_unlock:
- pod_unlock(p2m);
+ pod_unlock_and_flush(p2m);
gfn_unlock(p2m, gfn, order);
return ret;
}
* won't start until we're done.
*/
if ( unlikely(d->is_dying) )
- goto out_fail;
-
+ {
+ pod_unlock(p2m);
+ return false;
+ }
/*
* Because PoD does not have cache list for 1GB pages, it has to remap
p2m_populate_on_demand, p2m->default_access);
}
+ p2m->defer_nested_flush = true;
+
/* Only reclaim if we're in actual need of more cache. */
if ( p2m->pod.entry_count > p2m->pod.count )
pod_eager_reclaim(p2m);
__trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), &t);
}
- pod_unlock(p2m);
+ pod_unlock_and_flush(p2m);
return true;
+
out_of_memory:
- pod_unlock(p2m);
+ pod_unlock_and_flush(p2m);
printk("%s: Dom%d out of PoD memory! (tot=%"PRIu32" ents=%ld dom%d)\n",
__func__, d->domain_id, domain_tot_pages(d),
p2m->pod.entry_count, current->domain->domain_id);
domain_crash(d);
return false;
+
out_fail:
- pod_unlock(p2m);
+ pod_unlock_and_flush(p2m);
return false;
+
remap_and_retry:
BUG_ON(order != PAGE_ORDER_2M);
- pod_unlock(p2m);
+ pod_unlock_and_flush(p2m);
/*
* Remap this 2-meg region in singleton chunks. See the comment on the