This patch implements part of the TODO left in commit
a902c12ee45fc9389eb8fe54eeddaf267a555c58 ("More efficient TLB-flush
filtering in alloc_heap_pages()"): it moves TLB-flush filtering out into
populate_physmap(). Because of the TLB-flush done in alloc_heap_pages(),
it is very slow to create a guest with more than 100GB of memory on a
host with 100+ CPUs.
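For scale (assuming the common case of 4KB, i.e. order-0, extents):
populating 100GB means on the order of 100GB / 4KB ≈ 26 million calls
into the allocator, each of which may end in a TLB-flush spanning all of
the host's CPUs.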
This patch introduces a "MEMF_no_tlbflush" bit in memflags to indicate
whether the TLB-flush should be done in alloc_heap_pages() or by its
caller, populate_physmap(). When this bit is set, alloc_heap_pages()
skips the TLB-flush. Setting this bit once the VM has been created would
be a security issue: it could make pages accessible to guest B while
guest A may still have a cached mapping to them.
Therefore, this patch also introduces a "creation_finished" field in
struct domain to indicate whether the domain has ever been unpaused by
the hypervisor. MEMF_no_tlbflush may only be set during the VM creation
phase, while creation_finished is still false, i.e. before the domain is
unpaused for the first time.
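In outline, the resulting allocation path in populate_physmap() is as
follows (a condensed sketch of the hunks below; error handling, the
preemption check and the PoD path are omitted):

    bool need_tlbflush = false;
    uint32_t tlbflush_timestamp = 0;

    /* Only safe while no vCPU of this domain has ever run. */
    if ( unlikely(!d->creation_finished) )
        a->memflags |= MEMF_no_tlbflush;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        page = alloc_domheap_pages(d, a->extent_order, a->memflags);

        /* The flush was skipped, so remember each page's timestamp. */
        if ( unlikely(a->memflags & MEMF_no_tlbflush) )
            for ( j = 0; j < (1U << a->extent_order); j++ )
                accumulate_tlbflush(&need_tlbflush, &page[j],
                                    &tlbflush_timestamp);
    }

    if ( need_tlbflush )
        filtered_flush_tlb_mask(tlbflush_timestamp);

This way a single, filtered flush at the end replaces up to one flush
per allocation: filtered_flush_tlb_mask() only flushes those CPUs whose
per-CPU flush timestamp shows they might still hold stale entries.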
Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Dario Faggioli <dario.faggioli@citrix.com>
{
int old, new, prev = d->controller_pause_count;
+ /*
+ * We record this information here for populate_physmap to figure out
+ * that the domain has finished being created. In fact, we're only
+ * allowed to set the MEMF_no_tlbflush flag during VM creation.
+ */
+ d->creation_finished = true;
+
do
{
old = prev;
unsigned int i, j;
xen_pfn_t gpfn, mfn;
struct domain *d = a->domain, *curr_d = current->domain;
+ bool need_tlbflush = false;
+ uint32_t tlbflush_timestamp = 0;
if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
a->nr_extents-1) )
max_order(curr_d)) )
return;
+ /*
+ * With MEMF_no_tlbflush set, alloc_heap_pages() will ignore
+ * TLB-flushes. After VM creation, this is a security issue (it can
+ * make pages accessible to guest B, when guest A may still have a
+ * cached mapping to them). So we do this only during domain creation,
+ * when the domain itself has not yet been unpaused for the first
+ * time.
+ */
+ if ( unlikely(!d->creation_finished) )
+ a->memflags |= MEMF_no_tlbflush;
+
for ( i = a->nr_done; i < a->nr_extents; i++ )
{
if ( i != a->nr_done && hypercall_preempt_check() )
goto out;
}
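+ /*
+ * alloc_heap_pages() skipped the flush for these pages; note their
+ * flush timestamps so a single filtered flush can be done for the
+ * whole batch at the 'out' label below.
+ */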
+ if ( unlikely(a->memflags & MEMF_no_tlbflush) )
+ {
+ for ( j = 0; j < (1U << a->extent_order); j++ )
+ accumulate_tlbflush(&need_tlbflush, &page[j],
+ &tlbflush_timestamp);
+ }
+
mfn = page_to_mfn(page);
}
}
out:
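+ /* One filtered flush covers all pages allocated with MEMF_no_tlbflush. */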
+ if ( need_tlbflush )
+ filtered_flush_tlb_mask(tlbflush_timestamp);
a->nr_done = i;
}
BUG_ON(pg[i].count_info != PGC_state_free);
pg[i].count_info = PGC_state_inuse;
- accumulate_tlbflush(&need_tlbflush, &pg[i], &tlbflush_timestamp);
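+ /* With MEMF_no_tlbflush set, the caller takes care of the flush. */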
+ if ( !(memflags & MEMF_no_tlbflush) )
+ accumulate_tlbflush(&need_tlbflush, &pg[i],
+ &tlbflush_timestamp);
/* Initialise fields which have other uses for free pages. */
pg[i].u.inuse.type_info = 0;
#define MEMF_exact_node (1U<<_MEMF_exact_node)
#define _MEMF_no_owner 5
#define MEMF_no_owner (1U<<_MEMF_no_owner)
+#define _MEMF_no_tlbflush 6
+#define MEMF_no_tlbflush (1U<<_MEMF_no_tlbflush)
#define _MEMF_node 8
#define MEMF_node_mask ((1U << (8 * sizeof(nodeid_t))) - 1)
#define MEMF_node(n) ((((n) + 1) & MEMF_node_mask) << _MEMF_node)
bool_t disable_migrate;
/* Is this guest being debugged by dom0? */
bool_t debugger_attached;
+ /*
+ * Set to true at the very end of domain creation, when the domain is
+ * unpaused for the first time by the systemcontroller.
+ */
+ bool creation_finished;
+
/* Which guest this guest has privileges on */
struct domain *target;