There are a few places in Xen where we walk a domain's page lists
without holding the page_alloc lock. They race with updates to the
page lists, which are normally rare but can be quite common under PoD
when the domain is close to its memory limit and the PoD reclaimer is
busy. This patch protects those places by taking the page_alloc lock.
I think this is OK for the two debug-key printouts: they do not run
from irq context and appear deadlock-free. The tboot change also looks
safe, provided the tboot shutdown functions are never called from irq
context or with the page_alloc lock already held. The p2m change is the
riskiest, but PoD already has code paths that take the page_alloc lock
while holding the p2m lock, so it is no worse than existing code.
Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
xen-unstable changeset: 21881:57de3a3118bb
xen-unstable date: Wed Jul 28 07:54:12 2010 +0100
}
else
{
+ spin_lock(&d->page_alloc_lock);
page_list_for_each ( page, &d->page_list )
{
printk(" DomPage %p: caf=%08lx, taf=%" PRtype_info "\n",
_p(page_to_mfn(page)),
page->count_info, page->u.inuse.type_info);
}
+ spin_unlock(&d->page_alloc_lock);
}
if ( is_hvm_domain(d) )
p2m_pod_dump_data(d);
}
+ spin_lock(&d->page_alloc_lock);
page_list_for_each ( page, &d->xenpage_list )
{
printk(" XenPage %p: caf=%08lx, taf=%" PRtype_info "\n",
_p(page_to_mfn(page)),
page->count_info, page->u.inuse.type_info);
}
+ spin_unlock(&d->page_alloc_lock);
}
struct domain *alloc_domain_struct(void)
goto error;
/* Copy all existing mappings from the page list and m2p */
+ spin_lock(&d->page_alloc_lock);
page_list_for_each(page, &d->page_list)
{
mfn = page_to_mfn(page);
#endif
&& gfn != INVALID_M2P_ENTRY
&& !set_p2m_entry(d, gfn, mfn, 0, p2m_ram_rw) )
- goto error;
+ goto error_unlock;
}
+ spin_unlock(&d->page_alloc_lock);
P2M_PRINTK("p2m table initialised (%u pages)\n", page_count);
p2m_unlock(p2m);
return 0;
+error_unlock:
+ spin_unlock(&d->page_alloc_lock);
error:
P2M_PRINTK("failed to initialize p2m table, gfn=%05lx, mfn=%"
PRI_mfn "\n", gfn, mfn_x(mfn));
for_each_online_node(i)
page_num_node[i] = 0;
+ spin_lock(&d->page_alloc_lock);
page_list_for_each(page, &d->page_list)
{
i = phys_to_nid((paddr_t)page_to_mfn(page) << PAGE_SHIFT);
page_num_node[i]++;
}
+ spin_unlock(&d->page_alloc_lock);
for_each_online_node(i)
printk(" Node %u: %u\n", i, page_num_node[i]);
continue;
printk("MACing Domain %u\n", d->domain_id);
+ spin_lock(&d->page_alloc_lock);
page_list_for_each(page, &d->page_list)
{
void *pg = __map_domain_page(page);
vmac_update(pg, PAGE_SIZE, &ctx);
unmap_domain_page(pg);
}
+ spin_unlock(&d->page_alloc_lock);
if ( !is_idle_domain(d) )
{