From 637a283f17eba11a3c1f6255c87fead99bda2e6d Mon Sep 17 00:00:00 2001
From: George Dunlap
Date: Mon, 17 Jan 2011 14:24:13 +0000
Subject: [PATCH] PoD: Allow pod_set_cache_target hypercall to be preempted

For very large VMs, setting the cache target can take long enough that
dom0 complains of soft lockups. Allow the hypercall to be preempted.

Signed-off-by: George Dunlap
Acked-by: Tim Deegan
---
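For reference, this is Xen's usual long-running-hypercall pattern: the
worker checks hypercall_preempt_check() periodically and bails out with
-EAGAIN, and the dispatch layer turns that into a continuation so the
guest re-enters the hypercall where it left off. A minimal sketch of
the pattern follows; long_running_op() and do_work_item() are
hypothetical stand-ins, while hypercall_preempt_check() and
hypercall_create_continuation() are the real primitives this patch
uses:

    /* Worker: do one unit of work at a time; if an event is pending
     * and the caller allows it, stop early with -EAGAIN. */
    static int long_running_op(struct domain *d, unsigned long count,
                               int preemptible)
    {
        while ( count-- )
        {
            do_work_item(d);                    /* hypothetical */

            if ( preemptible && hypercall_preempt_check() )
                return -EAGAIN;                 /* resume later */
        }
        return 0;
    }

    /* Dispatch layer: translate -EAGAIN into a continuation, so the
     * same memory_op hypercall is re-issued with the same arguments. */
    rc = long_running_op(d, n, 1 /* preemptible */);
    if ( rc == -EAGAIN )
        rc = hypercall_create_continuation(__HYPERVISOR_memory_op,
                                           "lh", op, arg);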
 xen/arch/x86/domain.c           |  4 ++--
 xen/arch/x86/mm.c               | 24 ++++++++++++++++--------
 xen/arch/x86/mm/p2m.c           | 18 +++++++++++++++---
 xen/arch/x86/x86_64/compat/mm.c |  3 +++
 4 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 376a8e4d44..b971df19b9 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1653,8 +1653,8 @@ int hypercall_xlat_continuation(unsigned int *id, unsigned int mask, ...)
     unsigned long nval = 0;
     va_list args;
 
-    BUG_ON(*id > 5);
-    BUG_ON(mask & (1U << *id));
+    BUG_ON(id && *id > 5);
+    BUG_ON(id && (mask & (1U << *id)));
 
     va_start(args, mask);
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c1666d7504..84044d1931 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4799,15 +4799,23 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
             rc = p2m_pod_set_mem_target(d, target.target_pages);
         }
 
-        p2m = p2m_get_hostp2m(d);
-        target.tot_pages = d->tot_pages;
-        target.pod_cache_pages = p2m->pod.count;
-        target.pod_entries = p2m->pod.entry_count;
-
-        if ( copy_to_guest(arg, &target, 1) )
+        if ( rc == -EAGAIN )
+        {
+            rc = hypercall_create_continuation(
+                __HYPERVISOR_memory_op, "lh", op, arg);
+        }
+        else if ( rc >= 0 )
         {
-            rc= -EFAULT;
-            goto pod_target_out_unlock;
+            p2m = p2m_get_hostp2m(d);
+            target.tot_pages = d->tot_pages;
+            target.pod_cache_pages = p2m->pod.count;
+            target.pod_entries = p2m->pod.entry_count;
+
+            if ( copy_to_guest(arg, &target, 1) )
+            {
+                rc= -EFAULT;
+                goto pod_target_out_unlock;
+            }
         }
 
     pod_target_out_unlock:
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e455ede60a..f18b2ce244 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -435,7 +435,7 @@ static struct page_info * p2m_pod_cache_get(struct p2m_domain *p2m,
 /* Set the size of the cache, allocating or freeing as necessary.
  */
 static int
-p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target)
+p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target, int preemptible)
 {
     struct domain *d = p2m->domain;
     int ret = 0;
@@ -468,6 +468,12 @@ p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target)
         }
 
         p2m_pod_cache_add(p2m, page, order);
+
+        if ( hypercall_preempt_check() && preemptible )
+        {
+            ret = -EAGAIN;
+            goto out;
+        }
     }
 
     /* Decreasing the target */
@@ -512,6 +518,12 @@ p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target)
             put_page(page+i);
 
             put_page(page+i);
+
+            if ( hypercall_preempt_check() && preemptible )
+            {
+                ret = -EAGAIN;
+                goto out;
+            }
         }
     }
 
@@ -589,7 +601,7 @@ p2m_pod_set_mem_target(struct domain *d, unsigned long target)
 
     ASSERT( pod_target >= p2m->pod.count );
 
-    ret = p2m_pod_set_cache_target(p2m, pod_target);
+    ret = p2m_pod_set_cache_target(p2m, pod_target, 1/*preemptible*/);
 
 out:
     p2m_unlock(p2m);
@@ -753,7 +765,7 @@ out_entry_check:
     /* If we've reduced our "liabilities" beyond our "assets", free some */
     if ( p2m->pod.entry_count < p2m->pod.count )
     {
-        p2m_pod_set_cache_target(p2m, p2m->pod.entry_count);
+        p2m_pod_set_cache_target(p2m, p2m->pod.entry_count, 0/*can't preempt*/);
     }
 
 out_unlock:
diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
index 453fbac190..f567aabab8 100644
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -127,6 +127,9 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
         if ( rc < 0 )
             break;
 
+        if ( rc == __HYPERVISOR_memory_op )
+            hypercall_xlat_continuation(NULL, 0x2, nat, arg);
+
         XLAT_pod_target(&cmp, nat);
 
         if ( copy_to_guest(arg, &cmp, 1) )
-- 
2.39.5