xenbits.xensource.com Git - people/aperard/centos-package-xen.git/commitdiff
Backport miscellaneous fixes which look important
author    George Dunlap <george.dunlap@eu.citrix.com>
          Thu, 11 Dec 2014 16:58:17 +0000 (16:58 +0000)
committer George Dunlap <george.dunlap@eu.citrix.com>
          Thu, 11 Dec 2014 17:10:15 +0000 (17:10 +0000)
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
SOURCES/xen-queue.am
SPECS/xen.spec

diff --git a/SOURCES/xen-queue.am b/SOURCES/xen-queue.am
index 74cc255351acfad4658b1c76659b3de81b24c105..9224f2b627715cd54e308f7f901b033f6018c337 100644
--- a/SOURCES/xen-queue.am
+++ b/SOURCES/xen-queue.am
@@ -2209,7 +2209,233 @@ index 65104d5..aef7fb8 100644
 1.9.1
 
 
-From a170a29814a379ccb7758a3f48e9d991379ec4af Mon Sep 17 00:00:00 2001
+From 5fd3b05da5fbcb86d1225ffb368ab64210ee7746 Mon Sep 17 00:00:00 2001
+From: Don Koch <dkoch@verizon.com>
+Date: Thu, 11 Dec 2014 17:02:21 +0000
+Subject: [PATCH] x86/HVM: sanity check xsave area when migrating or restoring
+ from older Xen versions
+
+Xen 4.3.0, 4.2.3 and older transferred a maximum sized xsave area (as
+if all the available XCR0 bits were set); the new version only
+transfers based on the actual XCR0 bits. This may result in a smaller
+area if the last sections were missing (e.g., the LWP area from an AMD
+machine). If the size doesn't match the XCR0-derived size, the size is
+checked against the maximum size, and the part of the xsave area
+between the actual and maximum used size is checked for zero data. If
+the size exceeds the maximum, or any part of the overflow area is
+non-zero, we return an error.
+
+Signed-off-by: Don Koch <dkoch@verizon.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+master commit: d7bb8e88a087690feba63ef83c13ba067f041da0
+master date: 2014-10-27 16:45:09 +0100
+---
+ xen/arch/x86/hvm/hvm.c | 31 ++++++++++++++++++++-----------
+ 1 file changed, 20 insertions(+), 11 deletions(-)
+
+diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
+index 3289604..1351891 100644
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -1041,6 +1041,7 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+     struct vcpu *v;
+     struct hvm_hw_cpu_xsave *ctxt;
+     struct hvm_save_descriptor *desc;
++    unsigned int i, desc_start;
+
+     /* Which vcpu is this? */
+     vcpuid = hvm_load_instance(h);
+@@ -1081,15 +1082,8 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+                         save_area) + XSTATE_AREA_MIN_SIZE);
+         return -EINVAL;
+     }
+
+-    size = HVM_CPU_XSAVE_SIZE(xfeature_mask);
+-    if ( desc->length > size )
+-    {
+-        printk(XENLOG_G_WARNING
+-               "HVM%d.%d restore mismatch: xsave length %u > %u\n",
+-               d->domain_id, vcpuid, desc->length, size);
+-        return -EOPNOTSUPP;
+-    }
+     h->cur += sizeof (*desc);
++    desc_start = h->cur;
+     ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
+     h->cur += desc->length;
+@@ -1109,10 +1103,24 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+     size = HVM_CPU_XSAVE_SIZE(ctxt->xcr0_accum);
+     if ( desc->length > size )
+     {
++        /*
++         * Xen 4.3.0, 4.2.3 and older used to send longer-than-needed
++         * xsave regions.  Permit loading the record if the extra data
++         * is all zero.
++         */
++        for ( i = size; i < desc->length; i++ )
++        {
++            if ( h->data[desc_start + i] )
++            {
++                printk(XENLOG_G_WARNING
++                       "HVM%d.%u restore mismatch: xsave length %#x > %#x (non-zero data at %#x)\n",
++                       d->domain_id, vcpuid, desc->length, size, i);
++                return -EOPNOTSUPP;
++            }
++        }
+         printk(XENLOG_G_WARNING
+-               "HVM%d.%d restore mismatch: xsave length %u > %u\n",
++               "HVM%d.%u restore mismatch: xsave length %#x > %#x\n",
+                d->domain_id, vcpuid, desc->length, size);
+-        return -EOPNOTSUPP;
+     }
+
+     /* Checking finished */
+@@ -1121,7 +1129,8 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+     if ( ctxt->xcr0_accum & XSTATE_NONLAZY )
+         v->arch.nonlazy_xstate_used = 1;
+     memcpy(v->arch.xsave_area, &ctxt->save_area,
+-           desc->length - offsetof(struct hvm_hw_cpu_xsave, save_area));
++           min(desc->length, size) - offsetof(struct hvm_hw_cpu_xsave,
++           save_area));
+
+     return 0;
+ }
+-- 
+1.9.1
+
+
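In isolation, the rule the patch above enforces amounts to the following
minimal C sketch (illustrative names only, not the Xen interface): accept
a record longer than the XCR0-derived size only when it fits within the
maximum-sized area and every byte past the derived size is zero.

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Sketch of the legacy-record check: "rec" is the transmitted xsave
     * record, "sent_len" its length, "derived_len" the XCR0-derived
     * size, and "max_len" the maximum-sized area older Xen used to
     * send.  Returns 0 when the record is acceptable.
     */
    static int check_legacy_tail(const uint8_t *rec, size_t sent_len,
                                 size_t derived_len, size_t max_len)
    {
        size_t i;

        if ( sent_len > max_len )
            return -1;                /* larger than any valid area */

        for ( i = derived_len; i < sent_len; i++ )
            if ( rec[i] )
                return -1;            /* live data would be truncated */

        return 0;                     /* zero padding only: accept */
    }

On success, only min(sent_len, derived_len) bytes are copied into the
vcpu's xsave area, which is what the min() in the final hunk implements.
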
+From 4a0d9186fa98831f48d7842bce8aeebce0bc7111 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 11 Dec 2014 17:02:33 +0000
+Subject: [PATCH] adjust number of domains in cpupools when destroying domain
+
+Commit bac6334b51d9bcfe57ecf4a4cb5288348fcf044a (move domain to
+cpupool0 before destroying it) introduced an error in the accounting
+of cpupools regarding the number of domains. The number of domains
+is not adjusted when a domain is moved to cpupool0 in domain_kill().
+
+Correct this by introducing a cpupool function that does the move,
+instead of open-coding it via sched_move_domain().
+
+Reported-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Tested-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
+Reviewed-by: Andrew Cooper <Andrew.Cooper3@citrix.com>
+Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
+master commit: 934e7baa6c12d19cfaf24e8f8e27d6c6a8b8c5e4
+master date: 2014-11-12 12:39:58 +0100
+---
+ xen/common/cpupool.c    | 47 +++++++++++++++++++++++++++++++++--------------
+ xen/common/domain.c     |  2 +-
+ xen/include/xen/sched.h |  1 +
+ 3 files changed, 35 insertions(+), 15 deletions(-)
+
+diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
+index e46e930..53a1394 100644
+--- a/xen/common/cpupool.c
++++ b/xen/common/cpupool.c
+@@ -225,6 +225,35 @@ static int cpupool_destroy(struct cpupool *c)
+ }
+
+ /*
++ * Move domain to another cpupool
++ */
++static int cpupool_move_domain_locked(struct domain *d, struct cpupool *c)
++{
++    int ret;
++
++    d->cpupool->n_dom--;
++    ret = sched_move_domain(d, c);
++    if ( ret )
++        d->cpupool->n_dom++;
++    else
++        c->n_dom++;
++
++    return ret;
++}
++int cpupool_move_domain(struct domain *d, struct cpupool *c)
++{
++    int ret;
++
++    spin_lock(&cpupool_lock);
++
++    ret = cpupool_move_domain_locked(d, c);
++
++    spin_unlock(&cpupool_lock);
++
++    return ret;
++}
++
++/*
+  * assign a specific cpu to a cpupool
+  * cpupool_lock must be held
+  */
+@@ -338,14 +367,9 @@ int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
+                 ret = -EBUSY;
+                 break;
+             }
+-            c->n_dom--;
+-            ret = sched_move_domain(d, cpupool0);
++            ret = cpupool_move_domain_locked(d, cpupool0);
+             if ( ret )
+-            {
+-                c->n_dom++;
+                 break;
+-            }
+-            cpupool0->n_dom++;
+         }
+         rcu_read_unlock(&domlist_read_lock);
+         if ( ret )
+@@ -613,16 +637,11 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
+                         d->domain_id, op->cpupool_id);
+         ret = -ENOENT;
+         spin_lock(&cpupool_lock);
++
+         c = cpupool_find_by_id(op->cpupool_id);
+         if ( (c != NULL) && cpumask_weight(c->cpu_valid) )
+-        {
+-            d->cpupool->n_dom--;
+-            ret = sched_move_domain(d, c);
+-            if ( ret )
+-                d->cpupool->n_dom++;
+-            else
+-                c->n_dom++;
+-        }
++            ret = cpupool_move_domain_locked(d, c);
++
+         spin_unlock(&cpupool_lock);
+         cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d ret %d\n",
+                         d->domain_id, op->cpupool_id, ret);
+diff --git a/xen/common/domain.c b/xen/common/domain.c
+index 1308193..b18e0a7 100644
+--- a/xen/common/domain.c
++++ b/xen/common/domain.c
+@@ -539,7 +539,7 @@ int domain_kill(struct domain *d)
+             BUG_ON(rc != -EAGAIN);
+             break;
+         }
+-        if ( sched_move_domain(d, cpupool0) )
++        if ( cpupool_move_domain(d, cpupool0) )
+             return -EAGAIN;
+         for_each_vcpu ( d, v )
+             unmap_vcpu_info(v);
+diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
+index 4418883..996a08a 100644
+--- a/xen/include/xen/sched.h
++++ b/xen/include/xen/sched.h
+@@ -828,6 +828,7 @@ struct cpupool *cpupool_get_by_id(int poolid);
+ void cpupool_put(struct cpupool *pool);
+ int cpupool_add_domain(struct domain *d, int poolid);
+ void cpupool_rm_domain(struct domain *d);
++int cpupool_move_domain(struct domain *d, struct cpupool *c);
+ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
+ void schedule_dump(struct cpupool *c);
+ extern void dump_runq(unsigned char key);
+-- 
+1.9.1
+
+
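The helper factored out above reduces to the following illustrative
sketch (made-up types, not the Xen code): the per-pool domain count and
the scheduler move live in one function, so every caller gets identical
rollback behaviour when the move fails.

    struct pool {
        int n_dom;                  /* number of domains in this pool */
    };

    /* Stand-in for sched_move_domain(); returns 0 on success. */
    extern int do_move(struct pool *from, struct pool *to);

    static int move_domain_accounted(struct pool *from, struct pool *to)
    {
        int ret;

        from->n_dom--;
        ret = do_move(from, to);
        if ( ret )
            from->n_dom++;          /* move failed: restore the count */
        else
            to->n_dom++;            /* move succeeded: credit new pool */

        return ret;
    }

Before this patch, domain_kill() called sched_move_domain() directly and
never touched n_dom; routing it through cpupool_move_domain(), which
takes cpupool_lock around the same helper, keeps the accounting
consistent on that path too.
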
+From 8518a090c4c19700cd885f7468cb5314e299faad Mon Sep 17 00:00:00 2001
 From: George Dunlap <george.dunlap@eu.citrix.com>
 Date: Wed, 15 Oct 2014 15:36:23 +0100
 Subject: [PATCH] xen-centos-disable-CFLAGS-for-qemu.patch
@@ -2234,7 +2460,7 @@ index 6610a8d..86d8a58 100644
 1.9.1
 
 
-From 51e2e2631a3410275bc9588ebca0886b583af66b Mon Sep 17 00:00:00 2001
+From 611b1f115a73741aa0d7b9c96c10d8ecb7edf9ba Mon Sep 17 00:00:00 2001
 From: George Dunlap <george.dunlap@eu.citrix.com>
 Date: Wed, 15 Oct 2014 15:36:23 +0100
 Subject: [PATCH] Adapt libxl to use blktap 2.5 v0.9.2
diff --git a/SPECS/xen.spec b/SPECS/xen.spec
index 99794dd1db2143afc6965512e8b10f40041a47b9..ea05f01302e0e8f0d0624b26c0f789ed93f4f7af 100644
--- a/SPECS/xen.spec
+++ b/SPECS/xen.spec
@@ -770,6 +770,7 @@ rm -rf %{buildroot}
  - Backported qdisk persistent grant fix
  - Backported fixes to use tapdisk with HVM guests
  - Backported XSAs 107,109-114
+ - Backported fixes to migration, cpupools
 
 * Wed Oct 22 2014 George Dunlap <george.dunlap@eu.citrix.com> - 4.4.1-2.el6.centos
  - Updated to blktap 2.5 v0.9.2