db426394965c48c1d29023e1cc6d965ea6b9a9035d8a849be2750ca4659a3d07 SOURCES/newlib-1.16.0.tar.gz
f60ae61cfbd5da1d849d0beaa21f593c38dac9359f0b3ddc612f447408265b24 SOURCES/pciutils-2.2.9.tar.bz2
fad9414898f727ddb7d14d30d89ca977375e6dddef301aa6f3df74ee766b0235 SOURCES/qemu-xen-4.2.3.tar.gz
-69b6a73701383d609ad094a38925004e8595755fb39a6fafd579ba754e8667db SOURCES/xen-4.2.3.tar.gz
1795c7d067a43174113fdf03447532f373e1c6c57c08d61d9e4e9be5e244b05e SOURCES/zlib-1.2.3.tar.gz
+69b6a73701383d609ad094a38925004e8595755fb39a6fafd579ba754e8667db SOURCES/xen-4.2.3.tar.gz
--- /dev/null
+x86/xsave: initialize extended register state when guests enable it
+
+Till now, when setting previously unset bits in XCR0 we wouldn't touch
+the active register state, thus leaving in the newly enabled registers
+whatever a prior user of it left there, i.e. potentially leaking
+information between guests.
+
+This is CVE-2013-1442 / XSA-62.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/xstate.c
++++ b/xen/arch/x86/xstate.c
+@@ -307,6 +307,7 @@ int validate_xstate(u64 xcr0, u64 xcr0_a
+ int handle_xsetbv(u32 index, u64 new_bv)
+ {
+ struct vcpu *curr = current;
++ u64 mask;
+
+ if ( index != XCR_XFEATURE_ENABLED_MASK )
+ return -EOPNOTSUPP;
+@@ -320,9 +321,23 @@ int handle_xsetbv(u32 index, u64 new_bv)
+ if ( !set_xcr0(new_bv) )
+ return -EFAULT;
+
++ mask = new_bv & ~curr->arch.xcr0_accum;
+ curr->arch.xcr0 = new_bv;
+ curr->arch.xcr0_accum |= new_bv;
+
++ mask &= curr->fpu_dirtied ? ~XSTATE_FP_SSE : XSTATE_NONLAZY;
++ if ( mask )
++ {
++ unsigned long cr0 = read_cr0();
++
++ clts();
++ if ( curr->fpu_dirtied )
++ asm ( "stmxcsr %0" : "=m" (curr->arch.xsave_area->fpu_sse.mxcsr) );
++ xrstor(curr, mask);
++ if ( cr0 & X86_CR0_TS )
++ write_cr0(cr0);
++ }
++
+ return 0;
+ }
+
--- /dev/null
+x86: properly handle hvm_copy_from_guest_{phys,virt}() errors
+
+Ignoring them generally implies using uninitialized data and, in all
+cases dealt with here, potentially leaking hypervisor stack contents to
+guests.
+
+This is XSA-63.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -2308,11 +2308,7 @@ void hvm_task_switch(
+
+ rc = hvm_copy_from_guest_virt(
+ &tss, prev_tr.base, sizeof(tss), PFEC_page_present);
+- if ( rc == HVMCOPY_bad_gva_to_gfn )
+- goto out;
+- if ( rc == HVMCOPY_gfn_paged_out )
+- goto out;
+- if ( rc == HVMCOPY_gfn_shared )
++ if ( rc != HVMCOPY_okay )
+ goto out;
+
+ eflags = regs->eflags;
+@@ -2357,13 +2353,11 @@ void hvm_task_switch(
+
+ rc = hvm_copy_from_guest_virt(
+ &tss, tr.base, sizeof(tss), PFEC_page_present);
+- if ( rc == HVMCOPY_bad_gva_to_gfn )
+- goto out;
+- if ( rc == HVMCOPY_gfn_paged_out )
+- goto out;
+- /* Note: this could be optimised, if the callee functions knew we want RO
+- * access */
+- if ( rc == HVMCOPY_gfn_shared )
++ /*
++ * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
++ * functions knew we want RO access.
++ */
++ if ( rc != HVMCOPY_okay )
+ goto out;
+
+
+--- a/xen/arch/x86/hvm/intercept.c
++++ b/xen/arch/x86/hvm/intercept.c
+@@ -87,17 +87,28 @@ static int hvm_mmio_access(struct vcpu *
+ {
+ for ( i = 0; i < p->count; i++ )
+ {
+- int ret;
+-
+- ret = hvm_copy_from_guest_phys(&data,
+- p->data + (sign * i * p->size),
+- p->size);
+- if ( (ret == HVMCOPY_gfn_paged_out) ||
+- (ret == HVMCOPY_gfn_shared) )
++ switch ( hvm_copy_from_guest_phys(&data,
++ p->data + sign * i * p->size,
++ p->size) )
+ {
++ case HVMCOPY_okay:
++ break;
++ case HVMCOPY_gfn_paged_out:
++ case HVMCOPY_gfn_shared:
+ rc = X86EMUL_RETRY;
+ break;
++ case HVMCOPY_bad_gfn_to_mfn:
++ data = ~0;
++ break;
++ case HVMCOPY_bad_gva_to_gfn:
++ ASSERT(0);
++ /* fall through */
++ default:
++ rc = X86EMUL_UNHANDLEABLE;
++ break;
+ }
++ if ( rc != X86EMUL_OKAY )
++ break;
+ rc = write_handler(v, p->addr + (sign * i * p->size), p->size,
+ data);
+ if ( rc != X86EMUL_OKAY )
+@@ -165,8 +176,28 @@ static int process_portio_intercept(port
+ for ( i = 0; i < p->count; i++ )
+ {
+ data = 0;
+- (void)hvm_copy_from_guest_phys(&data, p->data + sign*i*p->size,
+- p->size);
++ switch ( hvm_copy_from_guest_phys(&data,
++ p->data + sign * i * p->size,
++ p->size) )
++ {
++ case HVMCOPY_okay:
++ break;
++ case HVMCOPY_gfn_paged_out:
++ case HVMCOPY_gfn_shared:
++ rc = X86EMUL_RETRY;
++ break;
++ case HVMCOPY_bad_gfn_to_mfn:
++ data = ~0;
++ break;
++ case HVMCOPY_bad_gva_to_gfn:
++ ASSERT(0);
++ /* fall through */
++ default:
++ rc = X86EMUL_UNHANDLEABLE;
++ break;
++ }
++ if ( rc != X86EMUL_OKAY )
++ break;
+ rc = action(IOREQ_WRITE, p->addr, p->size, &data);
+ if ( rc != X86EMUL_OKAY )
+ break;
+--- a/xen/arch/x86/hvm/io.c
++++ b/xen/arch/x86/hvm/io.c
+@@ -340,14 +340,24 @@ static int dpci_ioport_write(uint32_t mp
+ data = p->data;
+ if ( p->data_is_ptr )
+ {
+- int ret;
+-
+- ret = hvm_copy_from_guest_phys(&data,
+- p->data + (sign * i * p->size),
+- p->size);
+- if ( (ret == HVMCOPY_gfn_paged_out) &&
+- (ret == HVMCOPY_gfn_shared) )
++ switch ( hvm_copy_from_guest_phys(&data,
++ p->data + sign * i * p->size,
++ p->size) )
++ {
++ case HVMCOPY_okay:
++ break;
++ case HVMCOPY_gfn_paged_out:
++ case HVMCOPY_gfn_shared:
+ return X86EMUL_RETRY;
++ case HVMCOPY_bad_gfn_to_mfn:
++ data = ~0;
++ break;
++ case HVMCOPY_bad_gva_to_gfn:
++ ASSERT(0);
++ /* fall through */
++ default:
++ return X86EMUL_UNHANDLEABLE;
++ }
+ }
+
+ switch ( p->size )
+--- a/xen/arch/x86/hvm/vmx/realmode.c
++++ b/xen/arch/x86/hvm/vmx/realmode.c
+@@ -39,7 +39,9 @@ static void realmode_deliver_exception(
+
+ again:
+ last_byte = (vector * 4) + 3;
+- if ( idtr->limit < last_byte )
++ if ( idtr->limit < last_byte ||
++ hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) !=
++ HVMCOPY_okay )
+ {
+ /* Software interrupt? */
+ if ( insn_len != 0 )
+@@ -64,8 +66,6 @@ static void realmode_deliver_exception(
+ }
+ }
+
+- (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);
+-
+ frame[0] = regs->eip + insn_len;
+ frame[1] = csr->sel;
+ frame[2] = regs->eflags & ~X86_EFLAGS_RF;
--- /dev/null
+commit 95a0770282ea2a03f7bc48c6656d5fc79bae0599
+Author: Tim Deegan <tim@xen.org>
+Date: Thu Sep 12 14:16:28 2013 +0100
+
+ x86/mm/shadow: Fix initialization of PV shadow L4 tables.
+
+ Shadowed PV L4 tables must have the same Xen mappings as their
+ unshadowed equivalent. This is done by copying the Xen entries
+ verbatim from the idle pagetable, and then using guest_l4_slot()
+ in the SHADOW_FOREACH_L4E() iterator to avoid touching those entries.
+
+ adc5afbf1c70ef55c260fb93e4b8ce5ccb918706 (x86: support up to 16Tb)
+ changed the definition of ROOT_PAGETABLE_XEN_SLOTS to extend right to
+ the top of the address space, which causes the shadow code to
+ copy Xen mappings into guest-kernel-address slots too.
+
+ In the common case, all those slots are zero in the idle pagetable,
+ and no harm is done. But if any slot above #271 is non-zero, Xen will
+ crash when that slot is later cleared (it attempts to drop
+ shadow-pagetable refcounts on its own L4 pagetables).
+
+ Fix by using the new ROOT_PAGETABLE_PV_XEN_SLOTS when appropriate.
+ Monitor pagetables need the full Xen mappings, so they keep using the
+ old name (with its new semantics).
+
+ This is XSA-64.
+
+ Signed-off-by: Tim Deegan <tim@xen.org>
+ Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
+index 4c4c2ba..3fed0b6 100644
+--- a/xen/arch/x86/mm/shadow/multi.c
++++ b/xen/arch/x86/mm/shadow/multi.c
+@@ -1433,15 +1433,19 @@ void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
+ {
+ struct domain *d = v->domain;
+ shadow_l4e_t *sl4e;
++ unsigned int slots;
+
+ sl4e = sh_map_domain_page(sl4mfn);
+ ASSERT(sl4e != NULL);
+ ASSERT(sizeof (l4_pgentry_t) == sizeof (shadow_l4e_t));
+
+ /* Copy the common Xen mappings from the idle domain */
++ slots = (shadow_mode_external(d)
++ ? ROOT_PAGETABLE_XEN_SLOTS
++ : ROOT_PAGETABLE_PV_XEN_SLOTS);
+ memcpy(&sl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+ &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+- ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
++ slots * sizeof(l4_pgentry_t));
+
+ /* Install the per-domain mappings for this domain */
+ sl4e[shadow_l4_table_offset(PERDOMAIN_VIRT_START)] =
--- /dev/null
+x86: properly set up fbld emulation operand address
+
+This is CVE-2013-4361 / XSA-66.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
+
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -3156,11 +3156,11 @@ x86_emulate(
+ break;
+ case 4: /* fbld m80dec */
+ ea.bytes = 10;
+- dst = ea;
++ src = ea;
+ if ( (rc = ops->read(src.mem.seg, src.mem.off,
+ &src.val, src.bytes, ctxt)) != 0 )
+ goto done;
+- emulate_fpu_insn_memdst("fbld", src.val);
++ emulate_fpu_insn_memsrc("fbld", src.val);
+ break;
+ case 5: /* fild m64i */
+ ea.bytes = 8;
--- /dev/null
+x86: check segment descriptor read result in 64-bit OUTS emulation
+
+When emulating such an operation from a 64-bit context (CS has long
+mode set), and the data segment is overridden to FS/GS, the result of
+reading the overridden segment's descriptor (read_descriptor) is not
+checked. If it fails, data_base is left uninitialized.
+
+This can lead to 8 bytes of Xen's stack being leaked to the guest
+(implicitly, i.e. via the address given in a #PF).
+
+Coverity-ID: 1055116
+
+This is CVE-2013-4368 / XSA-67.
+
+Signed-off-by: Matthew Daley <mattjd@gmail.com>
+
+Fix formatting.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/traps.c
++++ b/xen/arch/x86/traps.c
+@@ -1993,10 +1993,10 @@ static int emulate_privileged_op(struct
+ break;
+ }
+ }
+- else
+- read_descriptor(data_sel, v, regs,
+- &data_base, &data_limit, &ar,
+- 0);
++ else if ( !read_descriptor(data_sel, v, regs,
++ &data_base, &data_limit, &ar, 0) ||
++ !(ar & _SEGMENT_S) || !(ar & _SEGMENT_P) )
++ goto fail;
+ data_limit = ~0UL;
+ ar = _SEGMENT_WR|_SEGMENT_S|_SEGMENT_DPL|_SEGMENT_P;
+ }
--- /dev/null
+libxl: fix vif rate parsing
+
+strtok can return NULL here. We don't need to use strtok anyway, so just
+use a simple strchr method.
+
+Coverity-ID: 1055642
+
+This is CVE-2013-4369 / XSA-68
+
+Signed-off-by: Matthew Daley <mattjd@gmail.com>
+
+Fix type. Add test case
+
+Signed-off-by: Ian Campbell <Ian.campbell@citrix.com>
+
+diff --git a/tools/libxl/check-xl-vif-parse b/tools/libxl/check-xl-vif-parse
+index 0473182..02c6dba 100755
+--- a/tools/libxl/check-xl-vif-parse
++++ b/tools/libxl/check-xl-vif-parse
+@@ -206,4 +206,8 @@ expected </dev/null
+ one $e rate=4294967295GB/s@5us
+ one $e rate=4296MB/s@4294s
+
++# test include of single '@'
++expected </dev/null
++one $e rate=@
++
+ complete
+diff --git a/tools/libxl/libxlu_vif.c b/tools/libxl/libxlu_vif.c
+index 3b3de0f..0665e62 100644
+--- a/tools/libxl/libxlu_vif.c
++++ b/tools/libxl/libxlu_vif.c
+@@ -95,23 +95,30 @@ int xlu_vif_parse_rate(XLU_Config *cfg, const char *rate, libxl_device_nic *nic)
+ uint64_t bytes_per_sec = 0;
+ uint64_t bytes_per_interval = 0;
+ uint32_t interval_usecs = 50000UL; /* Default to 50ms */
+- char *ratetok, *tmprate;
++ char *p, *tmprate;
+ int rc = 0;
+
+ tmprate = strdup(rate);
++ if (tmprate == NULL) {
++ rc = ENOMEM;
++ goto out;
++ }
++
++ p = strchr(tmprate, '@');
++ if (p != NULL)
++ *p++ = 0;
++
+ if (!strcmp(tmprate,"")) {
+ xlu__vif_err(cfg, "no rate specified", rate);
+ rc = EINVAL;
+ goto out;
+ }
+
+- ratetok = strtok(tmprate, "@");
+- rc = vif_parse_rate_bytes_per_sec(cfg, ratetok, &bytes_per_sec);
++ rc = vif_parse_rate_bytes_per_sec(cfg, tmprate, &bytes_per_sec);
+ if (rc) goto out;
+
+- ratetok = strtok(NULL, "@");
+- if (ratetok != NULL) {
+- rc = vif_parse_rate_interval_usecs(cfg, ratetok, &interval_usecs);
++ if (p != NULL) {
++ rc = vif_parse_rate_interval_usecs(cfg, p, &interval_usecs);
+ if (rc) goto out;
+ }
+
--- /dev/null
+From 067c122873c67bd1d9620f8340f9c9c209135388 Mon Sep 17 00:00:00 2001
+From: Matthew Daley <mattjd@gmail.com>
+Date: Tue, 10 Sep 2013 23:12:45 +1200
+Subject: [PATCH] tools/ocaml: fix erroneous free of cpumap in
+ stub_xc_vcpu_getaffinity
+
+Not sure how it got there...
+
+Coverity-ID: 1056196
+
+This is CVE-2013-4370 / XSA-69
+
+Signed-off-by: Matthew Daley <mattjd@gmail.com>
+Acked-by: Ian Campbell <ian.campbell@citrix.com>
+---
+ tools/ocaml/libs/xc/xenctrl_stubs.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/tools/ocaml/libs/xc/xenctrl_stubs.c b/tools/ocaml/libs/xc/xenctrl_stubs.c
+index df756ad..f5cf0ed 100644
+--- a/tools/ocaml/libs/xc/xenctrl_stubs.c
++++ b/tools/ocaml/libs/xc/xenctrl_stubs.c
+@@ -461,8 +461,6 @@ CAMLprim value stub_xc_vcpu_getaffinity(value xch, value domid,
+
+ retval = xc_vcpu_getaffinity(_H(xch), _D(domid),
+ Int_val(vcpu), c_cpumap);
+- free(c_cpumap);
+-
+ if (retval < 0) {
+ free(c_cpumap);
+ failwith_xc(_H(xch));
+--
+1.7.10.4
+
--- /dev/null
+From 94db3e1cb356a0d2de1753888ceb0eb767404ec4 Mon Sep 17 00:00:00 2001
+From: Matthew Daley <mattjd@gmail.com>
+Date: Tue, 10 Sep 2013 22:18:46 +1200
+Subject: [PATCH] libxl: fix out-of-memory error handling in
+ libxl_list_cpupool
+
+...otherwise it will return freed memory. All the current users of this
+function check already for a NULL return, so use that.
+
+Coverity-ID: 1056194
+
+This is CVE-2013-4371 / XSA-70
+
+Signed-off-by: Matthew Daley <mattjd@gmail.com>
+Acked-by: Ian Campbell <ian.campbell@citrix.com>
+---
+ tools/libxl/libxl.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
+index 0879f23..17653ef 100644
+--- a/tools/libxl/libxl.c
++++ b/tools/libxl/libxl.c
+@@ -651,6 +651,7 @@ libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx *ctx, int *nb_pool_out)
+ if (!tmp) {
+ LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
+ libxl_cpupoolinfo_list_free(ptr, i);
++ ptr = NULL;
+ goto out;
+ }
+ ptr = tmp;
+--
+1.7.10.4
+
--- /dev/null
+tools: xenstored: if the reply is too big then send E2BIG error
+
+This fixes the issue for both C and ocaml xenstored, however only the ocaml
+xenstored is vulnerable in its default configuration.
+
+Adding a new error appears to be safe, since both libxenstore and the Linux
+driver at least treat an unknown error code as EINVAL.
+
+This is XSA-72
+
+Original ocaml patch by Jerome Maloberti <jerome.maloberti@citrix.com>
+Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
+Signed-off-by: Thomas Sanders <thomas.sanders@citrix.com>
+
+diff --git a/tools/ocaml/xenstored/connection.ml b/tools/ocaml/xenstored/connection.ml
+index 273fe4d..47695f8 100644
+--- a/tools/ocaml/xenstored/connection.ml
++++ b/tools/ocaml/xenstored/connection.ml
+@@ -18,6 +18,8 @@ exception End_of_file
+
+ open Stdext
+
++let xenstore_payload_max = 4096 (* xen/include/public/io/xs_wire.h *)
++
+ type watch = {
+ con: t;
+ token: string;
+@@ -112,8 +114,15 @@ let restrict con domid =
+ let set_target con target_domid =
+ con.perm <- Perms.Connection.set_target (get_perm con) ~perms:[Perms.READ; Perms.WRITE] target_domid
+
++let is_backend_mmap con = match con.xb.Xenbus.Xb.backend with
++ | Xenbus.Xb.Xenmmap _ -> true
++ | _ -> false
++
+ let send_reply con tid rid ty data =
+- Xenbus.Xb.queue con.xb (Xenbus.Xb.Packet.create tid rid ty data)
++ if (String.length data) > xenstore_payload_max && (is_backend_mmap con) then
++ Xenbus.Xb.queue con.xb (Xenbus.Xb.Packet.create tid rid Xenbus.Xb.Op.Error "E2BIG\000")
++ else
++ Xenbus.Xb.queue con.xb (Xenbus.Xb.Packet.create tid rid ty data)
+
+ let send_error con tid rid err = send_reply con tid rid Xenbus.Xb.Op.Error (err ^ "\000")
+ let send_ack con tid rid ty = send_reply con tid rid ty "OK\000"
+diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
+index 0f8ba64..ccfdaa3 100644
+--- a/tools/xenstore/xenstored_core.c
++++ b/tools/xenstore/xenstored_core.c
+@@ -629,6 +629,11 @@ void send_reply(struct connection *conn, enum xsd_sockmsg_type type,
+ {
+ struct buffered_data *bdata;
+
++ if ( len > XENSTORE_PAYLOAD_MAX ) {
++ send_error(conn, E2BIG);
++ return;
++ }
++
+ /* Message is a child of the connection context for auto-cleanup. */
+ bdata = new_buffer(conn);
+ bdata->buffer = talloc_array(bdata, char, len);
+diff --git a/xen/include/public/io/xs_wire.h b/xen/include/public/io/xs_wire.h
+index 99d24e3..585f0c8 100644
+--- a/xen/include/public/io/xs_wire.h
++++ b/xen/include/public/io/xs_wire.h
+@@ -83,7 +83,8 @@ __attribute__((unused))
+ XSD_ERROR(EROFS),
+ XSD_ERROR(EBUSY),
+ XSD_ERROR(EAGAIN),
+- XSD_ERROR(EISCONN)
++ XSD_ERROR(EISCONN),
++ XSD_ERROR(E2BIG)
+ };
+ #endif
+
--- /dev/null
+From 52b2c3148bdcaa46befcdca64e14d0201d7ca642 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Thu, 31 Oct 2013 20:49:00 +0000
+Subject: [PATCH] gnttab: correct locking order reversal
+
+Coverity ID 1087189
+
+Correct a lock order reversal between a domains page allocation and grant
+table locks.
+
+This is CVE-2013-4494 / XSA-73.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+Consolidate error handling.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Keir Fraser <keir@xen.org>
+Tested-by: Matthew Daley <mattjd@gmail.com>
+
+Backported to Xen-4.2
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+---
+ xen/common/grant_table.c | 52 +++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 44 insertions(+), 8 deletions(-)
+
+diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
+index 0e349cc..0672bad 100644
+--- a/xen/common/grant_table.c
++++ b/xen/common/grant_table.c
+@@ -1499,6 +1499,8 @@ gnttab_transfer(
+
+ for ( i = 0; i < count; i++ )
+ {
++ bool_t okay;
++
+ if (i && hypercall_preempt_check())
+ return i;
+
+@@ -1607,16 +1609,18 @@ gnttab_transfer(
+ * pages when it is dying.
+ */
+ if ( unlikely(e->is_dying) ||
+- unlikely(e->tot_pages >= e->max_pages) ||
+- unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
++ unlikely(e->tot_pages >= e->max_pages) )
+ {
+- if ( !e->is_dying )
+- gdprintk(XENLOG_INFO, "gnttab_transfer: "
+- "Transferee has no reservation "
+- "headroom (%d,%d) or provided a bad grant ref (%08x) "
+- "or is dying (%d)\n",
+- e->tot_pages, e->max_pages, gop.ref, e->is_dying);
+ spin_unlock(&e->page_alloc_lock);
++
++ if ( e->is_dying )
++ gdprintk(XENLOG_INFO, "gnttab_transfer: "
++ "Transferee (d%d) is dying\n", e->domain_id);
++ else
++ gdprintk(XENLOG_INFO, "gnttab_transfer: "
++ "Transferee (d%d) has no headroom (tot %u, max %u)\n",
++ e->domain_id, e->tot_pages, e->max_pages);
++
+ rcu_unlock_domain(e);
+ put_gfn(d, gop.mfn);
+ page->count_info &= ~(PGC_count_mask|PGC_allocated);
+@@ -1628,6 +1632,38 @@ gnttab_transfer(
+ /* Okay, add the page to 'e'. */
+ if ( unlikely(e->tot_pages++ == 0) )
+ get_knownalive_domain(e);
++
++ /*
++ * We must drop the lock to avoid a possible deadlock in
++ * gnttab_prepare_for_transfer. We have reserved a page in e so can
++ * safely drop the lock and re-acquire it later to add page to the
++ * pagelist.
++ */
++ spin_unlock(&e->page_alloc_lock);
++ okay = gnttab_prepare_for_transfer(e, d, gop.ref);
++ spin_lock(&e->page_alloc_lock);
++
++ if ( unlikely(!okay) || unlikely(e->is_dying) )
++ {
++ bool_t drop_dom_ref = (e->tot_pages-- == 1);
++
++ spin_unlock(&e->page_alloc_lock);
++
++ if ( okay /* i.e. e->is_dying due to the surrounding if() */ )
++ gdprintk(XENLOG_INFO, "gnttab_transfer: "
++ "Transferee (d%d) is now dying\n", e->domain_id);
++
++ if ( drop_dom_ref )
++ put_domain(e);
++ rcu_unlock_domain(e);
++
++ put_gfn(d, gop.mfn);
++ page->count_info &= ~(PGC_count_mask|PGC_allocated);
++ free_domheap_page(page);
++ gop.status = GNTST_general_error;
++ goto copyback;
++ }
++
+ page_list_add_tail(page, &e->page_list);
+ page_set_owner(page, e);
+
+--
+1.7.10.4
+
--- /dev/null
+nested VMX: VMLAUNCH/VMRESUME emulation must check permission first thing
+
+Otherwise uninitialized data may be used, leading to crashes.
+
+This is CVE-2013-4551 / XSA-75.
+
+Reported-and-tested-by: Jeff Zimmerman <Jeff_Zimmerman@McAfee.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-and-tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/hvm/vmx/vvmx.c
++++ b/xen/arch/x86/hvm/vmx/vvmx.c
+@@ -1075,15 +1075,10 @@ int nvmx_handle_vmxoff(struct cpu_user_r
+ return X86EMUL_OKAY;
+ }
+
+-int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
++static int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
+ {
+ struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+ struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+- int rc;
+-
+- rc = vmx_inst_check_privilege(regs, 0);
+- if ( rc != X86EMUL_OKAY )
+- return rc;
+
+ /* check VMCS is valid and IO BITMAP is set */
+ if ( (nvcpu->nv_vvmcxaddr != VMCX_EADDR) &&
+@@ -1100,6 +1095,10 @@ int nvmx_handle_vmresume(struct cpu_user
+ {
+ int launched;
+ struct vcpu *v = current;
++ int rc = vmx_inst_check_privilege(regs, 0);
++
++ if ( rc != X86EMUL_OKAY )
++ return rc;
+
+ if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
+ {
+@@ -1119,8 +1118,11 @@ int nvmx_handle_vmresume(struct cpu_user
+ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
+ {
+ int launched;
+- int rc;
+ struct vcpu *v = current;
++ int rc = vmx_inst_check_privilege(regs, 0);
++
++ if ( rc != X86EMUL_OKAY )
++ return rc;
+
+ if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
+ {
--- /dev/null
+VT-d: fix TLB flushing in dma_pte_clear_one()
+
+The third parameter of __intel_iommu_iotlb_flush() is to indicate
+whether the to be flushed entry was a present one. A few lines before,
+we bailed if !dma_pte_present(*pte), so there's no need to check the
+flag here again - we can simply always pass TRUE here.
+
+This is CVE-2013-6375 / XSA-78.
+
+Suggested-by: Cheng Yueqiang <yqcheng.2008@phdis.smu.edu.sg>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -646,7 +646,7 @@ static void dma_pte_clear_one(struct dom
+ iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+
+ if ( !this_cpu(iommu_dont_flush_iotlb) )
+- __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K , 0, 1);
++ __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K, 1, 1);
+
+ unmap_vtd_domain_page(page);
+
Summary: Xen is a virtual machine monitor
Name: xen
Version: 4.2.3
-Release: 23%{?dist}
+Release: 25%{?dist}
Group: Development/Libraries
License: GPLv2+ and LGPLv2+ and BSD
URL: http://xen.org/
#Patch130: xsa55-4.2-0023-libxc-Better-range-check-in-xc_dom_alloc_segment.patch
#Patch131: xsa57-4.2.patch
#Patch132: xsa58-4.2.patch
+#Patch133: xsa61-4.2-unstable.patch
+Patch134: xsa62.patch
+Patch135: xsa63.patch
+Patch136: xsa64.patch
+Patch137: xsa66.patch
+Patch138: xsa67.patch
+Patch139: xsa68.patch
+Patch140: xsa69.patch
+Patch141: xsa70.patch
+#Patch142: xsa71-qemu-xen-4.2.patch
+Patch143: xsa72.patch
+Patch144: xsa73-4.2.patch
+Patch145: xsa75-4.2.patch
+Patch146: xsa78.patch
+
Patch1000: xen-centos-disable-CFLAGS-for-qemu.patch
Patch1001: xen-centos-disableWerror-blktap25.patch
%patch106 -p1
%patch107 -p1
+%patch134 -p1
+%patch135 -p1
+#%%patch136 -p1
+%patch137 -p1
+%patch138 -p1
+%patch139 -p1
+%patch140 -p1
+%patch141 -p1
+%patch143 -p1
+%patch144 -p1
+%patch145 -p1
+%patch146 -p1
+
%patch1000 -p1
pushd `pwd`
%patch1003 -p1
%patch1005 -p1
+
pushd `pwd`
cd ${RPM_BUILD_DIR}/%{name}-%{version}/tools/qemu-xen
%patch105 -p1
%endif
%changelog
+* Sat Nov 23 2013 Johnny Hughes <johnny@centos.org> - 4.2.3-25.el6.centos
+- Roll in patch 145 and 146 for XSA-75 (CVE-2013-4551), XSA-78 (CVE-2013-6375)
+
+* Mon Nov 4 2013 Johnny Hughes <johnny@centos.org> - 4.2.3-24.el6.centos
+- Roll in patches 134 to 141, 143 to 144 for the following XSAs:
+- XSA-62 (CVE-2013-1442), XSA-63 (CVE-2013-4355), XSA-72 (CVE-2013-4416)
+- XSA-64 (CVE-2013-4356), XSA-66 (CVE-2013-4361), XSA-67 (CVE-2013-4368)
+- XSA-68 (CVE-2013-4369), XSA-69 (CVE-2013-4370), XSA-70 (CVE-2013-4371)
+- XSA-73 (CVE-2013-4494)
+
* Wed Sep 11 2013 Johnny Hughes <johnny@centos.org> - 4.2.3-23.el6.centos
- upgraded to upstream 4.2.3
- removed patches 66-75, 92-94, 108-132 as they are now rolled into xen-4.2.3