From e296305acc8e50464112f862fb8622ea85e7ffb3 Mon Sep 17 00:00:00 2001
From: Karanbir Singh
Date: Tue, 7 Oct 2014 14:55:09 +0000
Subject: [PATCH] import xen-4.2.5-33.el6.centos.alt

---
 .xen.metadata                          |   2 +-
 SOURCES/xen-pygrub-fix-for-rhel7.patch |  25 --
 SOURCES/xsa104.patch                   |  44 +++
 SOURCES/xsa105.patch                   |  37 ++
 SOURCES/xsa106.patch                   |  23 ++
 SOURCES/xsa89.patch                    |  96 -----
 SOURCES/xsa92-4.2.patch                |  36 --
 SOURCES/xsa96.patch                    |  38 --
 SOURCES/xsa97-hap-4.2.patch            | 485 +++++++++++++++++++++++++
 SPECS/xen.spec                         |  28 +-
 10 files changed, 608 insertions(+), 206 deletions(-)
 delete mode 100644 SOURCES/xen-pygrub-fix-for-rhel7.patch
 create mode 100644 SOURCES/xsa104.patch
 create mode 100644 SOURCES/xsa105.patch
 create mode 100644 SOURCES/xsa106.patch
 delete mode 100644 SOURCES/xsa89.patch
 delete mode 100644 SOURCES/xsa92-4.2.patch
 delete mode 100644 SOURCES/xsa96.patch
 create mode 100644 SOURCES/xsa97-hap-4.2.patch

diff --git a/.xen.metadata b/.xen.metadata
index c6850d7..94278d6 100644
--- a/.xen.metadata
+++ b/.xen.metadata
@@ -3,5 +3,5 @@
 772e4d550e07826665ed0528c071dd5404ef7dbe1825a38c8adbc2a00bca948f SOURCES/lwip-1.3.0.tar.gz
 db426394965c48c1d29023e1cc6d965ea6b9a9035d8a849be2750ca4659a3d07 SOURCES/newlib-1.16.0.tar.gz
 f60ae61cfbd5da1d849d0beaa21f593c38dac9359f0b3ddc612f447408265b24 SOURCES/pciutils-2.2.9.tar.bz2
-e23e6292affd7a6d82da99527e8bf3964e57eb7322144c67c2025692b1a21550 SOURCES/xen-4.2.4.tar.gz
+3cf440866315e8085050eb0586f0447b6b47a08dbed6a72226bf5ed5d89ec567 SOURCES/xen-4.2.5.tar.gz
 1795c7d067a43174113fdf03447532f373e1c6c57c08d61d9e4e9be5e244b05e SOURCES/zlib-1.2.3.tar.gz
diff --git a/SOURCES/xen-pygrub-fix-for-rhel7.patch b/SOURCES/xen-pygrub-fix-for-rhel7.patch
deleted file mode 100644
index 1ad5d78..0000000
--- a/SOURCES/xen-pygrub-fix-for-rhel7.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-# HG changeset patch
-# Parent df2b08554a3b5da3493e536f1dbe762cd65d61f3
-
-diff --git a/tools/pygrub/src/GrubConf.py b/tools/pygrub/src/GrubConf.py
---- a/tools/pygrub/src/GrubConf.py
-+++ b/tools/pygrub/src/GrubConf.py
-@@ -348,7 +348,9 @@
- 
-     commands = {'set:root': 'root',
-                 'linux': 'kernel',
-+                'linux16': 'kernel',
-                 'initrd': 'initrd',
-+                'initrd16': 'initrd',
-                 'echo': None,
-                 'insmod': None,
-                 'search': None}
-@@ -394,7 +396,7 @@
-             continue
- 
-         # new image
--        title_match = re.match('^menuentry ["\'](.*)["\'] (.*){', l)
-+        title_match = re.match('^menuentry ["\'](.*?)["\'] (.*){', l)
-         if title_match:
-             if img is not None:
-                 raise RuntimeError, "syntax error: cannot nest menuentry (%d %s)" % (len(img),img)
diff --git a/SOURCES/xsa104.patch b/SOURCES/xsa104.patch
new file mode 100644
index 0000000..2c5b39e
--- /dev/null
+++ b/SOURCES/xsa104.patch
@@ -0,0 +1,44 @@
+x86/shadow: fix race condition sampling the dirty vram state
+
+d->arch.hvm_domain.dirty_vram must be read with the domain's paging lock held.
+
+If not, two concurrent hypercalls could both end up attempting to free
+dirty_vram (the second of which will free a wild pointer), or both end up
+allocating a new dirty_vram structure (the first of which will be leaked).
+
+This is XSA-104.
+
+Signed-off-by: Andrew Cooper
+Reviewed-by: Tim Deegan
+
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -3485,7 +3485,7 @@ int shadow_track_dirty_vram(struct domai
+     int flush_tlb = 0;
+     unsigned long i;
+     p2m_type_t t;
+-    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
++    struct sh_dirty_vram *dirty_vram;
+     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ 
+     if ( end_pfn < begin_pfn || end_pfn > p2m->max_mapped_pfn + 1 )
+@@ -3495,6 +3495,8 @@ int shadow_track_dirty_vram(struct domai
+     p2m_lock(p2m_get_hostp2m(d));
+     paging_lock(d);
+ 
++    dirty_vram = d->arch.hvm_domain.dirty_vram;
++
+     if ( dirty_vram && (!nr ||
+          ( begin_pfn != dirty_vram->begin_pfn
+            || end_pfn != dirty_vram->end_pfn )) )
+--- a/xen/include/asm-x86/hvm/domain.h
++++ b/xen/include/asm-x86/hvm/domain.h
+@@ -112,7 +112,7 @@ struct hvm_domain {
+     /* Memory ranges with pinned cache attributes. */
+     struct list_head pinned_cacheattr_ranges;
+ 
+-    /* VRAM dirty support. */
++    /* VRAM dirty support. Protect with the domain paging lock. */
+     struct sh_dirty_vram *dirty_vram;
+ 
+     /* If one of vcpus of this domain is in no_fill_mode or
diff --git a/SOURCES/xsa105.patch b/SOURCES/xsa105.patch
new file mode 100644
index 0000000..cc7cafd
--- /dev/null
+++ b/SOURCES/xsa105.patch
@@ -0,0 +1,37 @@
+x86/emulate: check cpl for all privileged instructions
+
+Without this, it is possible for userspace to load its own IDT or GDT.
+
+This is XSA-105.
+
+Reported-by: Andrei LUTAS
+Signed-off-by: Andrew Cooper
+Reviewed-by: Jan Beulich
+Tested-by: Andrei LUTAS
+
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -3314,6 +3314,7 @@ x86_emulate(
+         goto swint;
+ 
+     case 0xf4: /* hlt */
++        generate_exception_if(!mode_ring0(), EXC_GP, 0);
+         ctxt->retire.flags.hlt = 1;
+         break;
+ 
+@@ -3710,6 +3711,7 @@ x86_emulate(
+         break;
+     case 2: /* lgdt */
+     case 3: /* lidt */
++        generate_exception_if(!mode_ring0(), EXC_GP, 0);
+         generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
+         fail_if(ops->write_segment == NULL);
+         memset(&reg, 0, sizeof(reg));
+@@ -3738,6 +3740,7 @@ x86_emulate(
+     case 6: /* lmsw */
+         fail_if(ops->read_cr == NULL);
+         fail_if(ops->write_cr == NULL);
++        generate_exception_if(!mode_ring0(), EXC_GP, 0);
+         if ( (rc = ops->read_cr(0, &cr0, ctxt)) )
+             goto done;
+         if ( ea.type == OP_REG )
diff --git a/SOURCES/xsa106.patch b/SOURCES/xsa106.patch
new file mode 100644
index 0000000..436724d
--- /dev/null
+++ b/SOURCES/xsa106.patch
@@ -0,0 +1,23 @@
+x86emul: only emulate software interrupt injection for real mode
+
+Protected mode emulation currently lacks proper privilege checking of
+the referenced IDT entry, and there's currently no legitimate way for
+any of the respective instructions to reach the emulator when the guest
+is in protected mode.
+
+This is XSA-106.
+
+Reported-by: Andrei LUTAS
+Signed-off-by: Jan Beulich
+Acked-by: Keir Fraser
+
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -2634,6 +2634,7 @@ x86_emulate(
+     case 0xcd: /* int imm8 */
+         src.val = insn_fetch_type(uint8_t);
+     swint:
++        fail_if(!in_realmode(ctxt, ops)); /* XSA-106 */
+         fail_if(ops->inject_sw_interrupt == NULL);
+         rc = ops->inject_sw_interrupt(src.val, _regs.eip - ctxt->regs->eip,
+                                       ctxt) ? : X86EMUL_EXCEPTION;
diff --git a/SOURCES/xsa89.patch b/SOURCES/xsa89.patch
deleted file mode 100644
index 3443e33..0000000
--- a/SOURCES/xsa89.patch
+++ /dev/null
@@ -1,96 +0,0 @@
-x86: enforce preemption in HVM_set_mem_access / p2m_set_mem_access()
-
-Processing up to 4G PFNs may take almost arbitrarily long, so
-preemption is needed here.
-
-This is XSA-89.
-
-Signed-off-by: Jan Beulich
-Reviewed-by: Tim Deegan
-
---- a/xen/arch/x86/hvm/hvm.c
-+++ b/xen/arch/x86/hvm/hvm.c
-@@ -4593,6 +4593,15 @@ long do_hvm_op(unsigned long op, XEN_GUE
-             goto param_fail5;
- 
-         rc = p2m_set_mem_access(d, a.first_pfn, a.nr, a.hvmmem_access);
-+        if ( rc > 0 )
-+        {
-+            a.first_pfn += a.nr - rc;
-+            a.nr = rc;
-+            if ( __copy_to_guest(arg, &a, 1) )
-+                rc = -EFAULT;
-+            else
-+                rc = -EAGAIN;
-+        }
- 
-     param_fail5:
-         rcu_unlock_domain(d);
---- a/xen/arch/x86/mm/p2m.c
-+++ b/xen/arch/x86/mm/p2m.c
-@@ -1333,15 +1333,14 @@ void p2m_mem_access_resume(struct domain
- 
- /* Set access type for a region of pfns.
-  * If start_pfn == -1ul, sets the default access type */
--int p2m_set_mem_access(struct domain *d, unsigned long start_pfn,
--                       uint32_t nr, hvmmem_access_t access)
-+long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
-+                        hvmmem_access_t access)
- {
-     struct p2m_domain *p2m = p2m_get_hostp2m(d);
--    unsigned long pfn;
-     p2m_access_t a, _a;
-     p2m_type_t t;
-     mfn_t mfn;
--    int rc = 0;
-+    long rc;
- 
-     /* N.B. _not_ static: initializer depends on p2m->default_access */
-     p2m_access_t memaccess[] = {
-@@ -1364,14 +1363,17 @@ int p2m_set_mem_access(struct domain *d,
-     a = memaccess[access];
- 
-     /* If request to set default access */
--    if ( start_pfn == ~0ull )
-+    if ( pfn == ~0ul )
-     {
-         p2m->default_access = a;
-         return 0;
-     }
- 
-+    if ( !nr )
-+        return 0;
-+
-     p2m_lock(p2m);
--    for ( pfn = start_pfn; pfn < start_pfn + nr; pfn++ )
-+    for ( ; ; ++pfn )
-     {
-         mfn = p2m->get_entry(p2m, pfn, &t, &_a, 0, NULL);
-         if ( p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a) == 0 )
-@@ -1379,6 +1381,13 @@ int p2m_set_mem_access(struct domain *d,
-             rc = -ENOMEM;
-             break;
-         }
-+
-+        /* Check for continuation if it's not the last interation. */
-+        if ( !--nr || hypercall_preempt_check() )
-+        {
-+            rc = nr;
-+            break;
-+        }
-     }
-     p2m_unlock(p2m);
-     return rc;
---- a/xen/include/asm-x86/p2m.h
-+++ b/xen/include/asm-x86/p2m.h
-@@ -576,8 +576,8 @@ void p2m_mem_access_resume(struct domain
- 
- /* Set access type for a region of pfns.
-  * If start_pfn == -1ul, sets the default access type */
--int p2m_set_mem_access(struct domain *d, unsigned long start_pfn,
--                       uint32_t nr, hvmmem_access_t access);
-+long p2m_set_mem_access(struct domain *d, unsigned long start_pfn,
-+                        uint32_t nr, hvmmem_access_t access);
- 
- /* Get access type for a pfn
-  * If pfn == -1ul, gets the default access type */
diff --git a/SOURCES/xsa92-4.2.patch b/SOURCES/xsa92-4.2.patch
deleted file mode 100644
index 1ec2111..0000000
--- a/SOURCES/xsa92-4.2.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-x86/HVM: restrict HVMOP_set_mem_type
-
-Permitting arbitrary type changes here has the potential of creating
-present P2M (and hence EPT/NPT/IOMMU) entries pointing to an invalid
-MFN (INVALID_MFN truncated to the respective hardware structure field's
-width). This would become a problem the latest when something real sat
-at the end of the physical address space; I'm suspecting though that
-other things might break with such bogus entries.
-
-Along with that drop a bogus (and otherwise becoming stale) log
-message.
-
-Afaict the similar operation in p2m_set_mem_access() is safe.
-
-This is XSA-92.
-
-Signed-off-by: Jan Beulich
-Reviewed-by: Tim Deegan
-
---- a/xen/arch/x86/hvm/hvm.c
-+++ b/xen/arch/x86/hvm/hvm.c
-@@ -4245,12 +4245,10 @@ long do_hvm_op(unsigned long op, XEN_GUE
-             rc = -EINVAL;
-             goto param_fail4;
-         }
--        if ( p2m_is_grant(t) )
-+        if ( !p2m_is_ram(t) &&
-+             (!p2m_is_hole(t) || a.hvmmem_type != HVMMEM_mmio_dm) )
-         {
-             put_gfn(d, pfn);
--            gdprintk(XENLOG_WARNING,
--                     "type for pfn 0x%lx changed to grant while "
--                     "we were working?\n", pfn);
-             goto param_fail4;
-         }
-         else
diff --git a/SOURCES/xsa96.patch b/SOURCES/xsa96.patch
deleted file mode 100644
index ef6443f..0000000
--- a/SOURCES/xsa96.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-x86/HVM: eliminate vulnerabilities from hvm_inject_msi()
-
-- pirq_info() returns NULL for a non-allocated pIRQ, and hence we
-  mustn't unconditionally de-reference it, and we need to invoke it
-  another time after having called map_domain_emuirq_pirq()
-- don't use printk(), namely without XENLOG_GUEST, for error reporting
-
-This is XSA-96.
-
-Signed-off-by: Jan Beulich
-
---- a/xen/arch/x86/hvm/irq.c
-+++ b/xen/arch/x86/hvm/irq.c
-@@ -289,20 +289,18 @@ void hvm_inject_msi(struct domain *d, ui
-         struct pirq *info = pirq_info(d, pirq);
- 
-         /* if it is the first time, allocate the pirq */
--        if (info->arch.hvm.emuirq == IRQ_UNBOUND)
-+        if ( !info || info->arch.hvm.emuirq == IRQ_UNBOUND )
-         {
-             spin_lock(&d->event_lock);
-             map_domain_emuirq_pirq(d, pirq, IRQ_MSI_EMU);
-             spin_unlock(&d->event_lock);
-+            info = pirq_info(d, pirq);
-+            if ( !info )
-+                return;
-         } else if (info->arch.hvm.emuirq != IRQ_MSI_EMU)
--        {
--            printk("%s: pirq %d does not correspond to an emulated MSI\n", __func__, pirq);
-             return;
--        }
-         send_guest_pirq(d, info);
-         return;
--    } else {
--        printk("%s: error getting pirq from MSI: pirq = %d\n", __func__, pirq);
-     }
- }
- 
diff --git a/SOURCES/xsa97-hap-4.2.patch b/SOURCES/xsa97-hap-4.2.patch
new file mode 100644
index 0000000..5f89b58
--- /dev/null
+++ b/SOURCES/xsa97-hap-4.2.patch
@@ -0,0 +1,485 @@
+x86/paging: make log-dirty operations preemptible
+
+Both the freeing and the inspection of the bitmap get done in (nested)
+loops which - besides having a rather high iteration count in general,
+albeit that would be covered by XSA-77 - have the number of non-trivial
+iterations they need to perform (indirectly) controllable by both the
+guest they are for and any domain controlling the guest (including the
+one running qemu for it).
+
+This is XSA-97.
+
+Signed-off-by: Jan Beulich
+Reviewed-by: Tim Deegan
+
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -2136,7 +2136,9 @@ int domain_relinquish_resources(struct d
+     pci_release_devices(d);
+ 
+     /* Tear down paging-assistance stuff. */
+-    paging_teardown(d);
++    ret = paging_teardown(d);
++    if ( ret )
++        return ret;
+ 
+     /* Drop the in-use references to page-table bases. */
+     for_each_vcpu ( d, v )
+--- a/xen/arch/x86/domctl.c
++++ b/xen/arch/x86/domctl.c
+@@ -66,6 +66,9 @@ long arch_do_domctl(
+                                 &domctl->u.shadow_op,
+                                 guest_handle_cast(u_domctl, void));
+             rcu_unlock_domain(d);
++            if ( ret == -EAGAIN )
++                return hypercall_create_continuation(__HYPERVISOR_domctl,
++                                                     "h", u_domctl);
+             copy_to_guest(u_domctl, domctl, 1);
+         }
+     }
+--- a/xen/arch/x86/mm/hap/hap.c
++++ b/xen/arch/x86/mm/hap/hap.c
+@@ -678,8 +678,7 @@ int hap_domctl(struct domain *d, xen_dom
+         paging_unlock(d);
+         if ( preempted )
+             /* Not finished. Set up to re-run the call. */
+-            rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h",
+-                                               u_domctl);
++            rc = -EAGAIN;
+         else
+             /* Finished. Return the new allocation */
+             sc->mb = hap_get_allocation(d);
+--- a/xen/arch/x86/mm/paging.c
++++ b/xen/arch/x86/mm/paging.c
+@@ -26,6 +26,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+@@ -116,26 +117,46 @@ static void paging_free_log_dirty_page(s
+     d->arch.paging.free_page(d, mfn_to_page(mfn));
+ }
+ 
+-void paging_free_log_dirty_bitmap(struct domain *d)
++static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
+ {
+     mfn_t *l4, *l3, *l2;
+     int i4, i3, i2;
+ 
++    paging_lock(d);
++
+     if ( !mfn_valid(d->arch.paging.log_dirty.top) )
+-        return;
++    {
++        paging_unlock(d);
++        return 0;
++    }
+ 
+-    paging_lock(d);
++    if ( !d->arch.paging.preempt.vcpu )
++    {
++        memset(&d->arch.paging.preempt.log_dirty, 0,
++               sizeof(d->arch.paging.preempt.log_dirty));
++        ASSERT(rc <= 0);
++        d->arch.paging.preempt.log_dirty.done = -rc;
++    }
++    else if ( d->arch.paging.preempt.vcpu != current ||
++              d->arch.paging.preempt.op != XEN_DOMCTL_SHADOW_OP_OFF )
++    {
++        paging_unlock(d);
++        return -EBUSY;
++    }
+ 
+     l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
++    i4 = d->arch.paging.preempt.log_dirty.i4;
++    i3 = d->arch.paging.preempt.log_dirty.i3;
++    rc = 0;
+ 
+-    for ( i4 = 0; i4 < LOGDIRTY_NODE_ENTRIES; i4++ )
++    for ( ; i4 < LOGDIRTY_NODE_ENTRIES; i4++, i3 = 0 )
+     {
+         if ( !mfn_valid(l4[i4]) )
+             continue;
+ 
+         l3 = map_domain_page(mfn_x(l4[i4]));
+ 
+-        for ( i3 = 0; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
++        for ( ; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
+         {
+             if ( !mfn_valid(l3[i3]) )
+                 continue;
+@@ -148,20 +169,54 @@ void paging_free_log_dirty_bitmap(struct
+ 
+             unmap_domain_page(l2);
+             paging_free_log_dirty_page(d, l3[i3]);
++            l3[i3] = _mfn(INVALID_MFN);
++
++            if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
++            {
++                d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
++                d->arch.paging.preempt.log_dirty.i4 = i4;
++                rc = -EAGAIN;
++                break;
++            }
+         }
+ 
+         unmap_domain_page(l3);
++        if ( rc )
++            break;
+         paging_free_log_dirty_page(d, l4[i4]);
++        l4[i4] = _mfn(INVALID_MFN);
++
++        if ( i4 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
++        {
++            d->arch.paging.preempt.log_dirty.i3 = 0;
++            d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
++            rc = -EAGAIN;
++            break;
++        }
+     }
+ 
+     unmap_domain_page(l4);
+-    paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
+-    d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
+ 
+-    ASSERT(d->arch.paging.log_dirty.allocs == 0);
+-    d->arch.paging.log_dirty.failed_allocs = 0;
++    if ( !rc )
++    {
++        paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
++        d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
++
++        ASSERT(d->arch.paging.log_dirty.allocs == 0);
++        d->arch.paging.log_dirty.failed_allocs = 0;
++
++        rc = -d->arch.paging.preempt.log_dirty.done;
++        d->arch.paging.preempt.vcpu = NULL;
++    }
++    else
++    {
++        d->arch.paging.preempt.vcpu = current;
++        d->arch.paging.preempt.op = XEN_DOMCTL_SHADOW_OP_OFF;
++    }
+ 
+     paging_unlock(d);
++
++    return rc;
+ }
+ 
+ int paging_log_dirty_enable(struct domain *d)
+@@ -178,15 +233,25 @@ int paging_log_dirty_enable(struct domai
+     return ret;
+ }
+ 
+-int paging_log_dirty_disable(struct domain *d)
++static int paging_log_dirty_disable(struct domain *d, bool_t resuming)
+ {
+-    int ret;
++    int ret = 1;
++
++    if ( !resuming )
++    {
++        domain_pause(d);
++        /* Safe because the domain is paused. */
++        ret = d->arch.paging.log_dirty.disable_log_dirty(d);
++        ASSERT(ret <= 0);
++    }
+ 
+-    domain_pause(d);
+-    /* Safe because the domain is paused. */
+-    ret = d->arch.paging.log_dirty.disable_log_dirty(d);
+     if ( !paging_mode_log_dirty(d) )
+-        paging_free_log_dirty_bitmap(d);
++    {
++        ret = paging_free_log_dirty_bitmap(d, ret);
++        if ( ret == -EAGAIN )
++            return ret;
++    }
++
+     domain_unpause(d);
+ 
+     return ret;
+@@ -326,7 +391,9 @@ int paging_mfn_is_dirty(struct domain *d
+ 
+ /* Read a domain's log-dirty bitmap and stats. If the operation is a CLEAN,
+  * clear the bitmap and stats as well. */
+-int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
++static int paging_log_dirty_op(struct domain *d,
++                               struct xen_domctl_shadow_op *sc,
++                               bool_t resuming)
+ {
+     int rv = 0, clean = 0, peek = 1;
+     unsigned long pages = 0;
+@@ -334,9 +401,22 @@ int paging_log_dirty_op(struct domain *d
+     unsigned long *l1 = NULL;
+     int i4, i3, i2;
+ 
+-    domain_pause(d);
++    if ( !resuming )
++        domain_pause(d);
+     paging_lock(d);
+ 
++    if ( !d->arch.paging.preempt.vcpu )
++        memset(&d->arch.paging.preempt.log_dirty, 0,
++               sizeof(d->arch.paging.preempt.log_dirty));
++    else if ( d->arch.paging.preempt.vcpu != current ||
++              d->arch.paging.preempt.op != sc->op )
++    {
++        paging_unlock(d);
++        ASSERT(!resuming);
++        domain_unpause(d);
++        return -EBUSY;
++    }
++
+     clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN);
+ 
+     PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n",
+@@ -365,17 +445,15 @@ int paging_log_dirty_op(struct domain *d
+         goto out;
+     }
+ 
+-    pages = 0;
+     l4 = paging_map_log_dirty_bitmap(d);
++    i4 = d->arch.paging.preempt.log_dirty.i4;
++    i3 = d->arch.paging.preempt.log_dirty.i3;
++    pages = d->arch.paging.preempt.log_dirty.done;
+ 
+-    for ( i4 = 0;
+-          (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES);
+-          i4++ )
++    for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
+     {
+         l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
+-        for ( i3 = 0;
+-              (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
+-              i3++ )
++        for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
+         {
+             l2 = ((l3 && mfn_valid(l3[i3])) ?
+                   map_domain_page(mfn_x(l3[i3])) : NULL);
+@@ -410,18 +488,51 @@ int paging_log_dirty_op(struct domain *d
+             }
+             if ( l2 )
+                 unmap_domain_page(l2);
++
++            if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
++            {
++                d->arch.paging.preempt.log_dirty.i4 = i4;
++                d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
++                rv = -EAGAIN;
++                break;
++            }
+         }
+         if ( l3 )
+             unmap_domain_page(l3);
++
++        if ( !rv && i4 < LOGDIRTY_NODE_ENTRIES - 1 &&
++             hypercall_preempt_check() )
++        {
++            d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
++            d->arch.paging.preempt.log_dirty.i3 = 0;
++            rv = -EAGAIN;
++        }
++        if ( rv )
++            break;
+     }
+     if ( l4 )
+         unmap_domain_page(l4);
+ 
+-    if ( pages < sc->pages )
+-        sc->pages = pages;
++    if ( !rv )
++        d->arch.paging.preempt.vcpu = NULL;
++    else
++    {
++        d->arch.paging.preempt.vcpu = current;
++        d->arch.paging.preempt.op = sc->op;
++        d->arch.paging.preempt.log_dirty.done = pages;
++    }
+ 
+     paging_unlock(d);
+ 
++    if ( rv )
++    {
++        /* Never leave the domain paused for other errors. */
++        ASSERT(rv == -EAGAIN);
++        return rv;
++    }
++
++    if ( pages < sc->pages )
++        sc->pages = pages;
+     if ( clean )
+     {
+         /* We need to further call clean_dirty_bitmap() functions of specific
+@@ -432,6 +543,7 @@ int paging_log_dirty_op(struct domain *d
+     return rv;
+ 
+  out:
++    d->arch.paging.preempt.vcpu = NULL;
+     paging_unlock(d);
+     domain_unpause(d);
+ 
+@@ -498,12 +610,6 @@ void paging_log_dirty_init(struct domain
+     d->arch.paging.log_dirty.clean_dirty_bitmap = clean_dirty_bitmap;
+ }
+ 
+-/* This function fress log dirty bitmap resources. */
+-static void paging_log_dirty_teardown(struct domain*d)
+-{
+-    paging_free_log_dirty_bitmap(d);
+-}
+-
+ /************************************************/
+ /*           CODE FOR PAGING SUPPORT            */
+ /************************************************/
+@@ -547,6 +653,7 @@ void paging_vcpu_init(struct vcpu *v)
+ int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
+                   XEN_GUEST_HANDLE(void) u_domctl)
+ {
++    bool_t resuming = 0;
+     int rc;
+ 
+     if ( unlikely(d == current->domain) )
+@@ -569,6 +676,20 @@ int paging_domctl(struct domain *d, xen_
+         return -EINVAL;
+     }
+ 
++    if ( d->arch.paging.preempt.vcpu )
++    {
++        if ( d->arch.paging.preempt.vcpu != current ||
++             d->arch.paging.preempt.op != sc->op )
++        {
++            printk(XENLOG_G_DEBUG
++                   "d%d:v%d: Paging op %#x on Dom%u with unfinished prior op %#x\n",
++                   current->domain->domain_id, current->vcpu_id,
++                   sc->op, d->domain_id, d->arch.paging.preempt.op);
++            return -EBUSY;
++        }
++        resuming = 1;
++    }
++
+     rc = xsm_shadow_control(d, sc->op);
+     if ( rc )
+         return rc;
+@@ -594,13 +714,13 @@ int paging_domctl(struct domain *d, xen_
+ 
+     case XEN_DOMCTL_SHADOW_OP_OFF:
+         if ( paging_mode_log_dirty(d) )
+-            if ( (rc = paging_log_dirty_disable(d)) != 0 )
++            if ( (rc = paging_log_dirty_disable(d, resuming)) != 0 )
+                 return rc;
+         break;
+ 
+     case XEN_DOMCTL_SHADOW_OP_CLEAN:
+     case XEN_DOMCTL_SHADOW_OP_PEEK:
+-        return paging_log_dirty_op(d, sc);
++        return paging_log_dirty_op(d, sc, resuming);
+     }
+ 
+     /* Here, dispatch domctl to the appropriate paging code */
+@@ -611,18 +731,24 @@ int paging_domctl(struct domain *d, xen_
+ }
+ 
+ /* Call when destroying a domain */
+-void paging_teardown(struct domain *d)
++int paging_teardown(struct domain *d)
+ {
++    int rc;
++
+     if ( hap_enabled(d) )
+         hap_teardown(d);
+     else
+         shadow_teardown(d);
+ 
+     /* clean up log dirty resources. */
+-    paging_log_dirty_teardown(d);
++    rc = paging_free_log_dirty_bitmap(d, 0);
++    if ( rc == -EAGAIN )
++        return rc;
+ 
+     /* Move populate-on-demand cache back to domain_list for destruction */
+     p2m_pod_empty_cache(d);
++
++    return rc;
+ }
+ 
+ /* Call once all of the references to the domain have gone away */
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -3829,8 +3829,7 @@ int shadow_domctl(struct domain *d,
+         paging_unlock(d);
+         if ( preempted )
+             /* Not finished. Set up to re-run the call. */
+-            rc = hypercall_create_continuation(
+-                __HYPERVISOR_domctl, "h", u_domctl);
++            rc = -EAGAIN;
+         else
+             /* Finished. Return the new allocation */
+             sc->mb = shadow_get_allocation(d);
+--- a/xen/common/domain.c
++++ b/xen/common/domain.c
+@@ -479,7 +479,6 @@ int domain_kill(struct domain *d)
+         rc = domain_relinquish_resources(d);
+         if ( rc != 0 )
+         {
+-            BUG_ON(rc != -EAGAIN);
+             break;
+         }
+         if ( sched_move_domain(d, cpupool0) )
+--- a/xen/include/asm-x86/domain.h
++++ b/xen/include/asm-x86/domain.h
+@@ -193,6 +193,20 @@ struct paging_domain {
+     struct hap_domain    hap;
+     /* log dirty support */
+     struct log_dirty_domain log_dirty;
++
++    /* preemption handling */
++    struct {
++        struct vcpu *vcpu;
++        unsigned int op;
++        union {
++            struct {
++                unsigned long done:PADDR_BITS - PAGE_SHIFT;
++                unsigned long i4:PAGETABLE_ORDER;
++                unsigned long i3:PAGETABLE_ORDER;
++            } log_dirty;
++        };
++    } preempt;
++
+     /* alloc/free pages from the pool for paging-assistance structures
+      * (used by p2m and log-dirty code for their tries) */
+     struct page_info * (*alloc_page)(struct domain *d);
+--- a/xen/include/asm-x86/paging.h
++++ b/xen/include/asm-x86/paging.h
+@@ -141,9 +141,6 @@ struct paging_mode {
+ /*****************************************************************************
+  * Log dirty code */
+ 
+-/* free log dirty bitmap resource */
+-void paging_free_log_dirty_bitmap(struct domain *d);
+-
+ /* get the dirty bitmap for a specific range of pfns */
+ void paging_log_dirty_range(struct domain *d,
+                             unsigned long begin_pfn,
+@@ -153,9 +150,6 @@ void paging_log_dirty_range(struct domai
+ /* enable log dirty */
+ int paging_log_dirty_enable(struct domain *d);
+ 
+-/* disable log dirty */
+-int paging_log_dirty_disable(struct domain *d);
+-
+ /* log dirty initialization */
+ void paging_log_dirty_init(struct domain *d,
+                            int (*enable_log_dirty)(struct domain *d),
+@@ -218,7 +212,7 @@ int paging_domctl(struct domain *d, xen_
+                   XEN_GUEST_HANDLE(void) u_domctl);
+ 
+ /* Call when destroying a domain */
+-void paging_teardown(struct domain *d);
++int paging_teardown(struct domain *d);
+ 
+ /* Call once all of the references to the domain have gone away */
+ void paging_final_teardown(struct domain *d);
diff --git a/SPECS/xen.spec b/SPECS/xen.spec
index c8258ab..858ee49 100644
--- a/SPECS/xen.spec
+++ b/SPECS/xen.spec
@@ -18,7 +18,7 @@
 Summary: Xen is a virtual machine monitor
 Name:    xen
-Version: 4.2.4
+Version: 4.2.5
 Release: 33%{?dist}
 Group:   Development/Libraries
 License: GPLv2+ and LGPLv2+ and BSD
 
@@ -79,11 +79,11 @@ Patch100: xen-configure-xend.patch
 Patch106: xen-xl-autoballon-with-auto-option.patch
 Patch107: xen-xl-set-autoballon-default-auto.patch
 
-Patch200: xsa89.patch
-Patch201: xen-pygrub-fix-for-rhel7.patch
-Patch202: xsa92-4.2.patch
-Patch203: xsa96.patch
-
+#Patch200: xsa89.patch
+Patch205: xsa97-hap-4.2.patch
+Patch206: xsa104.patch
+Patch207: xsa105.patch
+Patch208: xsa106.patch
 
 Patch1000: xen-centos-disable-CFLAGS-for-qemu.patch
 Patch1001: xen-centos-disableWerror-blktap25.patch
@@ -259,10 +259,10 @@ manage Xen virtual machines.
 %patch106 -p1
 %patch107 -p1
 
-%patch200 -p1
-%patch201 -p1
-%patch202 -p1
-%patch203 -p1
+%patch205 -p1
+%patch206 -p1
+%patch207 -p1
+%patch208 -p1
 
 %patch1000 -p1
 
@@ -811,6 +811,14 @@ rm -rf %{buildroot}
 %endif
 
 %changelog
+* Fri Sep 26 2014 Johnny Hughes - 4.2.5-33.el6.centos
+- upgrade to upstream Xen version 4.2.5
+- removed patches that are already part of 4.2.5
+- Added Patch205 (XSA-97, CVE-2014-5146, CVE-2014-5149)
+- Added Patch206 (XSA-104, CVE-2014-7154)
+- Added Patch207 (XSA-105, CVE-2014-7155)
+- Added Patch208 (XSA-106, CVE-2014-7156)
+
 * Mon Jun 16 2014 Johnny Hughes - 4.2.4-33.el6.centos
 - actually apply patch203 :)
 
-- 
2.39.5