xenbits.xensource.com Git - xtf.git/commitdiff
pv: Honour test_wants_user_mappings
author: Andrew Cooper <andrew.cooper3@citrix.com>
Tue, 19 Jul 2016 13:58:46 +0000 (14:58 +0100)
committer: Andrew Cooper <andrew.cooper3@citrix.com>
Tue, 19 Jul 2016 15:09:34 +0000 (16:09 +0100)
32bit PV guests start on supervisor mappings, and previously unilaterally
switched to user mappings.  Now, only switch to user mappings if a test has
opted in.

64bit PV guests run at cpl3, must use user mappings and architecturally have
no choice in the matter.

This change is best reviewed with `git show --ignore-all-space`

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
arch/x86/pv/traps.c

index fa741b3401e6a450e0a3a37b82b88afbfe18e6d8..ed98dc6a6da895bcbe8e7dcfbfeca316373b15b0 100644 (file)
@@ -1,6 +1,7 @@
 #include <xtf/traps.h>
 #include <xtf/lib.h>
 #include <xtf/hypercall.h>
+#include <xtf/test.h>
 
 #include <arch/x86/lib.h>
 #include <arch/x86/processor.h>
@@ -97,65 +98,68 @@ void arch_init_traps(void)
         panic("Failed to set user %%cr3: %d\n", rc);
 
 #elif defined(__i386__)
-    /*
-     * Walk the live pagetables and set _PAGE_USER
-     *
-     * XTF uses a shared user/kernel address space, and _PAGE_USER must be set
-     * to permit cpl3 access to the virtual addresses without taking a
-     * pagefault.
-     *
-     * !!! WARNING !!!
-     *
-     * Because PV guests and Xen share CR4, Xen's setting of CR4.{SMEP,SMAP}
-     * interfere with 32bit PV guests.  For now, 32bit PV XTF tests require
-     * the hypervisor to be booted with "smep=0 smap=0".
-     *
-     * TODO - figure out how to work around this restriction while maintaining
-     * a shared user/kernel address space for the framework.
-     *
-     * !!! WARNING !!!
-     */
-    uint64_t *l3 = _p(start_info->pt_base);
-    unsigned long va = 0;
-
-    while ( va < __HYPERVISOR_VIRT_START_PAE )
+    if ( test_wants_user_mappings )
     {
-        unsigned int i3 = l3_table_offset(va);
-
-        if ( !(l3[i3] & _PAGE_PRESENT) )
-        {
-            va += 1UL << L3_PT_SHIFT;
-            continue;
-        }
-
-        uint64_t *l2 = maddr_to_virt(pte_to_paddr(l3[i3]));
-        unsigned int i2 = l2_table_offset(va);
-
-        if ( !(l2[i2] & _PAGE_PRESENT) )
+        /*
+         * Walk the live pagetables and set _PAGE_USER
+         *
+         * XTF uses a shared user/kernel address space, and _PAGE_USER must be
+         * set to permit cpl3 access to the virtual addresses without taking a
+         * pagefault.
+         *
+         * !!! WARNING !!!
+         *
+         * Because PV guests and Xen share CR4, Xen's setting of
+         * CR4.{SMEP,SMAP} interfere with 32bit PV guests.  For now, 32bit PV
+         * XTF tests require the hypervisor to be booted with "smep=0 smap=0".
+         *
+         * TODO - figure out how to work around this restriction while
+         * maintaining a shared user/kernel address space for the framework.
+         *
+         * !!! WARNING !!!
+         */
+        uint64_t *l3 = _p(start_info->pt_base);
+        unsigned long va = 0;
+
+        while ( va < __HYPERVISOR_VIRT_START_PAE )
         {
-            va += 1UL << L2_PT_SHIFT;
-            continue;
-        }
-
-        uint64_t *l1 = maddr_to_virt(pte_to_paddr(l2[i2]));
-        unsigned int i1 = l1_table_offset(va);
+            unsigned int i3 = l3_table_offset(va);
+
+            if ( !(l3[i3] & _PAGE_PRESENT) )
+            {
+                va += 1UL << L3_PT_SHIFT;
+                continue;
+            }
+
+            uint64_t *l2 = maddr_to_virt(pte_to_paddr(l3[i3]));
+            unsigned int i2 = l2_table_offset(va);
+
+            if ( !(l2[i2] & _PAGE_PRESENT) )
+            {
+                va += 1UL << L2_PT_SHIFT;
+                continue;
+            }
+
+            uint64_t *l1 = maddr_to_virt(pte_to_paddr(l2[i2]));
+            unsigned int i1 = l1_table_offset(va);
+
+            if ( !(l1[i1] & _PAGE_PRESENT) )
+            {
+                va += 1UL << L1_PT_SHIFT;
+                continue;
+            }
+
+            if ( !(l1[i1] & _PAGE_USER) )
+            {
+                rc = hypercall_update_va_mapping(_p(va), l1[i1] | _PAGE_USER,
+                                                 UVMF_INVLPG);
+                if ( rc )
+                    panic("update_va_mapping(%p, 0x%016"PRIx64") failed: %d\n",
+                          _p(va), l1[i1] | _PAGE_USER, rc);
+            }
 
-        if ( !(l1[i1] & _PAGE_PRESENT) )
-        {
             va += 1UL << L1_PT_SHIFT;
-            continue;
         }
-
-        if ( !(l1[i1] & _PAGE_USER) )
-        {
-            rc = hypercall_update_va_mapping(_p(va), l1[i1] | _PAGE_USER,
-                                             UVMF_INVLPG);
-            if ( rc )
-                panic("update_va_mapping(%p, 0x%016"PRIx64") failed: %d\n",
-                      _p(va), l1[i1] | _PAGE_USER, rc);
-        }
-
-        va += 1UL << L1_PT_SHIFT;
     }
 #endif
 }