x86/pv: Move stack_switch()/set_segment_base() into PV-only files
author    Andrew Cooper <andrew.cooper3@citrix.com>
          Thu, 3 Sep 2020 18:09:45 +0000 (19:09 +0100)
committer Andrew Cooper <andrew.cooper3@citrix.com>
          Wed, 9 Sep 2020 15:53:28 +0000 (16:53 +0100)
So they are excluded from !CONFIG_PV builds.
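
For reference, these are the handlers behind the stack_switch and
set_segment_base hypercalls.  A minimal sketch of how a 64-bit PV kernel
uses them, assuming Linux-style hypercall wrappers
(HYPERVISOR_stack_switch()/HYPERVISOR_set_segment_base()); next_sp0,
next_fs_base and next_gs_sel are illustrative:

    /*
     * Context switch: a PV kernel cannot write TSS.rsp0 itself, so it
     * registers the new kernel stack selector/pointer with Xen instead.
     */
    HYPERVISOR_stack_switch(__KERNEL_DS, next_sp0);

    /* Point the 64-bit %fs base at the new thread's TLS block. */
    HYPERVISOR_set_segment_base(SEGBASE_FS, next_fs_base);

    /* Reload the user %gs selector (and its base) from the GDT/LDT. */
    HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, next_gs_sel);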

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
xen/arch/x86/pv/misc-hypercalls.c
xen/arch/x86/x86_64/mm.c

diff --git a/xen/arch/x86/pv/misc-hypercalls.c b/xen/arch/x86/pv/misc-hypercalls.c
index 136fa10c96f24d932795d04c7e3251859c9b07e4..b353972e3db9e8c6b62167fd736d86ee22a03246 100644
--- a/xen/arch/x86/pv/misc-hypercalls.c
+++ b/xen/arch/x86/pv/misc-hypercalls.c
@@ -171,6 +171,107 @@ long set_debugreg(struct vcpu *v, unsigned int reg, unsigned long value)
     return 0;
 }
 
+long do_stack_switch(unsigned long ss, unsigned long esp)
+{
+    fixup_guest_stack_selector(current->domain, ss);
+    current->arch.pv.kernel_ss = ss;
+    current->arch.pv.kernel_sp = esp;
+
+    return 0;
+}
+
+long do_set_segment_base(unsigned int which, unsigned long base)
+{
+    struct vcpu *v = current;
+    long ret = 0;
+
+    if ( is_pv_32bit_vcpu(v) )
+        return -ENOSYS; /* x86/64 only. */
+
+    switch ( which )
+    {
+    case SEGBASE_FS:
+        if ( is_canonical_address(base) )
+            wrfsbase(base);
+        else
+            ret = -EINVAL;
+        break;
+
+    case SEGBASE_GS_USER:
+        if ( is_canonical_address(base) )
+        {
+            wrgsshadow(base);
+            v->arch.pv.gs_base_user = base;
+        }
+        else
+            ret = -EINVAL;
+        break;
+
+    case SEGBASE_GS_KERNEL:
+        if ( is_canonical_address(base) )
+            wrgsbase(base);
+        else
+            ret = -EINVAL;
+        break;
+
+    case SEGBASE_GS_USER_SEL:
+    {
+        unsigned int sel = (uint16_t)base;
+
+        /*
+         * We wish to update the user %gs from the GDT/LDT.  Currently, the
+         * guest kernel's GS_BASE is in context.
+         */
+        asm volatile ( "swapgs" );
+
+        if ( sel > 3 )
+            /* Fix up RPL for non-NUL selectors. */
+            sel |= 3;
+        else if ( boot_cpu_data.x86_vendor &
+                  (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+            /* Work around NUL segment behaviour on AMD hardware. */
+            asm volatile ( "mov %[sel], %%gs"
+                           :: [sel] "r" (FLAT_USER_DS32) );
+
+        /*
+         * Load the chosen selector, with fault handling.
+         *
+         * Errors ought to fail the hypercall, but that was never built in
+         * originally, and Linux will BUG() if this call fails.
+         *
+         * NUL the selector in the case of an error.  This too needs to deal
+         * with the AMD NUL segment behaviour, but it is already a slowpath in
+         * #GP context so perform the flat load unconditionally to avoid
+         * complicated logic.
+         *
+         * Anyone wanting to check for errors from this hypercall should
+         * re-read %gs and compare against the input.
+         */
+        asm volatile ( "1: mov %[sel], %%gs\n\t"
+                       ".section .fixup, \"ax\", @progbits\n\t"
+                       "2: mov %k[flat], %%gs\n\t"
+                       "   xor %[sel], %[sel]\n\t"
+                       "   jmp 1b\n\t"
+                       ".previous\n\t"
+                       _ASM_EXTABLE(1b, 2b)
+                       : [sel] "+r" (sel)
+                       : [flat] "r" (FLAT_USER_DS32) );
+
+        /* Update the cache of the inactive base, as read from the GDT/LDT. */
+        v->arch.pv.gs_base_user = rdgsbase();
+
+        asm volatile ( safe_swapgs );
+        break;
+    }
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index b69cf2dc4fb7c15fa124a96c809c61fb99f39112..98581dfe5f3c5a38411e7291d7baa63193a78368 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1010,107 +1010,6 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     return rc;
 }
 
-long do_stack_switch(unsigned long ss, unsigned long esp)
-{
-    fixup_guest_stack_selector(current->domain, ss);
-    current->arch.pv.kernel_ss = ss;
-    current->arch.pv.kernel_sp = esp;
-    return 0;
-}
-
-long do_set_segment_base(unsigned int which, unsigned long base)
-{
-    struct vcpu *v = current;
-    long ret = 0;
-
-    if ( is_pv_32bit_vcpu(v) )
-        return -ENOSYS; /* x86/64 only. */
-
-    switch ( which )
-    {
-    case SEGBASE_FS:
-        if ( is_canonical_address(base) )
-            wrfsbase(base);
-        else
-            ret = -EINVAL;
-        break;
-
-    case SEGBASE_GS_USER:
-        if ( is_canonical_address(base) )
-        {
-            wrgsshadow(base);
-            v->arch.pv.gs_base_user = base;
-        }
-        else
-            ret = -EINVAL;
-        break;
-
-    case SEGBASE_GS_KERNEL:
-        if ( is_canonical_address(base) )
-            wrgsbase(base);
-        else
-            ret = -EINVAL;
-        break;
-
-    case SEGBASE_GS_USER_SEL:
-    {
-        unsigned int sel = (uint16_t)base;
-
-        /*
-         * We wish to update the user %gs from the GDT/LDT.  Currently, the
-         * guest kernel's GS_BASE is in context.
-         */
-        asm volatile ( "swapgs" );
-
-        if ( sel > 3 )
-            /* Fix up RPL for non-NUL selectors. */
-            sel |= 3;
-        else if ( boot_cpu_data.x86_vendor &
-                  (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
-            /* Work around NUL segment behaviour on AMD hardware. */
-            asm volatile ( "mov %[sel], %%gs"
-                           :: [sel] "r" (FLAT_USER_DS32) );
-
-        /*
-         * Load the chosen selector, with fault handling.
-         *
-         * Errors ought to fail the hypercall, but that was never built in
-         * originally, and Linux will BUG() if this call fails.
-         *
-         * NUL the selector in the case of an error.  This too needs to deal
-         * with the AMD NUL segment behaviour, but it is already a slowpath in
-         * #GP context so perform the flat load unconditionally to avoid
-         * complicated logic.
-         *
-         * Anyone wanting to check for errors from this hypercall should
-         * re-read %gs and compare against the input.
-         */
-        asm volatile ( "1: mov %[sel], %%gs\n\t"
-                       ".section .fixup, \"ax\", @progbits\n\t"
-                       "2: mov %k[flat], %%gs\n\t"
-                       "   xor %[sel], %[sel]\n\t"
-                       "   jmp 1b\n\t"
-                       ".previous\n\t"
-                       _ASM_EXTABLE(1b, 2b)
-                       : [sel] "+r" (sel)
-                       : [flat] "r" (FLAT_USER_DS32) );
-
-        /* Update the cache of the inactive base, as read from the GDT/LDT. */
-        v->arch.pv.gs_base_user = rdgsbase();
-
-        asm volatile ( safe_swapgs );
-        break;
-    }
-
-    default:
-        ret = -EINVAL;
-        break;
-    }
-
-    return ret;
-}
-
-
 /* Returns TRUE if given descriptor is valid for GDT or LDT. */
 int check_descriptor(const struct domain *dom, seg_desc_t *d)
 {