xenbits.xensource.com Git - people/andrewcoop/xen.git/commitdiff
drop-vm86
author Andrew Cooper <andrew.cooper3@citrix.com>
Sun, 29 Dec 2024 14:46:34 +0000 (14:46 +0000)
committer Andrew Cooper <andrew.cooper3@citrix.com>
Mon, 6 Jan 2025 14:19:10 +0000 (14:19 +0000)
xen/arch/x86/cpu/common.c
xen/arch/x86/include/asm/cpu-user-regs.h
xen/arch/x86/include/asm/current.h
xen/arch/x86/include/asm/hvm/hvm.h
xen/arch/x86/include/asm/regs.h
xen/arch/x86/traps.c
xen/arch/x86/x86_64/asm-offsets.c
xen/arch/x86/x86_64/entry.S
xen/arch/x86/x86_64/traps.c

diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index f016c8f9ee0c3bdc0eaabc82ce42389cc315becd..b7331e6c3f7f7e74493ed29bbc804aae89e8aab3 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -912,7 +912,7 @@ void load_system_tables(void)
         * Defer checks until exception support is sufficiently set up.
         */
        BUILD_BUG_ON((sizeof(struct cpu_info) -
-                     offsetof(struct cpu_info, guest_cpu_user_regs.es)) & 0xf);
+                     sizeof(struct cpu_user_regs)) & 0xf);
        BUG_ON(system_state != SYS_STATE_early_boot && (stack_bottom & 0xf));
 }
 
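For illustration of the reworked BUILD_BUG_ON() above: cpu_info sits at the
16-byte-aligned top of the stack with guest_cpu_user_regs as its first member,
so the distance from the end of the register frame to the end of cpu_info must
be a multiple of 16 for the stack bottom programmed into TSS.rsp0 to stay
16-byte aligned.  A standalone sketch with simplified, hypothetical layouts
(not Xen's real structures):

/*
 * Simplified stand-ins for struct cpu_user_regs and struct cpu_info; the
 * static assertion mirrors the reworked BUILD_BUG_ON() in common.c.
 */
#include <stdint.h>

struct regs_sketch {                    /* stands in for cpu_user_regs */
    uint64_t gpr[15];
    uint32_t error_code, entry_vector;
    uint64_t rip;
    uint16_t cs, _pad0[3];
    uint64_t rflags, rsp;
    uint16_t ss, _pad2[3];
};

struct cpu_info_sketch {                /* stands in for cpu_info */
    struct regs_sketch guest_cpu_user_regs;
    uint32_t processor_id;
    uint64_t per_cpu_offset;
    /* ... remaining per-CPU fields ... */
};

_Static_assert((sizeof(struct cpu_info_sketch) -
                sizeof(struct regs_sketch)) % 16 == 0,
               "stack bottom would not be 16-byte aligned");
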
diff --git a/xen/arch/x86/include/asm/cpu-user-regs.h b/xen/arch/x86/include/asm/cpu-user-regs.h
index 4a2445ff58300f68842eabaabdf53d7a1499ba4e..259de3cb6ea0fa6bf101fdd4e43d9f7a38ca1bc2 100644
--- a/xen/arch/x86/include/asm/cpu-user-regs.h
+++ b/xen/arch/x86/include/asm/cpu-user-regs.h
@@ -54,10 +54,6 @@ struct cpu_user_regs
     DECL_REG_LO16(flags); /* rflags.IF == !saved_upcall_mask */
     DECL_REG_LO8(sp);
     uint16_t ss, _pad2[3];
-    uint16_t es, _pad3[3];
-    uint16_t ds, _pad4[3];
-    uint16_t fs, _pad5[3];
-    uint16_t gs, _pad6[3];
 };
 
 #undef DECL_REG_HI
diff --git a/xen/arch/x86/include/asm/current.h b/xen/arch/x86/include/asm/current.h
index 243d17ef79fd23c022b3078acc908f2cb96a71d6..a7c9473428b2c7214082a02bd84f47811bbf2961 100644
--- a/xen/arch/x86/include/asm/current.h
+++ b/xen/arch/x86/include/asm/current.h
@@ -106,12 +106,12 @@ static inline struct cpu_info *get_cpu_info(void)
 #define get_per_cpu_offset()  (get_cpu_info()->per_cpu_offset)
 
 /*
- * Get the bottom-of-stack, as stored in the per-CPU TSS. This actually points
- * into the middle of cpu_info.guest_cpu_user_regs, at the section that
- * precisely corresponds to a CPU trap frame.
+ * Get the bottom-of-stack, as stored in the per-CPU TSS. This points at the
+ * end of cpu_info.guest_cpu_user_regs, at the section that precisely
+ * corresponds to a CPU trap frame.
  */
 #define get_stack_bottom()                      \
-    ((unsigned long)&get_cpu_info()->guest_cpu_user_regs.es)
+    ((unsigned long)(&get_cpu_info()->guest_cpu_user_regs + 1))
 
 /*
  * Get the reasonable stack bounds for stack traces and stack dumps.  Stack
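
To illustrate the get_stack_bottom() change above: with the trailing selector
fields removed, "one past the end of guest_cpu_user_regs" names exactly the
byte that &guest_cpu_user_regs.es used to.  A self-contained sketch using a
simplified, hypothetical frame layout (GPRs omitted; not Xen's real struct):

/*
 * frame_old models the layout before this change (software-saved selectors
 * after the hardware iret frame); frame_new models the layout after it.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct frame_old {
    uint64_t rip;
    uint16_t cs, _pad0[3];
    uint64_t rflags, rsp;
    uint16_t ss, _pad2[3];
    uint16_t es, _pad3[3];              /* software-only, not pushed by hw */
    uint16_t ds, _pad4[3];
    uint16_t fs, _pad5[3];
    uint16_t gs, _pad6[3];
};

struct frame_new {
    uint64_t rip;
    uint16_t cs, _pad0[3];
    uint64_t rflags, rsp;
    uint16_t ss, _pad2[3];
};

int main(void)
{
    /* Old stack bottom: the first byte of the first software-only field. */
    size_t old_bottom = offsetof(struct frame_old, es);

    /* New stack bottom: simply one past the end of the struct. */
    size_t new_bottom = sizeof(struct frame_new);

    /* Both name the same boundary relative to the start of the frame. */
    assert(old_bottom == new_bottom);
    return 0;
}
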
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index cad3a9427801a292b4eb8f6317ad7122563be1d2..56eaa9117d9bdde165bbf0c906b07d2f768e14b3 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -621,10 +621,6 @@ static inline void hvm_sanitize_regs_fields(struct cpu_user_regs *regs,
     regs->saved_upcall_mask = 0xbf;
     regs->cs = 0xbeef;
     regs->ss = 0xbeef;
-    regs->ds = 0xbeef;
-    regs->es = 0xbeef;
-    regs->fs = 0xbeef;
-    regs->gs = 0xbeef;
 #endif
 }
 
diff --git a/xen/arch/x86/include/asm/regs.h b/xen/arch/x86/include/asm/regs.h
index c05b9207c281503bdba552adf7963cb34abd1f35..dcc45ac5af7f245a6d3e7fe7d0a99ae38ef886a0 100644
--- a/xen/arch/x86/include/asm/regs.h
+++ b/xen/arch/x86/include/asm/regs.h
@@ -20,8 +20,7 @@
     (!is_pv_32bit_vcpu(v) ? ((tb)->eip == 0) : (((tb)->cs & ~3) == 0))
 
 /* Number of bytes of on-stack execution state to be context-switched. */
-/* NB. Segment registers and bases are not saved/restored on x86/64 stack. */
-#define CTXT_SWITCH_STACK_BYTES (offsetof(struct cpu_user_regs, es))
+#define CTXT_SWITCH_STACK_BYTES sizeof(struct cpu_user_regs)
 
 #define guest_mode(r)                                                         \
 ({                                                                            \
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 613a0e877521b61767cb78b020c7179922e2eb7c..42d129c4686aebac87f2c3db23d7e441ee3d3a92 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -420,7 +420,7 @@ unsigned long get_stack_trace_bottom(unsigned long sp)
     {
     case 1 ... 4:
         return ROUNDUP(sp, PAGE_SIZE) -
-            offsetof(struct cpu_user_regs, es) - sizeof(unsigned long);
+            sizeof(struct cpu_user_regs) - sizeof(unsigned long);
 
     case 6 ... 7:
         return ROUNDUP(sp, STACK_SIZE) -
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 630bdc39451d96c15eb0f298b984789f06d76593..2258b4ce1b957453c64d42926df9b854c024038e 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -52,7 +52,7 @@ void __dummy__(void)
     OFFSET(UREGS_eflags, struct cpu_user_regs, rflags);
     OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
     OFFSET(UREGS_ss, struct cpu_user_regs, ss);
-    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
+    DEFINE(UREGS_kernel_sizeof, sizeof(struct cpu_user_regs));
     BLANK();
 
     /*
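
For context on the OFFSET()/DEFINE() swap above: both macros emit build-time
constants that get post-processed into asm-offsets.h for use by assembly code;
OFFSET() emits a structure member offset, DEFINE() an arbitrary constant
expression.  With the selector fields gone there is no ".es" member whose
offset equals the frame size, so the size is now emitted directly.  A generic
illustration of the idiom (macro bodies are illustrative, not copied from
Xen's build machinery):

/*
 * The "i" constraint forces a compile-time constant; the resulting assembly
 * listing is scanned for the "==> ... <==" markers and turned into #defines.
 */
#include <stddef.h>

#define DEFINE(sym, val) \
    asm volatile ( "\n.ascii \"==> #define " #sym " %0 <==\"" :: "i" (val) )
#define OFFSET(sym, str, mem)  DEFINE(sym, offsetof(str, mem))

struct example_regs {
    unsigned long rip, rflags, rsp;
    unsigned short cs, ss;
};

void example_offsets(void)
{
    /* Member offset, as UREGS_kernel_sizeof used to be derived from ".es". */
    OFFSET(EXAMPLE_rsp, struct example_regs, rsp);

    /* Plain constant, as UREGS_kernel_sizeof is now defined. */
    DEFINE(EXAMPLE_sizeof, sizeof(struct example_regs));
}
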
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 40d094d5b2eeffa8c2806ff6124ae3426d661a30..f9e25245ab21f297e4c073712091ca0d480258ca 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -924,6 +924,9 @@ exception_with_ints_disabled:
         call  search_pre_exception_table
         testq %rax,%rax                 # no fixup code for faulting EIP?
         jz    .Ldispatch_exceptions
+
+/* Exception recovery from the pre exception table (fault on IRET instruction). */
+
         movq  %rax,UREGS_rip(%rsp)      # fixup regular stack
 
 #ifdef CONFIG_XEN_SHSTK
@@ -941,7 +944,7 @@ exception_with_ints_disabled:
         subq  $8,%rsp
         movq  %rsp,%rdi
         movq  $UREGS_kernel_sizeof/8,%rcx
-        rep;  movsq                     # make room for ec/ev
+        rep movsq                       # make room for ec/ev
 1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
         movq  %rax,UREGS_kernel_sizeof(%rsp)
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index dbca9d1430b93338500b8dc43727f0ede2fa1199..70ab71d370ad19556bcf12bb10e3ffa66c028fe6 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -135,17 +135,11 @@ static void _show_registers(
 
 void show_registers(const struct cpu_user_regs *regs)
 {
-    struct cpu_user_regs fault_regs;
+    struct cpu_user_regs fault_regs = *regs;
     struct extra_state fault_state;
     enum context context;
     struct vcpu *v = system_state >= SYS_STATE_smp_boot ? current : NULL;
 
-    /*
-     * Don't read beyond the end of the hardware frame.  It is out of bounds
-     * for WARN()/etc.
-     */
-    memcpy(&fault_regs, regs, offsetof(struct cpu_user_regs, es));
-
     if ( guest_mode(regs) && is_hvm_vcpu(v) )
     {
         get_hvm_registers(v, &fault_regs, &fault_state);