* Defer checks until exception support is sufficiently set up.
*/
BUILD_BUG_ON((sizeof(struct cpu_info) -
- offsetof(struct cpu_info, guest_cpu_user_regs.es)) & 0xf);
+ sizeof(struct cpu_user_regs)) & 0xf);
BUG_ON(system_state != SYS_STATE_early_boot && (stack_bottom & 0xf));
}
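As a standalone illustration of the assertion above, assuming (as in this code) that guest_cpu_user_regs leads cpu_info and that cpu_info ends at a 16-byte-aligned stack top — all names and sizes below are hypothetical stand-ins, not the real layout:

    struct example_regs { unsigned long gpr[18]; };       /* hypothetical frame */
    struct example_info {
        struct example_regs guest_cpu_user_regs;          /* trap frame first */
        unsigned long per_cpu_offset, xen_cr3;            /* hypothetical tail */
    };
    /* The tail past the frame must be a multiple of 16 bytes, so that the
     * frame's end (the stack bottom) stays 16-byte aligned. */
    _Static_assert(((sizeof(struct example_info) -
                     sizeof(struct example_regs)) & 0xf) == 0,
                   "stack bottom would be misaligned");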
DECL_REG_LO16(flags); /* rflags.IF == !saved_upcall_mask */
DECL_REG_LO8(sp);
uint16_t ss, _pad2[3];
- uint16_t es, _pad3[3];
- uint16_t ds, _pad4[3];
- uint16_t fs, _pad5[3];
- uint16_t gs, _pad6[3];
};
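Each selector here occupies a full 8-byte stack slot (one 16-bit field plus three 16-bit pads), so dropping es/ds/fs/gs shrinks the frame by exactly 32 bytes. A quick illustrative check:

    #include <stdint.h>
    struct slot { uint16_t sel, _pad[3]; };  /* mirrors the es/ds/fs/gs slots */
    _Static_assert(sizeof(struct slot) == 8, "one 8-byte stack slot per selector");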
#undef DECL_REG_HI
#define get_per_cpu_offset() (get_cpu_info()->per_cpu_offset)
/*
- * Get the bottom-of-stack, as stored in the per-CPU TSS. This actually points
- * into the middle of cpu_info.guest_cpu_user_regs, at the section that
- * precisely corresponds to a CPU trap frame.
+ * Get the bottom-of-stack, as stored in the per-CPU TSS. This points at the
+ * end of cpu_info.guest_cpu_user_regs, which now corresponds precisely to a
+ * full CPU trap frame.
*/
#define get_stack_bottom() \
- ((unsigned long)&get_cpu_info()->guest_cpu_user_regs.es)
+ ((unsigned long)(&get_cpu_info()->guest_cpu_user_regs + 1))
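The rewritten macro uses the standard one-past-the-end idiom: &obj + 1 is well-defined C and names the first byte after the object, replacing the old trick of taking the address of the first (now removed) segment-register field. Illustratively:

    struct cpu_user_regs *r = &get_cpu_info()->guest_cpu_user_regs;
    unsigned long bottom = (unsigned long)(r + 1);  /* == get_stack_bottom() */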
/*
* Get the reasonable stack bounds for stack traces and stack dumps. Stack
regs->saved_upcall_mask = 0xbf;
regs->cs = 0xbeef;
regs->ss = 0xbeef;
- regs->ds = 0xbeef;
- regs->es = 0xbeef;
- regs->fs = 0xbeef;
- regs->gs = 0xbeef;
#endif
}
(!is_pv_32bit_vcpu(v) ? ((tb)->eip == 0) : (((tb)->cs & ~3) == 0))
/* Number of bytes of on-stack execution state to be context-switched. */
-/* NB. Segment registers and bases are not saved/restored on x86/64 stack. */
-#define CTXT_SWITCH_STACK_BYTES (offsetof(struct cpu_user_regs, es))
+#define CTXT_SWITCH_STACK_BYTES sizeof(struct cpu_user_regs)
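Since no segment-register tail has to be skipped any more, a consumer of this constant can treat the frame as one opaque block. A minimal sketch of such a consumer — the helper name and signature are hypothetical, not from this patch:

    #include <string.h>  /* memcpy; the hypervisor would use its own headers */

    /* Hypothetical: snapshot the to-be-switched on-stack state in one copy. */
    static void sketch_copy_ctxt(void *dst, const void *stack_bottom)
    {
        memcpy(dst,
               (const char *)stack_bottom - CTXT_SWITCH_STACK_BYTES,
               CTXT_SWITCH_STACK_BYTES);
    }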
#define guest_mode(r) \
({ \
{
case 1 ... 4:
return ROUNDUP(sp, PAGE_SIZE) -
- offsetof(struct cpu_user_regs, es) - sizeof(unsigned long);
+ sizeof(struct cpu_user_regs) - sizeof(unsigned long);
case 6 ... 7:
return ROUNDUP(sp, STACK_SIZE) -
OFFSET(UREGS_eflags, struct cpu_user_regs, rflags);
OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
OFFSET(UREGS_ss, struct cpu_user_regs, ss);
- OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
+ DEFINE(UREGS_kernel_sizeof, sizeof(struct cpu_user_regs));
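UREGS_kernel_sizeof moves from OFFSET() to DEFINE() because it is no longer a field offset but the size of the whole struct. Assuming the usual asm-offsets machinery (an illustrative sketch, not the real definitions), the two macros differ only in what constant they emit for the build script to scrape:

    #include <stddef.h>   /* offsetof */

    /* Illustrative asm-offsets-style macros. */
    #define DEFINE(sym, val) \
        asm volatile ( "\n.ascii \"==>" #sym " %0<==\"" :: "i" (val) )
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(str, mem))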
BLANK();
/*
call search_pre_exception_table
testq %rax,%rax # no fixup code for faulting EIP?
jz .Ldispatch_exceptions
+
+/* Exception recovery from the pre-exception table (a fault on the IRET instruction). */
+
movq %rax,UREGS_rip(%rsp) # fixup regular stack
#ifdef CONFIG_XEN_SHSTK
subq $8,%rsp
movq %rsp,%rdi
movq $UREGS_kernel_sizeof/8,%rcx
- rep; movsq # make room for ec/ev
+ rep movsq # make room for ec/ev
1: movq UREGS_error_code(%rsp),%rax # ec/ev
movq %rax,UREGS_kernel_sizeof(%rsp)
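# Note (illustrative, not part of the patch): with UREGS_kernel_sizeof now
# covering the whole frame, the rep movsq above shifts every register slot
# down by 8 bytes, and the saved ec/ev word is re-stored immediately past
# the end of the frame, in the slot the copy vacated.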
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
void show_registers(const struct cpu_user_regs *regs)
{
- struct cpu_user_regs fault_regs;
+ struct cpu_user_regs fault_regs = *regs;
struct extra_state fault_state;
enum context context;
struct vcpu *v = system_state >= SYS_STATE_smp_boot ? current : NULL;
- /*
- * Don't read beyond the end of the hardware frame. It is out of bounds
- * for WARN()/etc.
- */
- memcpy(&fault_regs, regs, offsetof(struct cpu_user_regs, es));
-
if ( guest_mode(regs) && is_hvm_vcpu(v) )
{
get_hvm_registers(v, &fault_regs, &fault_state);