ia64/xen-unstable

changeset:   7553:b5903c9aeda5
author:      kaf24@firebug.cl.cam.ac.uk
date:        Sun Oct 30 10:45:49 2005 +0100
parents:     07aa5213d811
children:    bcccadcc56e5
files:       linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S
             linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c
             xen/arch/x86/traps.c
description:
Fix floating-point corruption (a nasty race in fp task-switch exception handling).

Signed-off-by: Keir Fraser <keir@xensource.com>
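In outline, the race being fixed: trap 7 (#NM, device-not-available) was previously bounced to the guest as a trap gate, leaving event delivery enabled while the guest's math_state_restore ran; an event arriving between clts() and the FPU reload could trigger a task switch that saves or restores the wrong task's floating-point registers. The toy model below mimics that interleaving; all names are invented, and nothing in it is kernel or Xen code.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the race (all names invented; not kernel or Xen code).
 * An #NM handler does two steps: clear TS so FPU instructions stop
 * trapping, then reload the faulting task's FP registers.  If events
 * can be delivered between the two steps, a task switch taken there
 * sees TS already clear, assumes the live FPU contents belong to the
 * outgoing task, and saves the wrong values.
 */

static bool ts = true;   /* models CR0.TS: FPU access traps when set   */
static int  in_fpu = 2;  /* task whose values currently sit in the FPU */

static void task_switch(int outgoing)
{
    if (!ts)
        printf("  switch: saved FPU as task %d's state, but it held "
               "task %d's values -- corruption\n", outgoing, in_fpu);
    ts = true;
}

static void nm_handler(int task, bool events_masked)
{
    ts = false;                 /* step 1: clts()                       */
    if (!events_masked)
        task_switch(task);      /* event fires inside the window        */
    in_fpu = task;              /* step 2: reload this task's registers */
}

int main(void)
{
    puts("trap-gate delivery (old):");
    nm_handler(1, false);
    puts("interrupt-gate delivery (new): no window, nothing saved early");
    nm_handler(1, true);
    return 0;
}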
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S	Sun Oct 30 09:39:55 2005 +0100
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S	Sun Oct 30 10:45:49 2005 +0100
@@ -653,7 +653,7 @@ ENTRY(simd_coprocessor_error)
 ENTRY(device_not_available)
 	pushl $-1			# mark this as an int
 	SAVE_ALL
-	preempt_stop
+	#preempt_stop /* This is already an interrupt gate on Xen. */
 	call math_state_restore
 	jmp ret_from_exception
 
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c	Sun Oct 30 09:39:55 2005 +0100
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c	Sun Oct 30 10:45:49 2005 +0100
@@ -648,6 +648,12 @@ fastcall void do_int3(struct pt_regs *re
 }
 #endif
 
+static inline void conditional_sti(struct pt_regs *regs)
+{
+	if ((uint8_t)(regs->xcs >> 16) == 0)
+		local_irq_enable();
+}
+
 /*
  * Our handling of the processor debug registers is non-trivial.
  * We do not clear them on entry and exit from the kernel. Therefore
@@ -680,11 +686,9 @@ fastcall void do_debug(struct pt_regs * 
 	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
 					SIGTRAP) == NOTIFY_STOP)
 		return;
-#if 0
+
 	/* It's safe to allow irq's after DR6 has been saved */
-	if (regs->eflags & X86_EFLAGS_IF)
-		local_irq_enable();
-#endif
+	conditional_sti(regs);
 
 	/* Mask out spurious debug traps due to lazy DR7 setting */
 	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
@@ -967,15 +971,18 @@ void __init trap_init_f00f_bug(void)
 #endif
 
 
-/* NB. All these are "trap gates" (i.e. events_mask isn't cleared). */
+/*
+ * NB. All these are "trap gates" (i.e. events_mask isn't cleared) except
+ * for those that specify <dpl>|4 in the second field.
+ */
 static trap_info_t trap_table[] = {
 	{  0, 0, __KERNEL_CS, (unsigned long)divide_error		},
-	{  1, 0, __KERNEL_CS, (unsigned long)debug			},
-	{  3, 3, __KERNEL_CS, (unsigned long)int3			},
+	{  1, 0|4, __KERNEL_CS, (unsigned long)debug			},
+	{  3, 3|4, __KERNEL_CS, (unsigned long)int3			},
 	{  4, 3, __KERNEL_CS, (unsigned long)overflow			},
 	{  5, 3, __KERNEL_CS, (unsigned long)bounds			},
 	{  6, 0, __KERNEL_CS, (unsigned long)invalid_op			},
-	{  7, 0, __KERNEL_CS, (unsigned long)device_not_available	},
+	{  7, 0|4, __KERNEL_CS, (unsigned long)device_not_available	},
 	{  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
 	{ 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS		},
 	{ 11, 0, __KERNEL_CS, (unsigned long)segment_not_present	},
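Guest-side, two conventions in this file deserve a note. The `|4` in the updated trap-table entries sets the bit that the hypervisor tests with TI_GET_IF() (see the xen/arch/x86/traps.c hunks below), requesting interrupt-gate rather than trap-gate delivery for vectors 1, 3 and 7. And the new conditional_sti() assumes the bounce frame packs the guest's saved event mask into bits 16-23 of the saved CS slot, so a zero byte there means events were enabled at trap time and IRQs can safely be turned back on. A minimal compilable sketch of that check, against a hypothetical pt_regs stand-in:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for pt_regs: only the saved CS slot matters.
 * The sketch assumes, as conditional_sti() does, that the bounce
 * frame stores the guest's saved event mask in bits 16-23 of xcs. */
struct sketch_pt_regs {
    unsigned long xcs;
};

static void conditional_sti_sketch(const struct sketch_pt_regs *regs)
{
    if ((uint8_t)(regs->xcs >> 16) == 0)
        puts("event mask was 0 at trap time -> local_irq_enable()");
    else
        puts("events were masked at trap time -> leave IRQs disabled");
}

int main(void)
{
    struct sketch_pt_regs events_on  = { .xcs = 0x00000061UL };
    struct sketch_pt_regs events_off = { .xcs = 0x00010061UL };
    conditional_sti_sketch(&events_on);
    conditional_sti_sketch(&events_off);
    return 0;
}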
--- a/xen/arch/x86/traps.c	Sun Oct 30 09:39:55 2005 +0100
+++ b/xen/arch/x86/traps.c	Sun Oct 30 10:45:49 2005 +0100
@@ -1147,6 +1147,9 @@ asmlinkage void do_nmi(struct cpu_user_r
 
 asmlinkage int math_state_restore(struct cpu_user_regs *regs)
 {
+    struct trap_bounce *tb;
+    trap_info_t *ti;
+
     /* Prevent recursion. */
     clts();
 
@@ -1154,10 +1157,15 @@ asmlinkage int math_state_restore(struct
 
     if ( current->arch.guest_context.ctrlreg[0] & X86_CR0_TS )
     {
-        struct trap_bounce *tb = &current->arch.trap_bounce;
+        tb = &current->arch.trap_bounce;
+        ti = &current->arch.guest_context.trap_ctxt[TRAP_no_device];
+
         tb->flags = TBF_EXCEPTION;
-        tb->cs    = current->arch.guest_context.trap_ctxt[7].cs;
-        tb->eip   = current->arch.guest_context.trap_ctxt[7].address;
+        tb->cs    = ti->cs;
+        tb->eip   = ti->address;
+        if ( TI_GET_IF(ti) )
+            tb->flags |= TBF_INTERRUPT;
+
         current->arch.guest_context.ctrlreg[0] &= ~X86_CR0_TS;
     }
 
@@ -1169,6 +1177,7 @@ asmlinkage int do_debug(struct cpu_user_
     unsigned long condition;
     struct vcpu *v = current;
     struct trap_bounce *tb = &v->arch.trap_bounce;
+    trap_info_t *ti;
 
     __asm__ __volatile__("mov %%db6,%0" : "=r" (condition));
 
@@ -1198,9 +1207,12 @@ asmlinkage int do_debug(struct cpu_user_
     /* Save debug status register where guest OS can peek at it */
     v->arch.guest_context.debugreg[6] = condition;
 
+    ti = &v->arch.guest_context.trap_ctxt[TRAP_debug];
     tb->flags = TBF_EXCEPTION;
-    tb->cs    = v->arch.guest_context.trap_ctxt[TRAP_debug].cs;
-    tb->eip   = v->arch.guest_context.trap_ctxt[TRAP_debug].address;
+    tb->cs    = ti->cs;
+    tb->eip   = ti->address;
+    if ( TI_GET_IF(ti) )
+        tb->flags |= TBF_INTERRUPT;
 
  out:
     return EXCRET_not_a_fault;
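Hypervisor-side, both hunks add the same step: after looking up the guest's registered gate, propagate its interrupt-gate bit into the bounce flags as TBF_INTERRUPT, so the exception is delivered with the guest's event mask set and the handler runs without nested events until it re-enables them. That is what makes the entry.S preempt_stop redundant and closes the math_state_restore window. A simplified sketch of the pattern; the types and flag values below are invented for illustration, not taken from Xen's headers:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for Xen's trap_info_t / trap_bounce and the
 * TBF_* flag values; only the shape of the logic mirrors the patch. */
#define SK_TBF_EXCEPTION 0x01
#define SK_TBF_INTERRUPT 0x08

typedef struct {
    uint8_t       vector;
    uint8_t       flags;            /* bit 2: interrupt gate requested */
    uint16_t      cs;
    unsigned long address;
} sk_trap_info_t;

typedef struct {
    unsigned int  flags;
    uint16_t      cs;
    unsigned long eip;
} sk_trap_bounce_t;

#define SK_TI_GET_IF(ti) ((ti)->flags & 4)

/* The step both hunks add: honour the gate's IF bit when bouncing. */
static void sk_bounce_exception(sk_trap_bounce_t *tb,
                                const sk_trap_info_t *ti)
{
    tb->flags = SK_TBF_EXCEPTION;
    tb->cs    = ti->cs;
    tb->eip   = ti->address;
    if (SK_TI_GET_IF(ti))              /* guest asked for an interrupt gate */
        tb->flags |= SK_TBF_INTERRUPT; /* mask events on delivery           */
}

int main(void)
{
    sk_trap_info_t nm_gate = { 7, 0 | 4, 0x61, 0xc0100000UL };
    sk_trap_bounce_t tb;
    sk_bounce_exception(&tb, &nm_gate);
    printf("bounce flags = %#x\n", tb.flags);
    return 0;
}

In the real code the TBF_INTERRUPT flag is presumably consumed later, when the bounce frame is actually constructed on the guest kernel stack.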