
changeset 8590:19f5ffa02154

Fix the x86/32 do_iret implementation; this also fixes VM86 mode.

Do not clobber a freshly restored esp when performing an iret. The
pop_from_guest_stack() helper advanced regs->esp after every copy, so in the
VM86 and ring-2/3 return paths it overwrote the esp value that had just been
popped from the guest stack; the fix open-codes each pop and skips the
adjustment where the popped data itself supplies the new esp.
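
As a user-space sketch, not the Xen code itself: the struct regs type, the
pop_from_stack() name, and the use of memcpy and uintptr_t in place of
__copy_from_user and the 32-bit register fields are illustrative stand-ins.
It shows the pattern the removed helper followed and why it corrupted esp:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for struct cpu_user_regs; esp is wide enough to
     * hold a host pointer so the sketch runs as an ordinary program. */
    struct regs { uintptr_t esp; uintptr_t ss; };

    /* Shape of the removed pop_from_guest_stack() helper: copy from the
     * address held in regs->esp, then unconditionally advance regs->esp. */
    static void pop_from_stack(void *dst, struct regs *regs, unsigned int bytes)
    {
        memcpy(dst, (void *)regs->esp, bytes); /* may overwrite regs->esp... */
        regs->esp += bytes;                    /* ...which this then clobbers */
    }

    int main(void)
    {
        /* Pretend guest stack holding the values to restore: new ESP, SS. */
        uintptr_t guest_stack[2] = { 0x1000, 0x23 };
        struct regs regs = { .esp = (uintptr_t)guest_stack, .ss = 0 };

        /* Return-to-ring-2/3 path: the pop target is regs.esp itself. */
        pop_from_stack(&regs.esp, &regs, sizeof(guest_stack));

        /* Prints a value sizeof(guest_stack) above 0x1000: the freshly
         * restored esp was bumped by the post-copy adjustment. */
        printf("esp = %#lx (wanted 0x1000)\n", (unsigned long)regs.esp);
        return 0;
    }

Open-coding each pop, as the patch below does, lets the VM86 and ring-2/3
paths skip the post-copy esp adjustment entirely, since the data just popped
already contains the new esp.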

Signed-off-by: Keir Fraser <keir@xensource.com>
Signed-off-by: Ian Campbell <Ian.Campbell@XenSource.com>
author Ian.Campbell@xensource.com
date Fri Jan 13 11:04:04 2006 +0000 (2006-01-13)
parents ec4ef8c5f04d
children 956bf08c91c3
files xen/arch/x86/x86_32/traps.c
line diff
--- a/xen/arch/x86/x86_32/traps.c	Fri Jan 13 11:38:33 2006 +0100
+++ b/xen/arch/x86/x86_32/traps.c	Fri Jan 13 11:04:04 2006 +0000
@@ -157,14 +157,6 @@ asmlinkage void do_double_fault(void)
         __asm__ __volatile__ ( "hlt" );
 }
 
-static inline void pop_from_guest_stack(
-    void *dst, struct cpu_user_regs *regs, unsigned int bytes)
-{
-    if ( unlikely(__copy_from_user(dst, (void __user *)regs->esp, bytes)) )
-        domain_crash_synchronous();
-    regs->esp += bytes;
-}
-
 asmlinkage unsigned long do_iret(void)
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
@@ -175,22 +167,29 @@ asmlinkage unsigned long do_iret(void)
         domain_crash_synchronous();
 
     /* Pop and restore EAX (clobbered by hypercall). */
-    pop_from_guest_stack(&regs->eax, regs, 4);
+    if ( unlikely(__copy_from_user(&regs->eax, (void __user *)regs->esp, 4)) )
+        domain_crash_synchronous();
+    regs->esp += 4;
 
     /* Pop and restore CS and EIP. */
-    pop_from_guest_stack(&regs->eip, regs, 8);
+    if ( unlikely(__copy_from_user(&regs->eip, (void __user *)regs->esp, 8)) )
+        domain_crash_synchronous();
+    regs->esp += 8;
 
     /*
      * Pop, fix up and restore EFLAGS. We fix up in a local staging area
      * to avoid firing the BUG_ON(IOPL) check in arch_getdomaininfo_ctxt.
      */
-    pop_from_guest_stack(&eflags, regs, 4);
+    if ( unlikely(__copy_from_user(&eflags, (void __user *)regs->esp, 4)) )
+        domain_crash_synchronous();
+    regs->esp += 4;
     regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
 
     if ( VM86_MODE(regs) )
     {
         /* Return to VM86 mode: pop and restore ESP,SS,ES,DS,FS and GS. */
-        pop_from_guest_stack(&regs->esp, regs, 24);
+        if ( __copy_from_user(&regs->esp, (void __user *)regs->esp, 24) )
+            domain_crash_synchronous();
     }
     else if ( unlikely(RING_0(regs)) )
     {
@@ -199,7 +198,8 @@ asmlinkage unsigned long do_iret(void)
     else if ( !RING_1(regs) )
     {
         /* Return to ring 2/3: pop and restore ESP and SS. */
-        pop_from_guest_stack(&regs->esp, regs, 8);
+        if ( __copy_from_user(&regs->esp, (void __user *)regs->esp, 8) )
+            domain_crash_synchronous();
     }
 
     /* No longer in NMI context. */