ia64/xen-unstable

changeset 8560:fe4d06b15a36

Pass NMIs to DOM0 via a dedicated callback, Xen x86_32 support.

Handle NMI interrupts and dispatch to dom0 on x86_32.

Renames the switch_vm86 hypercall to iret and implements full iret
semantics instead of only what is required by VM86 returns.

Plumb in nmi_op hypercall to generic code.

Signed-off-by: Ian Campbell <Ian.Campbell@XenSource.com>
author Ian.Campbell@xensource.com
date Wed Jan 11 15:52:12 2006 +0000 (2006-01-11)
parents c6f7774cae63
children 06ab200a9e23
files xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_32/traps.c xen/include/public/xen.h
line diff
     1.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Wed Jan 11 15:51:56 2006 +0000
     1.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Wed Jan 11 15:52:12 2006 +0000
     1.3 @@ -65,6 +65,10 @@ void __dummy__(void)
     1.4             arch.guest_context.kernel_ss);
     1.5      OFFSET(VCPU_kernel_sp, struct vcpu,
     1.6             arch.guest_context.kernel_sp);
     1.7 +    OFFSET(VCPU_flags, struct vcpu, vcpu_flags);
     1.8 +    OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
     1.9 +    DEFINE(_VCPUF_nmi_pending, _VCPUF_nmi_pending);
    1.10 +    DEFINE(_VCPUF_nmi_masked, _VCPUF_nmi_masked);
    1.11      BLANK();
    1.12  
    1.13      OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
     2.1 --- a/xen/arch/x86/x86_32/entry.S	Wed Jan 11 15:51:56 2006 +0000
     2.2 +++ b/xen/arch/x86/x86_32/entry.S	Wed Jan 11 15:52:12 2006 +0000
     2.3 @@ -326,7 +326,9 @@ test_all_events:
     2.4          shl  $IRQSTAT_shift,%eax
     2.5          test %ecx,irq_stat(%eax,1)
     2.6          jnz  process_softirqs
     2.7 -/*test_guest_events:*/
     2.8 +        btr  $_VCPUF_nmi_pending,VCPU_flags(%ebx)
     2.9 +        jc   process_nmi
    2.10 +test_guest_events:
    2.11          movl VCPU_vcpu_info(%ebx),%eax
    2.12          testb $0xFF,VCPUINFO_upcall_mask(%eax)
    2.13          jnz  restore_all_guest
    2.14 @@ -348,7 +350,24 @@ process_softirqs:
    2.15          sti       
    2.16          call do_softirq
    2.17          jmp  test_all_events
    2.18 -                
    2.19 +	
    2.20 +	ALIGN
    2.21 +process_nmi:
    2.22 +        movl VCPU_nmi_addr(%ebx),%eax
    2.23 +        test %eax,%eax
    2.24 +        jz   test_all_events
    2.25 +        bts  $_VCPUF_nmi_masked,VCPU_flags(%ebx)
    2.26 +        jc   1f
    2.27 +        sti
    2.28 +        leal VCPU_trap_bounce(%ebx),%edx
    2.29 +        movl %eax,TRAPBOUNCE_eip(%edx)
    2.30 +        movw $FLAT_KERNEL_CS,TRAPBOUNCE_cs(%edx)
    2.31 +        movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
    2.32 +        call create_bounce_frame
    2.33 +        jmp  test_all_events
    2.34 +1:      bts  $_VCPUF_nmi_pending,VCPU_flags(%ebx)
    2.35 +        jmp  test_guest_events
    2.36 +
    2.37  /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
    2.38  /*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
    2.39  /* %edx == trap_bounce, %ebx == struct vcpu                       */
    2.40 @@ -620,9 +639,7 @@ ENTRY(nmi)
    2.41          jne   defer_nmi
    2.42  
    2.43  continue_nmi:
    2.44 -        movl  $(__HYPERVISOR_DS),%edx
    2.45 -        movl  %edx,%ds
    2.46 -        movl  %edx,%es
    2.47 +        SET_XEN_SEGMENTS(d)
    2.48          movl  %esp,%edx
    2.49          pushl %edx
    2.50          call  do_nmi
    2.51 @@ -660,42 +677,6 @@ do_arch_sched_op:
    2.52          movl %eax,UREGS_eax(%ecx)
    2.53          jmp  do_sched_op
    2.54  
    2.55 -do_switch_vm86:
    2.56 -        # Reset the stack pointer
    2.57 -        GET_GUEST_REGS(%ecx)
    2.58 -        movl %ecx,%esp
    2.59 -
    2.60 -        # GS:ESI == Ring-1 stack activation
    2.61 -        movl UREGS_esp(%esp),%esi
    2.62 -VFLT1:  mov  UREGS_ss(%esp),%gs
    2.63 -
    2.64 -        # ES:EDI == Ring-0 stack activation
    2.65 -        leal UREGS_eip(%esp),%edi
    2.66 -
    2.67 -        # Restore the hypercall-number-clobbered EAX on our stack frame
    2.68 -VFLT2:  movl %gs:(%esi),%eax
    2.69 -        movl %eax,UREGS_eax(%esp)
    2.70 -        addl $4,%esi
    2.71 -        	
    2.72 -      	# Copy the VM86 activation from the ring-1 stack to the ring-0 stack
    2.73 -        movl $(UREGS_user_sizeof-UREGS_eip)/4,%ecx
    2.74 -VFLT3:  movl %gs:(%esi),%eax
    2.75 -        stosl
    2.76 -        addl $4,%esi
    2.77 -        loop VFLT3
    2.78 -
    2.79 -        # Fix up EFLAGS: IOPL=0, IF=1, VM=1
    2.80 -        andl $~X86_EFLAGS_IOPL,UREGS_eflags(%esp)
    2.81 -        orl  $X86_EFLAGS_IF|X86_EFLAGS_VM,UREGS_eflags(%esp)
    2.82 -        
    2.83 -        jmp test_all_events
    2.84 -
    2.85 -.section __ex_table,"a"
    2.86 -        .long VFLT1,domain_crash_synchronous
    2.87 -        .long VFLT2,domain_crash_synchronous
    2.88 -        .long VFLT3,domain_crash_synchronous
    2.89 -.previous
    2.90 -
    2.91  .data
    2.92  
    2.93  ENTRY(exception_table)
    2.94 @@ -744,11 +725,12 @@ ENTRY(hypercall_table)
    2.95          .long do_grant_table_op     /* 20 */
    2.96          .long do_vm_assist
    2.97          .long do_update_va_mapping_otherdomain
    2.98 -        .long do_switch_vm86
    2.99 +        .long do_iret
   2.100          .long do_vcpu_op
   2.101          .long do_ni_hypercall       /* 25 */
   2.102          .long do_mmuext_op
   2.103 -        .long do_acm_op             /* 27 */
   2.104 +        .long do_acm_op
   2.105 +        .long do_nmi_op
   2.106          .rept NR_hypercalls-((.-hypercall_table)/4)
   2.107          .long do_ni_hypercall
   2.108          .endr
   2.109 @@ -777,11 +759,12 @@ ENTRY(hypercall_args_table)
   2.110          .byte 3 /* do_grant_table_op    */  /* 20 */
   2.111          .byte 2 /* do_vm_assist         */
   2.112          .byte 5 /* do_update_va_mapping_otherdomain */
   2.113 -        .byte 0 /* do_switch_vm86       */
   2.114 +        .byte 0 /* do_iret              */
   2.115          .byte 3 /* do_vcpu_op           */
   2.116          .byte 0 /* do_ni_hypercall      */  /* 25 */
   2.117          .byte 4 /* do_mmuext_op         */
   2.118          .byte 1 /* do_acm_op            */
   2.119 +        .byte 2 /* do_nmi_op            */
   2.120          .rept NR_hypercalls-(.-hypercall_args_table)
   2.121          .byte 0 /* do_ni_hypercall      */
   2.122          .endr
     3.1 --- a/xen/arch/x86/x86_32/traps.c	Wed Jan 11 15:51:56 2006 +0000
     3.2 +++ b/xen/arch/x86/x86_32/traps.c	Wed Jan 11 15:52:12 2006 +0000
     3.3 @@ -157,6 +157,49 @@ asmlinkage void do_double_fault(void)
     3.4          __asm__ __volatile__ ( "hlt" );
     3.5  }
     3.6  
     3.7 +asmlinkage unsigned long do_iret(void)
     3.8 +{
     3.9 +    struct cpu_user_regs *regs = guest_cpu_user_regs();
    3.10 +
    3.11 +    /* Restore EAX (clobbered by hypercall) */
    3.12 +    if (copy_from_user(&regs->eax, (void __user *)regs->esp, 4))
    3.13 +        domain_crash_synchronous();
    3.14 +    regs->esp += 4;
    3.15 +
    3.16 +    /* Restore EFLAGS, CS and EIP */
    3.17 +    if (copy_from_user(&regs->eip, (void __user *)regs->esp, 12))
    3.18 +        domain_crash_synchronous();
    3.19 +
    3.20 +    if (VM86_MODE(regs)) {
    3.21 +        /* return to VM86 mode: restore ESP,SS,ES,DS,FS and GS */
    3.22 +        if(copy_from_user(&regs->esp, (void __user *)(regs->esp+12), 24))
    3.23 +            domain_crash_synchronous();
    3.24 +    } else if (RING_0(regs)) {
    3.25 +        domain_crash_synchronous();
    3.26 +    } else if (RING_1(regs)) {
    3.27 +        /* return to ring 1: pop EFLAGS,CS and EIP */
    3.28 +        regs->esp += 12;
    3.29 +    } else {
    3.30 +        /* return to ring 2/3: restore ESP and SS */
    3.31 +        if(copy_from_user(&regs->esp, (void __user *)(regs->esp+12), 8))
    3.32 +            domain_crash_synchronous();
    3.33 +    }
    3.34 +
    3.35 +    /* Fixup EFLAGS */
    3.36 +    regs->eflags &= ~X86_EFLAGS_IOPL;
    3.37 +    regs->eflags |= X86_EFLAGS_IF;
    3.38 +
    3.39 +    /* No longer in NMI context */
    3.40 +    clear_bit(_VCPUF_nmi_masked, &current->vcpu_flags);
    3.41 +
    3.42 +    /* Restore upcall mask from saved value */
    3.43 +    current->vcpu_info->evtchn_upcall_mask = regs->saved_upcall_mask;
    3.44 +
    3.45 +    /* the hypercall exit path will overwrite eax
    3.46 +     * with this return value */
    3.47 +    return regs->eax;
    3.48 +}
    3.49 +
    3.50  BUILD_SMP_INTERRUPT(deferred_nmi, TRAP_deferred_nmi)
    3.51  asmlinkage void smp_deferred_nmi(struct cpu_user_regs regs)
    3.52  {
     4.1 --- a/xen/include/public/xen.h	Wed Jan 11 15:51:56 2006 +0000
     4.2 +++ b/xen/include/public/xen.h	Wed Jan 11 15:52:12 2006 +0000
     4.3 @@ -53,7 +53,8 @@
     4.4  #define __HYPERVISOR_grant_table_op       20
     4.5  #define __HYPERVISOR_vm_assist            21
     4.6  #define __HYPERVISOR_update_va_mapping_otherdomain 22
     4.7 -#define __HYPERVISOR_switch_vm86          23 /* x86/32 only */
     4.8 +#define __HYPERVISOR_iret                 23 /* x86/32 only */
     4.9 +#define __HYPERVISOR_switch_vm86          23 /* x86/32 only (obsolete name) */
    4.10  #define __HYPERVISOR_switch_to_user       23 /* x86/64 only */
    4.11  #define __HYPERVISOR_vcpu_op              24
    4.12  #define __HYPERVISOR_set_segment_base     25 /* x86/64 only */