ia64/xen-unstable

changeset 10311:5e3827f7a93a

[HVM][VMX] Interrupts must be kept disabled when entering Xen for
external interrupt processing. Remove code that immediately
reenabled interrupt delivery on VMEXIT.
Signed-off-by: Steven Smith <sos22@cam.ac.uk>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Jun 05 17:17:27 2006 +0100 (2006-06-05)
parents e3af1912794b
children 89d7acdd8951
files xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vmx/x86_32/exits.S xen/arch/x86/hvm/vmx/x86_64/exits.S
line diff
     1.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Jun 05 17:03:19 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Jun 05 17:17:27 2006 +0100
     1.3 @@ -1970,7 +1970,6 @@ static inline void vmx_vmexit_do_extint(
     1.4          __hvm_bug(regs);
     1.5  
     1.6      vector &= INTR_INFO_VECTOR_MASK;
     1.7 -    local_irq_disable();
     1.8      TRACE_VMEXIT(1,vector);
     1.9  
    1.10      switch(vector) {
    1.11 @@ -2065,30 +2064,33 @@ asmlinkage void vmx_vmexit_handler(struc
    1.12      struct vcpu *v = current;
    1.13      int error;
    1.14  
    1.15 -    if ((error = __vmread(VM_EXIT_REASON, &exit_reason)))
    1.16 -        __hvm_bug(&regs);
    1.17 +    error = __vmread(VM_EXIT_REASON, &exit_reason);
    1.18 +    BUG_ON(error);
    1.19  
    1.20      perfc_incra(vmexits, exit_reason);
    1.21  
    1.22 -    /* don't bother H/W interrutps */
    1.23 -    if (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT &&
    1.24 -        exit_reason != EXIT_REASON_VMCALL &&
    1.25 -        exit_reason != EXIT_REASON_IO_INSTRUCTION) 
    1.26 +    if ( (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT) &&
    1.27 +         (exit_reason != EXIT_REASON_VMCALL) &&
    1.28 +         (exit_reason != EXIT_REASON_IO_INSTRUCTION) )
    1.29          HVM_DBG_LOG(DBG_LEVEL_0, "exit reason = %x", exit_reason);
    1.30  
    1.31 -    if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
    1.32 +    if ( exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT )
    1.33 +        local_irq_enable();
    1.34 +
    1.35 +    if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
    1.36 +    {
    1.37          printk("Failed vm entry (reason 0x%x)\n", exit_reason);
    1.38          printk("*********** VMCS Area **************\n");
    1.39          vmcs_dump_vcpu();
    1.40          printk("**************************************\n");
    1.41          domain_crash_synchronous();
    1.42 -        return;
    1.43      }
    1.44  
    1.45      __vmread(GUEST_RIP, &eip);
    1.46      TRACE_VMEXIT(0,exit_reason);
    1.47  
    1.48 -    switch (exit_reason) {
    1.49 +    switch ( exit_reason )
    1.50 +    {
    1.51      case EXIT_REASON_EXCEPTION_NMI:
    1.52      {
    1.53          /*
     2.1 --- a/xen/arch/x86/hvm/vmx/x86_32/exits.S	Mon Jun 05 17:03:19 2006 +0100
     2.2 +++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S	Mon Jun 05 17:17:27 2006 +0100
     2.3 @@ -55,29 +55,26 @@
     2.4   * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
     2.5   */
     2.6  
     2.7 -#define HVM_MONITOR_EFLAGS	0x202 /* IF on */
     2.8  #define NR_SKIPPED_REGS	6	/* See the above explanation */
     2.9 -#define HVM_SAVE_ALL_NOSEGREGS \
    2.10 -        pushl $HVM_MONITOR_EFLAGS; \
    2.11 -        popf; \
    2.12 -        subl $(NR_SKIPPED_REGS*4), %esp; \
    2.13 +#define HVM_SAVE_ALL_NOSEGREGS                                              \
    2.14 +        subl $(NR_SKIPPED_REGS*4), %esp;                                    \
    2.15          movl $0, 0xc(%esp);  /* XXX why do we need to force eflags==0 ?? */ \
    2.16 -        pushl %eax; \
    2.17 -        pushl %ebp; \
    2.18 -        pushl %edi; \
    2.19 -        pushl %esi; \
    2.20 -        pushl %edx; \
    2.21 -        pushl %ecx; \
    2.22 +        pushl %eax;                                                         \
    2.23 +        pushl %ebp;                                                         \
    2.24 +        pushl %edi;                                                         \
    2.25 +        pushl %esi;                                                         \
    2.26 +        pushl %edx;                                                         \
    2.27 +        pushl %ecx;                                                         \
    2.28          pushl %ebx;
    2.29  
    2.30 -#define HVM_RESTORE_ALL_NOSEGREGS   \
    2.31 -        popl %ebx;  \
    2.32 -        popl %ecx;  \
    2.33 -        popl %edx;  \
    2.34 -        popl %esi;  \
    2.35 -        popl %edi;  \
    2.36 -        popl %ebp;  \
    2.37 -        popl %eax;  \
    2.38 +#define HVM_RESTORE_ALL_NOSEGREGS               \
    2.39 +        popl %ebx;                              \
    2.40 +        popl %ecx;                              \
    2.41 +        popl %edx;                              \
    2.42 +        popl %esi;                              \
    2.43 +        popl %edi;                              \
    2.44 +        popl %ebp;                              \
    2.45 +        popl %eax;                              \
    2.46          addl $(NR_SKIPPED_REGS*4), %esp
    2.47  
    2.48          ALIGN
     3.1 --- a/xen/arch/x86/hvm/vmx/x86_64/exits.S	Mon Jun 05 17:03:19 2006 +0100
     3.2 +++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S	Mon Jun 05 17:17:27 2006 +0100
     3.3 @@ -51,45 +51,42 @@
     3.4   * (2/1)  u32 entry_vector;
     3.5   * (1/1)  u32 error_code;
     3.6   */
     3.7 -#define HVM_MONITOR_RFLAGS	0x202 /* IF on */
     3.8  #define NR_SKIPPED_REGS	6	/* See the above explanation */
     3.9 -#define HVM_SAVE_ALL_NOSEGREGS \
    3.10 -        pushq $HVM_MONITOR_RFLAGS; \
    3.11 -        popfq; \
    3.12 -        subq $(NR_SKIPPED_REGS*8), %rsp; \
    3.13 -        pushq %rdi; \
    3.14 -        pushq %rsi; \
    3.15 -        pushq %rdx; \
    3.16 -        pushq %rcx; \
    3.17 -        pushq %rax; \
    3.18 -        pushq %r8;  \
    3.19 -        pushq %r9;  \
    3.20 -        pushq %r10; \
    3.21 -        pushq %r11; \
    3.22 -        pushq %rbx; \
    3.23 -        pushq %rbp; \
    3.24 -        pushq %r12; \
    3.25 -        pushq %r13; \
    3.26 -        pushq %r14; \
    3.27 -        pushq %r15; \
    3.28 +#define HVM_SAVE_ALL_NOSEGREGS                  \
    3.29 +        subq $(NR_SKIPPED_REGS*8), %rsp;        \
    3.30 +        pushq %rdi;                             \
    3.31 +        pushq %rsi;                             \
    3.32 +        pushq %rdx;                             \
    3.33 +        pushq %rcx;                             \
    3.34 +        pushq %rax;                             \
    3.35 +        pushq %r8;                              \
    3.36 +        pushq %r9;                              \
    3.37 +        pushq %r10;                             \
    3.38 +        pushq %r11;                             \
    3.39 +        pushq %rbx;                             \
    3.40 +        pushq %rbp;                             \
    3.41 +        pushq %r12;                             \
    3.42 +        pushq %r13;                             \
    3.43 +        pushq %r14;                             \
    3.44 +        pushq %r15;
    3.45  
    3.46 -#define HVM_RESTORE_ALL_NOSEGREGS \
    3.47 -        popq %r15; \
    3.48 -        popq %r14; \
    3.49 -        popq %r13; \
    3.50 -        popq %r12; \
    3.51 -        popq %rbp; \
    3.52 -        popq %rbx; \
    3.53 -        popq %r11; \
    3.54 -        popq %r10; \
    3.55 -        popq %r9;  \
    3.56 -        popq %r8;  \
    3.57 -        popq %rax; \
    3.58 -        popq %rcx; \
    3.59 -        popq %rdx; \
    3.60 -        popq %rsi; \
    3.61 -        popq %rdi; \
    3.62 -        addq $(NR_SKIPPED_REGS*8), %rsp; \
    3.63 +#define HVM_RESTORE_ALL_NOSEGREGS               \
    3.64 +        popq %r15;                              \
    3.65 +        popq %r14;                              \
    3.66 +        popq %r13;                              \
    3.67 +        popq %r12;                              \
    3.68 +        popq %rbp;                              \
    3.69 +        popq %rbx;                              \
    3.70 +        popq %r11;                              \
    3.71 +        popq %r10;                              \
    3.72 +        popq %r9;                               \
    3.73 +        popq %r8;                               \
    3.74 +        popq %rax;                              \
    3.75 +        popq %rcx;                              \
    3.76 +        popq %rdx;                              \
    3.77 +        popq %rsi;                              \
    3.78 +        popq %rdi;                              \
    3.79 +        addq $(NR_SKIPPED_REGS*8), %rsp;
    3.80  
    3.81  ENTRY(vmx_asm_vmexit_handler)
    3.82          /* selectors are restored/saved by VMX */