ia64/xen-unstable

changeset 6341:5f3f9701ac11

Remove the double invocation of vmx_intr_assist that occurred when a
VM exit caused a domain switch.
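
Before this change the interrupt-assist code ran twice on that path:
once at the tail of vmx_vmexit_handler, and once more from
vmx_do_resume after the switch. In sketch form (reconstructed from the
removed lines in the diff below; bodies elided):

    /* Old call sites, both removed by this changeset (sketch only). */
    asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
    {
        struct vcpu *v = current;
        /* ... dispatch on the VM exit reason ... */
        vmx_intr_assist(v);          /* call #1: on every VM exit */
    }

    void vmx_do_resume(struct vcpu *d)
    {
        /* ... */
        vmx_intr_assist(d);          /* call #2: again before resuming */
    }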

Signed-off-by: Eddie Dong <eddie.dong@intel.com>
Signed-off-by: Edwin Zhai <edwin.zhai@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Aug 23 09:26:11 2005 +0000 (2005-08-23)
parents 43d91cbb1bfb
children 36cf17b65423
files xen/arch/x86/vmx.c xen/arch/x86/vmx_io.c xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_64/entry.S xen/include/asm-x86/vmx.h
line diff
--- a/xen/arch/x86/vmx.c	Mon Aug 22 23:07:37 2005 +0000
+++ b/xen/arch/x86/vmx.c	Tue Aug 23 09:26:11 2005 +0000
@@ -1712,9 +1712,6 @@ asmlinkage void vmx_vmexit_handler(struc
     default:
         __vmx_bug(&regs);       /* should not happen */
     }
-
-    vmx_intr_assist(v);
-    return;
 }
 
 asmlinkage void load_cr2(void)
--- a/xen/arch/x86/vmx_io.c	Mon Aug 22 23:07:37 2005 +0000
+++ b/xen/arch/x86/vmx_io.c	Tue Aug 23 09:26:11 2005 +0000
@@ -631,12 +631,14 @@ static inline int irq_masked(unsigned lo
     return ((eflags & X86_EFLAGS_IF) == 0);
 }
 
-void vmx_intr_assist(struct vcpu *v) 
+asmlinkage void vmx_intr_assist(void) 
 {
     int intr_type = 0;
-    int highest_vector = find_highest_pending_irq(v, &intr_type);
+    int highest_vector;
     unsigned long intr_fields, eflags, interruptibility, cpu_exec_control;
+    struct vcpu *v = current;
 
+    highest_vector = find_highest_pending_irq(v, &intr_type);
     __vmread(CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control);
 
     if (highest_vector == -1) {
@@ -712,9 +714,6 @@ void vmx_do_resume(struct vcpu *d)
 
     /* We can't resume the guest if we're waiting on I/O */
     ASSERT(!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags));
-
-    /* We always check for interrupts before resuming guest */
-    vmx_intr_assist(d);
 }
 
 #endif /* CONFIG_VMX */
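
vmx_intr_assist is now reached by a bare "call" from the entry stubs
(see the entry.S hunks below), so it can take no C argument: it is
declared asmlinkage and looks up the running vcpu itself. A minimal
sketch of the new shape, assuming Xen's "current" macro yields the
executing vcpu:

    asmlinkage void vmx_intr_assist(void)
    {
        int intr_type = 0;
        int highest_vector;
        struct vcpu *v = current;   /* previously passed as a parameter */

        /* The call moves out of highest_vector's initializer because it
         * now needs v, which is only set by the declaration above. */
        highest_vector = find_highest_pending_irq(v, &intr_type);
        /* ... program the VMCS to inject highest_vector ... */
    }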
--- a/xen/arch/x86/x86_32/entry.S	Mon Aug 22 23:07:37 2005 +0000
+++ b/xen/arch/x86/x86_32/entry.S	Tue Aug 23 09:26:11 2005 +0000
@@ -140,6 +140,7 @@ 1:
         jnz 2f
 
 /* vmx_restore_all_guest */
+        call vmx_intr_assist
         call load_cr2
         .endif
         VMX_RESTORE_ALL_NOSEGREGS
--- a/xen/arch/x86/x86_64/entry.S	Mon Aug 22 23:07:37 2005 +0000
+++ b/xen/arch/x86/x86_64/entry.S	Tue Aug 23 09:26:11 2005 +0000
@@ -233,6 +233,7 @@ 1:
         jnz  2f 
 
 /* vmx_restore_all_guest */
+        call vmx_intr_assist
         call load_cr2
         .endif
         /* 
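
Both entry stubs gain an identical call on the vmx_restore_all_guest
path, so injection now happens exactly once per VM entry, whether or
not the preceding exit went through a domain switch. In rough C terms
(the real path is assembly; this rendition is illustrative only):

    static void vmx_restore_all_guest_sketch(void)
    {
        vmx_intr_assist();   /* the single injection point, added above */
        load_cr2();          /* restore the guest's %cr2 */
        /* VMX_RESTORE_ALL_NOSEGREGS, then VMRESUME/VMLAUNCH */
    }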
--- a/xen/include/asm-x86/vmx.h	Mon Aug 22 23:07:37 2005 +0000
+++ b/xen/include/asm-x86/vmx.h	Tue Aug 23 09:26:11 2005 +0000
@@ -31,7 +31,7 @@
 extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
 extern void vmx_asm_do_resume(void);
 extern void vmx_asm_do_launch(void);
-extern void vmx_intr_assist(struct vcpu *d);
+extern void vmx_intr_assist(void);
 
 extern void arch_vmx_do_launch(struct vcpu *);
 extern void arch_vmx_do_resume(struct vcpu *);
@@ -355,7 +355,7 @@ static inline int __vmxon (u64 addr)
 }
 
 /* Make sure that xen intercepts any FP accesses from current */
-static inline void vmx_stts()
+static inline void vmx_stts(void)
 {
     unsigned long cr0;
 
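
The vmx_stts hunk is a small independent prototype fix: under C89/C99
rules "void f()" leaves the argument list unspecified, while
"void f(void)" promises there are none, letting the compiler reject
stray arguments. For illustration (hypothetical names):

    static inline void f_old()     { }
    static inline void f_new(void) { }

    void demo(void)
    {
        f_old(42);   /* accepted: empty parens give no prototype to check */
        f_new(42);   /* compile-time error: too many arguments */
    }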