ia64/xen-unstable

changeset 18713:4941c5a14598

x86, hvm: Move return-to-guest timer and interrupt cranking logic
outside of IRQ-safe context. This allows us to safely take
non-IRQ-safe spinlocks.

The drawback is that {vmx,svm}_intr_assist() now races new event
notifications delivered by IRQ or IPI. We close down this race by
having vcpu_kick() send a dummy softirq -- this gets picked up in
IRQ-safe context and will cause retry of *_intr_assist(). We avoid
delivering the softirq where possible: it is skipped when we are in
non-IRQ context and the VCPU to be kicked is the one currently running.
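
For illustration only (not part of the changeset), the retry pattern the
dummy softirq enables on the return-to-guest path can be sketched in C
roughly as follows. The helper names mirror Xen internals
(vmx_intr_assist(), local_irq_disable(), softirq_pending(), do_softirq());
the loop structure is an assumption distilled from the entry.S changes
below, not the literal implementation.

    /*
     * Illustrative sketch of the vmx_asm_do_vmentry flow after this
     * change (assumed structure, mirroring the assembly below).
     */
    static void return_to_guest_path(void)
    {
        for ( ;; )
        {
            /*
             * Crank timers/interrupts with IRQs enabled; we may now
             * take non-IRQ-safe spinlocks here.
             */
            vmx_intr_assist();

            local_irq_disable();

            /*
             * A racing vcpu_kick() from IRQ/IPI context raised
             * VCPU_KICK_SOFTIRQ, so this check notices it and we retry
             * vmx_intr_assist() rather than entering the guest with a
             * stale interrupt state.
             */
            if ( softirq_pending(smp_processor_id()) )
            {
                local_irq_enable();
                do_softirq();
                continue;
            }

            break; /* Nothing pending: proceed to VMRESUME/VMLAUNCH. */
        }
    }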

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Oct 23 11:40:59 2008 +0100 (2008-10-23)
parents 50fc79012db7
children 0358305c6883
files xen/arch/x86/domain.c xen/arch/x86/hvm/svm/entry.S xen/arch/x86/hvm/vmx/entry.S xen/include/asm-x86/event.h xen/include/asm-x86/softirq.h
line diff
     1.1 --- a/xen/arch/x86/domain.c	Thu Oct 23 11:20:44 2008 +0100
     1.2 +++ b/xen/arch/x86/domain.c	Thu Oct 23 11:40:59 2008 +0100
     1.3 @@ -1892,6 +1892,54 @@ void domain_cpuid(
     1.4      *eax = *ebx = *ecx = *edx = 0;
     1.5  }
     1.6  
     1.7 +void vcpu_kick(struct vcpu *v)
     1.8 +{
     1.9 +    /*
    1.10 +     * NB1. 'pause_flags' and 'processor' must be checked /after/ update of
    1.11 +     * pending flag. These values may fluctuate (after all, we hold no
    1.12 +     * locks) but the key insight is that each change will cause
    1.13 +     * evtchn_upcall_pending to be polled.
    1.14 +     * 
    1.15 +     * NB2. We save the running flag across the unblock to avoid a needless
    1.16 +     * IPI for domains that we IPI'd to unblock.
    1.17 +     */
    1.18 +    bool_t running = v->is_running;
    1.19 +    vcpu_unblock(v);
    1.20 +    if ( running && (in_irq() || (v != current)) )
    1.21 +        cpu_raise_softirq(v->processor, VCPU_KICK_SOFTIRQ);
    1.22 +}
    1.23 +
    1.24 +void vcpu_mark_events_pending(struct vcpu *v)
    1.25 +{
    1.26 +    int already_pending = test_and_set_bit(
    1.27 +        0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
    1.28 +
    1.29 +    if ( already_pending )
    1.30 +        return;
    1.31 +
    1.32 +    if ( is_hvm_vcpu(v) )
    1.33 +        hvm_assert_evtchn_irq(v);
    1.34 +    else
    1.35 +        vcpu_kick(v);
    1.36 +}
    1.37 +
    1.38 +static void vcpu_kick_softirq(void)
    1.39 +{
    1.40 +    /*
    1.41 +     * Nothing to do here: we merely prevent notifiers from racing with checks
    1.42 +     * executed on return to guest context with interrupts enabled. See, for
    1.43 +     * example, xxx_intr_assist() executed on return to HVM guest context.
    1.44 +     */
    1.45 +}
    1.46 +
    1.47 +static int __init init_vcpu_kick_softirq(void)
    1.48 +{
    1.49 +    open_softirq(VCPU_KICK_SOFTIRQ, vcpu_kick_softirq);
    1.50 +    return 0;
    1.51 +}
    1.52 +__initcall(init_vcpu_kick_softirq);
    1.53 +
    1.54 +
    1.55  /*
    1.56   * Local variables:
    1.57   * mode: C
     2.1 --- a/xen/arch/x86/hvm/svm/entry.S	Thu Oct 23 11:20:44 2008 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/entry.S	Thu Oct 23 11:40:59 2008 +0100
     2.3 @@ -57,6 +57,8 @@
     2.4  #endif
     2.5  
     2.6  ENTRY(svm_asm_do_resume)
     2.7 +        call svm_intr_assist
     2.8 +
     2.9          get_current(bx)
    2.10          CLGI
    2.11  
    2.12 @@ -67,7 +69,6 @@ ENTRY(svm_asm_do_resume)
    2.13          jnz  .Lsvm_process_softirqs
    2.14  
    2.15          call svm_asid_handle_vmrun
    2.16 -        call svm_intr_assist
    2.17  
    2.18          cmpb $0,addr_of(tb_init_done)
    2.19          jnz  .Lsvm_trace
     3.1 --- a/xen/arch/x86/hvm/vmx/entry.S	Thu Oct 23 11:20:44 2008 +0100
     3.2 +++ b/xen/arch/x86/hvm/vmx/entry.S	Thu Oct 23 11:40:59 2008 +0100
     3.3 @@ -122,6 +122,8 @@ vmx_asm_vmexit_handler:
     3.4  
     3.5  .globl vmx_asm_do_vmentry
     3.6  vmx_asm_do_vmentry:
     3.7 +        call vmx_intr_assist
     3.8 +
     3.9          get_current(bx)
    3.10          cli
    3.11  
    3.12 @@ -131,8 +133,6 @@ vmx_asm_do_vmentry:
    3.13          cmpl $0,(r(dx),r(ax),1)
    3.14          jnz  .Lvmx_process_softirqs
    3.15  
    3.16 -        call vmx_intr_assist
    3.17 -
    3.18          testb $0xff,VCPU_vmx_emul(r(bx))
    3.19          jnz  .Lvmx_goto_realmode
    3.20  
    3.21 @@ -179,11 +179,13 @@ vmx_asm_do_vmentry:
    3.22  
    3.23  /*.Lvmx_resume:*/
    3.24          VMRESUME
    3.25 +        sti
    3.26          call vm_resume_fail
    3.27          ud2
    3.28  
    3.29  .Lvmx_launch:
    3.30          VMLAUNCH
    3.31 +        sti
    3.32          call vm_launch_fail
    3.33          ud2
    3.34  
     4.1 --- a/xen/include/asm-x86/event.h	Thu Oct 23 11:20:44 2008 +0100
     4.2 +++ b/xen/include/asm-x86/event.h	Thu Oct 23 11:40:59 2008 +0100
     4.3 @@ -11,36 +11,8 @@
     4.4  
     4.5  #include <xen/shared.h>
     4.6  
     4.7 -static inline void vcpu_kick(struct vcpu *v)
     4.8 -{
     4.9 -    /*
    4.10 -     * NB1. 'pause_flags' and 'processor' must be checked /after/ update of
    4.11 -     * pending flag. These values may fluctuate (after all, we hold no
    4.12 -     * locks) but the key insight is that each change will cause
    4.13 -     * evtchn_upcall_pending to be polled.
    4.14 -     * 
    4.15 -     * NB2. We save the running flag across the unblock to avoid a needless
    4.16 -     * IPI for domains that we IPI'd to unblock.
    4.17 -     */
    4.18 -    int running = v->is_running;
    4.19 -    vcpu_unblock(v);
    4.20 -    if ( running )
    4.21 -        smp_send_event_check_cpu(v->processor);
    4.22 -}
    4.23 -
    4.24 -static inline void vcpu_mark_events_pending(struct vcpu *v)
    4.25 -{
    4.26 -    int already_pending = test_and_set_bit(
    4.27 -        0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
    4.28 -
    4.29 -    if ( already_pending )
    4.30 -        return;
    4.31 -
    4.32 -    if ( is_hvm_vcpu(v) )
    4.33 -        hvm_assert_evtchn_irq(v);
    4.34 -    else
    4.35 -        vcpu_kick(v);
    4.36 -}
    4.37 +void vcpu_kick(struct vcpu *v);
    4.38 +void vcpu_mark_events_pending(struct vcpu *v);
    4.39  
    4.40  int hvm_local_events_need_delivery(struct vcpu *v);
    4.41  static inline int local_events_need_delivery(void)
     5.1 --- a/xen/include/asm-x86/softirq.h	Thu Oct 23 11:20:44 2008 +0100
     5.2 +++ b/xen/include/asm-x86/softirq.h	Thu Oct 23 11:40:59 2008 +0100
     5.3 @@ -3,7 +3,8 @@
     5.4  
     5.5  #define NMI_MCE_SOFTIRQ        (NR_COMMON_SOFTIRQS + 0)
     5.6  #define TIME_CALIBRATE_SOFTIRQ (NR_COMMON_SOFTIRQS + 1)
     5.7 +#define VCPU_KICK_SOFTIRQ      (NR_COMMON_SOFTIRQS + 2)
     5.8  
     5.9 -#define NR_ARCH_SOFTIRQS       2
    5.10 +#define NR_ARCH_SOFTIRQS       3
    5.11  
    5.12  #endif /* __ASM_SOFTIRQ_H__ */