ia64/xen-unstable

changeset 14661:f830c5719e74

xen: Remove {nmi_pending,nmi_masked,paused} vcpu bitflags.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Mar 29 18:07:33 2007 +0100 (2007-03-29)
parents 3681f91a91e8
children 28e187c8221d
files xen/arch/x86/nmi.c xen/arch/x86/traps.c xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/asm-offsets.c xen/arch/x86/x86_64/compat/entry.S xen/arch/x86/x86_64/compat/traps.c xen/arch/x86/x86_64/entry.S xen/arch/x86/x86_64/traps.c xen/common/domain.c xen/common/kernel.c xen/include/xen/sched.h
line diff
     1.1 --- a/xen/arch/x86/nmi.c	Thu Mar 29 16:28:34 2007 +0000
     1.2 +++ b/xen/arch/x86/nmi.c	Thu Mar 29 18:07:33 2007 +0100
     1.3 @@ -445,18 +445,18 @@ static void do_nmi_stats(unsigned char k
     1.4      int i;
     1.5      struct domain *d;
     1.6      struct vcpu *v;
     1.7 +
     1.8      printk("CPU\tNMI\n");
     1.9 -    for_each_cpu(i)
    1.10 +    for_each_cpu ( i )
    1.11          printk("%3d\t%3d\n", i, nmi_count(i));
    1.12  
    1.13 -    if ((d = dom0) == NULL)
    1.14 -        return;
    1.15 -    if ((v = d->vcpu[0]) == NULL)
    1.16 +    if ( ((d = dom0) == NULL) || ((v = d->vcpu[0]) == NULL) )
    1.17          return;
    1.18 -    if (v->vcpu_flags & (VCPUF_nmi_pending|VCPUF_nmi_masked))
    1.19 +
    1.20 +    if ( v->nmi_pending || v->nmi_masked )
    1.21          printk("dom0 vcpu0: NMI %s%s\n",
    1.22 -               v->vcpu_flags & VCPUF_nmi_pending ? "pending " : "",
    1.23 -               v->vcpu_flags & VCPUF_nmi_masked ? "masked " : "");
    1.24 +               v->nmi_pending ? "pending " : "",
    1.25 +               v->nmi_masked  ? "masked " : "");
    1.26      else
    1.27          printk("dom0 vcpu0: NMI neither pending nor masked\n");
    1.28  }
     2.1 --- a/xen/arch/x86/traps.c	Thu Mar 29 16:28:34 2007 +0000
     2.2 +++ b/xen/arch/x86/traps.c	Thu Mar 29 18:07:33 2007 +0100
     2.3 @@ -1859,7 +1859,7 @@ static void nmi_dom0_report(unsigned int
     2.4  
     2.5      set_bit(reason_idx, nmi_reason(d));
     2.6  
     2.7 -    if ( !test_and_set_bit(_VCPUF_nmi_pending, &v->vcpu_flags) )
     2.8 +    if ( !xchg(&v->nmi_pending, 1) )
     2.9          raise_softirq(NMI_SOFTIRQ); /* not safe to wake up a vcpu here */
    2.10  }
    2.11  
     3.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Thu Mar 29 16:28:34 2007 +0000
     3.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Thu Mar 29 18:07:33 2007 +0100
     3.3 @@ -68,8 +68,8 @@ void __dummy__(void)
     3.4      OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
     3.5      OFFSET(VCPU_flags, struct vcpu, vcpu_flags);
     3.6      OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
     3.7 -    DEFINE(_VCPUF_nmi_pending, _VCPUF_nmi_pending);
     3.8 -    DEFINE(_VCPUF_nmi_masked, _VCPUF_nmi_masked);
     3.9 +    OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
    3.10 +    OFFSET(VCPU_nmi_masked, struct vcpu, nmi_masked);
    3.11      DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
    3.12      BLANK();
    3.13  
     4.1 --- a/xen/arch/x86/x86_32/entry.S	Thu Mar 29 16:28:34 2007 +0000
     4.2 +++ b/xen/arch/x86/x86_32/entry.S	Thu Mar 29 18:07:33 2007 +0100
     4.3 @@ -232,8 +232,8 @@ test_all_events:
     4.4          shl  $IRQSTAT_shift,%eax
     4.5          test %ecx,irq_stat(%eax,1)
     4.6          jnz  process_softirqs
     4.7 -        btr  $_VCPUF_nmi_pending,VCPU_flags(%ebx)
     4.8 -        jc   process_nmi
     4.9 +        testb $1,VCPU_nmi_pending(%ebx)
    4.10 +        jnz  process_nmi
    4.11  test_guest_events:
    4.12          movl VCPU_vcpu_info(%ebx),%eax
    4.13          testb $0xFF,VCPUINFO_upcall_mask(%eax)
    4.14 @@ -259,11 +259,13 @@ process_softirqs:
    4.15  
    4.16          ALIGN
    4.17  process_nmi:
    4.18 +        testb $1,VCPU_nmi_masked(%ebx)
    4.19 +        jnz  test_guest_events
    4.20 +        movb $0,VCPU_nmi_pending(%ebx)
    4.21          movl VCPU_nmi_addr(%ebx),%eax
    4.22          test %eax,%eax
    4.23 -        jz   test_all_events
    4.24 -        bts  $_VCPUF_nmi_masked,VCPU_flags(%ebx)
    4.25 -        jc   1f
    4.26 +        jz   test_guest_events
    4.27 +        movb $1,VCPU_nmi_masked(%ebx)
    4.28          sti
    4.29          leal VCPU_trap_bounce(%ebx),%edx
    4.30          movl %eax,TRAPBOUNCE_eip(%edx)
    4.31 @@ -271,8 +273,6 @@ process_nmi:
    4.32          movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
    4.33          call create_bounce_frame
    4.34          jmp  test_all_events
    4.35 -1:      bts  $_VCPUF_nmi_pending,VCPU_flags(%ebx)
    4.36 -        jmp  test_guest_events
    4.37  
    4.38  bad_hypercall:
    4.39          movl $-ENOSYS,UREGS_eax(%esp)
     5.1 --- a/xen/arch/x86/x86_32/traps.c	Thu Mar 29 16:28:34 2007 +0000
     5.2 +++ b/xen/arch/x86/x86_32/traps.c	Thu Mar 29 18:07:33 2007 +0100
     5.3 @@ -218,7 +218,7 @@ unsigned long do_iret(void)
     5.4      }
     5.5  
     5.6      /* No longer in NMI context. */
     5.7 -    clear_bit(_VCPUF_nmi_masked, &current->vcpu_flags);
     5.8 +    current->nmi_masked = 0;
     5.9  
    5.10      /* Restore upcall mask from supplied EFLAGS.IF. */
    5.11      current->vcpu_info->evtchn_upcall_mask = !(eflags & X86_EFLAGS_IF);
     6.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Thu Mar 29 16:28:34 2007 +0000
     6.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Thu Mar 29 18:07:33 2007 +0100
     6.3 @@ -77,8 +77,8 @@ void __dummy__(void)
     6.4      OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
     6.5      OFFSET(VCPU_flags, struct vcpu, vcpu_flags);
     6.6      OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
     6.7 -    DEFINE(_VCPUF_nmi_pending, _VCPUF_nmi_pending);
     6.8 -    DEFINE(_VCPUF_nmi_masked, _VCPUF_nmi_masked);
     6.9 +    OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
    6.10 +    OFFSET(VCPU_nmi_masked, struct vcpu, nmi_masked);
    6.11      DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
    6.12      DEFINE(_VGCF_syscall_disables_events,  _VGCF_syscall_disables_events);
    6.13      BLANK();
     7.1 --- a/xen/arch/x86/x86_64/compat/entry.S	Thu Mar 29 16:28:34 2007 +0000
     7.2 +++ b/xen/arch/x86/x86_64/compat/entry.S	Thu Mar 29 18:07:33 2007 +0100
     7.3 @@ -87,8 +87,8 @@ ENTRY(compat_test_all_events)
     7.4          leaq  irq_stat(%rip),%rcx
     7.5          testl $~0,(%rcx,%rax,1)
     7.6          jnz   compat_process_softirqs
     7.7 -        btrq  $_VCPUF_nmi_pending,VCPU_flags(%rbx)
     7.8 -        jc    compat_process_nmi
     7.9 +        testb $1,VCPU_nmi_pending(%rbx)
    7.10 +        jnz   compat_process_nmi
    7.11  compat_test_guest_events:
    7.12          movq  VCPU_vcpu_info(%rbx),%rax
    7.13          testb $0xFF,COMPAT_VCPUINFO_upcall_mask(%rax)
    7.14 @@ -116,11 +116,13 @@ compat_process_softirqs:
    7.15  	ALIGN
    7.16  /* %rbx: struct vcpu */
    7.17  compat_process_nmi:
    7.18 +        testb $1,VCPU_nmi_masked(%rbx)
    7.19 +        jnz   compat_test_guest_events
    7.20 +        movb  $0,VCPU_nmi_pending(%rbx)
    7.21          movl  VCPU_nmi_addr(%rbx),%eax
    7.22          testl %eax,%eax
    7.23 -        jz    compat_test_all_events
    7.24 -        btsq  $_VCPUF_nmi_masked,VCPU_flags(%rbx)
    7.25 -        jc    1f
    7.26 +        jz    compat_test_guest_events
    7.27 +        movb  $1,VCPU_nmi_masked(%rbx)
    7.28          sti
    7.29          leaq  VCPU_trap_bounce(%rbx),%rdx
    7.30          movl  %eax,TRAPBOUNCE_eip(%rdx)
    7.31 @@ -128,9 +130,6 @@ compat_process_nmi:
    7.32          movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
    7.33          call  compat_create_bounce_frame
    7.34          jmp   compat_test_all_events
    7.35 -1:
    7.36 -        btsq  $_VCPUF_nmi_pending,VCPU_flags(%rbx)
    7.37 -        jmp   compat_test_guest_events
    7.38  
    7.39  compat_bad_hypercall:
    7.40          movl $-ENOSYS,UREGS_rax(%rsp)
     8.1 --- a/xen/arch/x86/x86_64/compat/traps.c	Thu Mar 29 16:28:34 2007 +0000
     8.2 +++ b/xen/arch/x86/x86_64/compat/traps.c	Thu Mar 29 18:07:33 2007 +0100
     8.3 @@ -118,7 +118,7 @@ unsigned int compat_iret(void)
     8.4          regs->_esp += 16;
     8.5  
     8.6      /* No longer in NMI context. */
     8.7 -    clear_bit(_VCPUF_nmi_masked, &current->vcpu_flags);
     8.8 +    current->nmi_masked = 0;
     8.9  
    8.10      /* Restore upcall mask from supplied EFLAGS.IF. */
    8.11      vcpu_info(current, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
     9.1 --- a/xen/arch/x86/x86_64/entry.S	Thu Mar 29 16:28:34 2007 +0000
     9.2 +++ b/xen/arch/x86/x86_64/entry.S	Thu Mar 29 18:07:33 2007 +0100
     9.3 @@ -177,8 +177,8 @@ test_all_events:
     9.4          leaq  irq_stat(%rip),%rcx
     9.5          testl $~0,(%rcx,%rax,1)
     9.6          jnz   process_softirqs
     9.7 -        btr   $_VCPUF_nmi_pending,VCPU_flags(%rbx)
     9.8 -        jc    process_nmi
     9.9 +        testb $1,VCPU_nmi_pending(%rbx)
    9.10 +        jnz   process_nmi
    9.11  test_guest_events:
    9.12          movq  VCPU_vcpu_info(%rbx),%rax
    9.13          testb $0xFF,VCPUINFO_upcall_mask(%rax)
    9.14 @@ -204,19 +204,19 @@ process_softirqs:
    9.15          ALIGN
    9.16  /* %rbx: struct vcpu */
    9.17  process_nmi:
    9.18 +        testb $1,VCPU_nmi_masked(%rbx)
    9.19 +        jnz  test_guest_events
    9.20 +        movb $0,VCPU_nmi_pending(%rbx)
    9.21          movq VCPU_nmi_addr(%rbx),%rax
    9.22          test %rax,%rax
    9.23 -        jz   test_all_events
    9.24 -        bts  $_VCPUF_nmi_masked,VCPU_flags(%rbx)
    9.25 -        jc   1f
    9.26 +        jz   test_guest_events
    9.27 +        movb $1,VCPU_nmi_masked(%rbx)
    9.28          sti
    9.29          leaq VCPU_trap_bounce(%rbx),%rdx
    9.30          movq %rax,TRAPBOUNCE_eip(%rdx)
    9.31          movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
    9.32          call create_bounce_frame
    9.33          jmp  test_all_events
    9.34 -1:      bts  $_VCPUF_nmi_pending,VCPU_flags(%rbx)
    9.35 -        jmp  test_guest_events
    9.36  
    9.37  bad_hypercall:
    9.38          movq $-ENOSYS,UREGS_rax(%rsp)
    10.1 --- a/xen/arch/x86/x86_64/traps.c	Thu Mar 29 16:28:34 2007 +0000
    10.2 +++ b/xen/arch/x86/x86_64/traps.c	Thu Mar 29 18:07:33 2007 +0100
    10.3 @@ -231,7 +231,7 @@ unsigned long do_iret(void)
    10.4      }
    10.5  
    10.6      /* No longer in NMI context. */
    10.7 -    clear_bit(_VCPUF_nmi_masked, &current->vcpu_flags);
    10.8 +    current->nmi_masked = 0;
    10.9  
   10.10      /* Restore upcall mask from supplied EFLAGS.IF. */
   10.11      vcpu_info(current, evtchn_upcall_mask) = !(iret_saved.rflags & EF_IE);
    11.1 --- a/xen/common/domain.c	Thu Mar 29 16:28:34 2007 +0000
    11.2 +++ b/xen/common/domain.c	Thu Mar 29 18:07:33 2007 +0100
    11.3 @@ -95,7 +95,6 @@ struct vcpu *alloc_vcpu(
    11.4  
    11.5      v->domain = d;
    11.6      v->vcpu_id = vcpu_id;
    11.7 -    spin_lock_init(&v->pause_lock);
    11.8  
    11.9      v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
   11.10      v->runstate.state_entry_time = NOW();
   11.11 @@ -407,40 +406,23 @@ void domain_destroy(struct domain *d)
   11.12      call_rcu(&d->rcu, complete_domain_destroy);
   11.13  }
   11.14  
   11.15 -static void vcpu_pause_setup(struct vcpu *v)
   11.16 -{
   11.17 -    spin_lock(&v->pause_lock);
   11.18 -    if ( v->pause_count++ == 0 )
   11.19 -        set_bit(_VCPUF_paused, &v->vcpu_flags);
   11.20 -    spin_unlock(&v->pause_lock);
   11.21 -}
   11.22 -
   11.23  void vcpu_pause(struct vcpu *v)
   11.24  {
   11.25      ASSERT(v != current);
   11.26 -    vcpu_pause_setup(v);
   11.27 +    atomic_inc(&v->pause_count);
   11.28      vcpu_sleep_sync(v);
   11.29  }
   11.30  
   11.31  void vcpu_pause_nosync(struct vcpu *v)
   11.32  {
   11.33 -    vcpu_pause_setup(v);
   11.34 +    atomic_inc(&v->pause_count);
   11.35      vcpu_sleep_nosync(v);
   11.36  }
   11.37  
   11.38  void vcpu_unpause(struct vcpu *v)
   11.39  {
   11.40 -    int wake;
   11.41 -
   11.42      ASSERT(v != current);
   11.43 -
   11.44 -    spin_lock(&v->pause_lock);
   11.45 -    wake = (--v->pause_count == 0);
   11.46 -    if ( wake )
   11.47 -        clear_bit(_VCPUF_paused, &v->vcpu_flags);
   11.48 -    spin_unlock(&v->pause_lock);
   11.49 -
   11.50 -    if ( wake )
   11.51 +    if ( atomic_dec_and_test(&v->pause_count) )
   11.52          vcpu_wake(v);
   11.53  }
   11.54  
   11.55 @@ -507,9 +489,9 @@ int vcpu_reset(struct vcpu *v)
   11.56      v->fpu_dirtied     = 0;
   11.57      v->is_polling      = 0;
   11.58      v->is_initialised  = 0;
   11.59 +    v->nmi_pending     = 0;
   11.60 +    v->nmi_masked      = 0;
   11.61      clear_bit(_VCPUF_blocked, &v->vcpu_flags);
   11.62 -    clear_bit(_VCPUF_nmi_pending, &v->vcpu_flags);
   11.63 -    clear_bit(_VCPUF_nmi_masked, &v->vcpu_flags);
   11.64  
   11.65   out:
   11.66      UNLOCK_BIGLOCK(v->domain);
    12.1 --- a/xen/common/kernel.c	Thu Mar 29 16:28:34 2007 +0000
    12.2 +++ b/xen/common/kernel.c	Thu Mar 29 18:07:33 2007 +0100
    12.3 @@ -256,7 +256,7 @@ long register_guest_nmi_callback(unsigne
    12.4       * now.
    12.5       */
    12.6      if ( arch_get_nmi_reason(d) != 0 )
    12.7 -        set_bit(_VCPUF_nmi_pending, &v->vcpu_flags);
    12.8 +        v->nmi_pending = 1;
    12.9  #endif
   12.10  
   12.11      return 0;
    13.1 --- a/xen/include/xen/sched.h	Thu Mar 29 16:28:34 2007 +0000
    13.2 +++ b/xen/include/xen/sched.h	Thu Mar 29 18:07:33 2007 +0100
    13.3 @@ -110,11 +110,14 @@ struct vcpu
    13.4      bool_t           is_initialised;
    13.5      /* Currently running on a CPU? */
    13.6      bool_t           is_running;
    13.7 +    /* NMI callback pending for this VCPU? */
    13.8 +    bool_t           nmi_pending;
    13.9 +    /* Avoid NMI reentry by allowing NMIs to be masked for short periods. */
   13.10 +    bool_t           nmi_masked;
   13.11  
   13.12      unsigned long    vcpu_flags;
   13.13  
   13.14 -    spinlock_t       pause_lock;
   13.15 -    unsigned int     pause_count;
   13.16 +    atomic_t         pause_count;
   13.17  
   13.18      u16              virq_to_evtchn[NR_VIRQS];
   13.19  
   13.20 @@ -440,31 +443,18 @@ extern struct domain *domain_list;
   13.21   /* VCPU is offline. */
   13.22  #define _VCPUF_down            1
   13.23  #define VCPUF_down             (1UL<<_VCPUF_down)
   13.24 - /* NMI callback pending for this VCPU? */
   13.25 -#define _VCPUF_nmi_pending     2
   13.26 -#define VCPUF_nmi_pending      (1UL<<_VCPUF_nmi_pending)
   13.27 - /* Avoid NMI reentry by allowing NMIs to be masked for short periods. */
   13.28 -#define _VCPUF_nmi_masked      3
   13.29 -#define VCPUF_nmi_masked       (1UL<<_VCPUF_nmi_masked)
   13.30 - /* VCPU is paused by the hypervisor? */
   13.31 -#define _VCPUF_paused          4
   13.32 -#define VCPUF_paused           (1UL<<_VCPUF_paused)
   13.33   /* VCPU is blocked awaiting an event to be consumed by Xen. */
   13.34 -#define _VCPUF_blocked_in_xen  5
   13.35 +#define _VCPUF_blocked_in_xen  2
   13.36  #define VCPUF_blocked_in_xen   (1UL<<_VCPUF_blocked_in_xen)
   13.37   /* VCPU affinity has changed: migrating to a new CPU. */
   13.38 -#define _VCPUF_migrating       6
   13.39 +#define _VCPUF_migrating       3
   13.40  #define VCPUF_migrating        (1UL<<_VCPUF_migrating)
   13.41  
   13.42  static inline int vcpu_runnable(struct vcpu *v)
   13.43  {
   13.44 -    return ( !(v->vcpu_flags &
   13.45 -               ( VCPUF_blocked |
   13.46 -                 VCPUF_down |
   13.47 -                 VCPUF_paused |
   13.48 -                 VCPUF_blocked_in_xen |
   13.49 -                 VCPUF_migrating )) &&
   13.50 -             (atomic_read(&v->domain->pause_count) == 0) );
   13.51 +    return (!v->vcpu_flags &&
   13.52 +            !atomic_read(&v->pause_count) &&
   13.53 +            !atomic_read(&v->domain->pause_count));
   13.54  }
   13.55  
   13.56  void vcpu_pause(struct vcpu *v);