ia64/xen-unstable

changeset 18441:ae9b223a675d

More efficient implementation of SCHEDOP_poll when polling a single port.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Sep 04 14:38:26 2008 +0100 (2008-09-04)
parents 8d982c7a0d30
children 392b04ccaf3c
files xen/common/domain.c xen/common/event_channel.c xen/common/schedule.c xen/include/xen/sched.h
line diff
     1.1 --- a/xen/common/domain.c	Thu Sep 04 14:37:56 2008 +0100
     1.2 +++ b/xen/common/domain.c	Thu Sep 04 14:38:26 2008 +0100
     1.3 @@ -651,9 +651,11 @@ void vcpu_reset(struct vcpu *v)
     1.4  
     1.5      set_bit(_VPF_down, &v->pause_flags);
     1.6  
     1.7 +    clear_bit(v->vcpu_id, d->poll_mask);
     1.8 +    v->poll_evtchn = 0;
     1.9 +
    1.10      v->fpu_initialised = 0;
    1.11      v->fpu_dirtied     = 0;
    1.12 -    v->is_polling      = 0;
    1.13      v->is_initialised  = 0;
    1.14      v->nmi_pending     = 0;
    1.15      v->mce_pending     = 0;
     2.1 --- a/xen/common/event_channel.c	Thu Sep 04 14:37:56 2008 +0100
     2.2 +++ b/xen/common/event_channel.c	Thu Sep 04 14:38:26 2008 +0100
     2.3 @@ -545,6 +545,7 @@ out:
     2.4  static int evtchn_set_pending(struct vcpu *v, int port)
     2.5  {
     2.6      struct domain *d = v->domain;
     2.7 +    int vcpuid;
     2.8  
     2.9      /*
    2.10       * The following bit operations must happen in strict order.
    2.11 @@ -564,15 +565,19 @@ static int evtchn_set_pending(struct vcp
    2.12      }
    2.13      
    2.14      /* Check if some VCPU might be polling for this event. */
    2.15 -    if ( unlikely(d->is_polling) )
    2.16 +    if ( likely(bitmap_empty(d->poll_mask, MAX_VIRT_CPUS)) )
    2.17 +        return 0;
    2.18 +
    2.19 +    /* Wake any interested (or potentially interested) pollers. */
    2.20 +    for ( vcpuid = find_first_bit(d->poll_mask, MAX_VIRT_CPUS);
    2.21 +          vcpuid < MAX_VIRT_CPUS;
    2.22 +          vcpuid = find_next_bit(d->poll_mask, MAX_VIRT_CPUS, vcpuid+1) )
    2.23      {
    2.24 -        d->is_polling = 0;
    2.25 -        smp_mb(); /* check vcpu poll-flags /after/ clearing domain poll-flag */
    2.26 -        for_each_vcpu ( d, v )
    2.27 +        v = d->vcpu[vcpuid];
    2.28 +        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
    2.29 +             test_and_clear_bit(vcpuid, d->poll_mask) )
    2.30          {
    2.31 -            if ( !v->is_polling )
    2.32 -                continue;
    2.33 -            v->is_polling = 0;
    2.34 +            v->poll_evtchn = 0;
    2.35              vcpu_unblock(v);
    2.36          }
    2.37      }
     3.1 --- a/xen/common/schedule.c	Thu Sep 04 14:37:56 2008 +0100
     3.2 +++ b/xen/common/schedule.c	Thu Sep 04 14:38:26 2008 +0100
     3.3 @@ -198,6 +198,27 @@ void vcpu_wake(struct vcpu *v)
     3.4      TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
     3.5  }
     3.6  
     3.7 +void vcpu_unblock(struct vcpu *v)
     3.8 +{
     3.9 +    if ( !test_and_clear_bit(_VPF_blocked, &v->pause_flags) )
    3.10 +        return;
    3.11 +
    3.12 +    /* Polling period ends when a VCPU is unblocked. */
    3.13 +    if ( unlikely(v->poll_evtchn != 0) )
    3.14 +    {
    3.15 +        v->poll_evtchn = 0;
    3.16 +        /*
    3.17 +         * We *must* re-clear _VPF_blocked to avoid racing other wakeups of
    3.18 +         * this VCPU (and it then going back to sleep on poll_mask).
     3.19 +         * NB. Only the waker that clears our poll_mask bit does the re-clear.
    3.20 +         */
    3.21 +        if ( test_and_clear_bit(v->vcpu_id, v->domain->poll_mask) )
    3.22 +            clear_bit(_VPF_blocked, &v->pause_flags);
    3.23 +    }
    3.24 +
    3.25 +    vcpu_wake(v);
    3.26 +}
    3.27 +
    3.28  static void vcpu_migrate(struct vcpu *v)
    3.29  {
    3.30      unsigned long flags;
    3.31 @@ -337,7 +358,7 @@ static long do_poll(struct sched_poll *s
    3.32      struct vcpu   *v = current;
    3.33      struct domain *d = v->domain;
    3.34      evtchn_port_t  port;
    3.35 -    long           rc = 0;
    3.36 +    long           rc;
    3.37      unsigned int   i;
    3.38  
    3.39      /* Fairly arbitrary limit. */
    3.40 @@ -348,11 +369,24 @@ static long do_poll(struct sched_poll *s
    3.41          return -EFAULT;
    3.42  
    3.43      set_bit(_VPF_blocked, &v->pause_flags);
    3.44 -    v->is_polling = 1;
    3.45 -    d->is_polling = 1;
    3.46 +    v->poll_evtchn = -1;
    3.47 +    set_bit(v->vcpu_id, d->poll_mask);
    3.48 +
    3.49 +#ifndef CONFIG_X86 /* set_bit() implies mb() on x86 */
    3.50 +    /* Check for events /after/ setting flags: avoids wakeup waiting race. */
    3.51 +    smp_mb();
    3.52  
    3.53 -    /* Check for events /after/ setting flags: avoids wakeup waiting race. */
    3.54 -    smp_wmb();
    3.55 +    /*
    3.56 +     * Someone may have seen we are blocked but not that we are polling, or
    3.57 +     * vice versa. We are certainly being woken, so clean up and bail. Beyond
    3.58 +     * this point others can be guaranteed to clean up for us if they wake us.
    3.59 +     */
    3.60 +    rc = 0;
    3.61 +    if ( (v->poll_evtchn == 0) ||
    3.62 +         !test_bit(_VPF_blocked, &v->pause_flags) ||
    3.63 +         !test_bit(v->vcpu_id, d->poll_mask) )
    3.64 +        goto out;
    3.65 +#endif
    3.66  
    3.67      for ( i = 0; i < sched_poll->nr_ports; i++ )
    3.68      {
    3.69 @@ -369,6 +403,9 @@ static long do_poll(struct sched_poll *s
    3.70              goto out;
    3.71      }
    3.72  
    3.73 +    if ( sched_poll->nr_ports == 1 )
    3.74 +        v->poll_evtchn = port;
    3.75 +
    3.76      if ( sched_poll->timeout != 0 )
    3.77          set_timer(&v->poll_timer, sched_poll->timeout);
    3.78  
    3.79 @@ -378,7 +415,8 @@ static long do_poll(struct sched_poll *s
    3.80      return 0;
    3.81  
    3.82   out:
    3.83 -    v->is_polling = 0;
    3.84 +    v->poll_evtchn = 0;
    3.85 +    clear_bit(v->vcpu_id, d->poll_mask);
    3.86      clear_bit(_VPF_blocked, &v->pause_flags);
    3.87      return rc;
    3.88  }
    3.89 @@ -760,11 +798,8 @@ static void poll_timer_fn(void *data)
    3.90  {
    3.91      struct vcpu *v = data;
    3.92  
    3.93 -    if ( !v->is_polling )
    3.94 -        return;
    3.95 -
    3.96 -    v->is_polling = 0;
    3.97 -    vcpu_unblock(v);
    3.98 +    if ( test_and_clear_bit(v->vcpu_id, v->domain->poll_mask) )
    3.99 +        vcpu_unblock(v);
   3.100  }
   3.101  
   3.102  /* Initialise the data structures. */
     4.1 --- a/xen/include/xen/sched.h	Thu Sep 04 14:37:56 2008 +0100
     4.2 +++ b/xen/include/xen/sched.h	Thu Sep 04 14:38:26 2008 +0100
     4.3 @@ -106,8 +106,6 @@ struct vcpu
     4.4      bool_t           fpu_initialised;
     4.5      /* Has the FPU been used since it was last saved? */
     4.6      bool_t           fpu_dirtied;
     4.7 -    /* Is this VCPU polling any event channels (SCHEDOP_poll)? */
     4.8 -    bool_t           is_polling;
     4.9      /* Initialization completed for this VCPU? */
    4.10      bool_t           is_initialised;
    4.11      /* Currently running on a CPU? */
    4.12 @@ -134,6 +132,13 @@ struct vcpu
    4.13      /* VCPU affinity is temporarily locked from controller changes? */
    4.14      bool_t           affinity_locked;
    4.15  
    4.16 +    /*
    4.17 +     * > 0: a single port is being polled;
    4.18 +     * = 0: nothing is being polled (vcpu should be clear in d->poll_mask);
    4.19 +     * < 0: multiple ports may be being polled.
    4.20 +     */
    4.21 +    int              poll_evtchn;
    4.22 +
    4.23      unsigned long    pause_flags;
    4.24      atomic_t         pause_count;
    4.25  
    4.26 @@ -209,8 +214,6 @@ struct domain
    4.27      struct domain   *target;
    4.28      /* Is this guest being debugged by dom0? */
    4.29      bool_t           debugger_attached;
    4.30 -    /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
    4.31 -    bool_t           is_polling;
    4.32      /* Is this guest dying (i.e., a zombie)? */
    4.33      enum { DOMDYING_alive, DOMDYING_dying, DOMDYING_dead } is_dying;
    4.34      /* Domain is paused by controller software? */
    4.35 @@ -218,6 +221,9 @@ struct domain
    4.36      /* Domain's VCPUs are pinned 1:1 to physical CPUs? */
    4.37      bool_t           is_pinned;
    4.38  
    4.39 +    /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
    4.40 +    DECLARE_BITMAP(poll_mask, MAX_VIRT_CPUS);
    4.41 +
    4.42      /* Guest has shut down (inc. reason code)? */
    4.43      spinlock_t       shutdown_lock;
    4.44      bool_t           is_shutting_down; /* in process of shutting down? */
    4.45 @@ -507,6 +513,7 @@ static inline int vcpu_runnable(struct v
    4.46               atomic_read(&v->domain->pause_count));
    4.47  }
    4.48  
    4.49 +void vcpu_unblock(struct vcpu *v);
    4.50  void vcpu_pause(struct vcpu *v);
    4.51  void vcpu_pause_nosync(struct vcpu *v);
    4.52  void domain_pause(struct domain *d);
    4.53 @@ -523,12 +530,6 @@ void vcpu_unlock_affinity(struct vcpu *v
    4.54  
    4.55  void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
    4.56  
    4.57 -static inline void vcpu_unblock(struct vcpu *v)
    4.58 -{
    4.59 -    if ( test_and_clear_bit(_VPF_blocked, &v->pause_flags) )
    4.60 -        vcpu_wake(v);
    4.61 -}
    4.62 -
    4.63  #define IS_PRIV(_d) ((_d)->is_privileged)
    4.64  #define IS_PRIV_FOR(_d, _t) (IS_PRIV(_d) || ((_d)->target && (_d)->target == (_t)))
    4.65