ia64/xen-unstable

changeset 10354:ddc25d4ebf60

[XEN] Replace direct common-code access of evtchn_upcall_mask
with local_event_delivery_* accessors.
Notes:
1. keyhandler.c still contains some (read-only, debug) uses.
2. The accessors still reach event state through current->vcpu_info.
Both of the above may need to be compiled only for architectures
that use event channels.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Jun 10 11:07:11 2006 +0100 (2006-06-10)
parents ea4829e30092
children be05097d5d69
files xen/common/event_channel.c xen/common/schedule.c xen/include/asm-ia64/event.h xen/include/asm-ia64/vmx_vcpu.h xen/include/asm-x86/event.h xen/include/xen/event.h xen/include/xen/sched.h
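For readers unfamiliar with the new helpers, here is a standalone C sketch (not Xen source): it models the local_event_delivery_* accessor pattern this changeset introduces in place of direct evtchn_upcall_mask manipulation, and mirrors the do_block()/do_poll() usage visible in the diff below. The struct fake_vcpu_info and variable current_info are illustrative stand-ins for current->vcpu_info, not real hypervisor types.

/*
 * Standalone sketch (not Xen source): models the local_event_delivery_*
 * accessor pattern.  fake_vcpu_info / current_info stand in for
 * current->vcpu_info.
 */
#include <stdio.h>

struct fake_vcpu_info {
    int evtchn_upcall_pending;  /* an event is waiting for the guest     */
    int evtchn_upcall_mask;     /* 1 => event delivery to guest disabled */
};

static struct fake_vcpu_info current_info;  /* stand-in for current->vcpu_info */

static int local_event_delivery_is_enabled(void)
{
    return !current_info.evtchn_upcall_mask;
}

static void local_event_delivery_enable(void)
{
    current_info.evtchn_upcall_mask = 0;
}

static void local_event_delivery_disable(void)
{
    current_info.evtchn_upcall_mask = 1;
}

static int local_events_need_delivery(void)
{
    /* Pending and not masked, as in the old event_pending() macro. */
    return current_info.evtchn_upcall_pending &&
           !current_info.evtchn_upcall_mask;
}

int main(void)
{
    int blocked;

    /* Start masked with an event already pending. */
    current_info.evtchn_upcall_mask    = 1;
    current_info.evtchn_upcall_pending = 1;

    /* do_block() pattern: re-enable delivery, mark blocked, then
     * re-check for events to avoid the wakeup-waiting race. */
    local_event_delivery_enable();
    blocked = 1;
    if ( local_events_need_delivery() )
        blocked = 0;  /* an event is deliverable: do not sleep */

    printf("delivery enabled: %d, blocked: %d\n",
           local_event_delivery_is_enabled(), blocked);

    /* do_poll() pattern: polling requires delivery to be masked. */
    local_event_delivery_disable();
    if ( local_event_delivery_is_enabled() )
        printf("would return -EINVAL\n");

    return 0;
}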
line diff
     1.1 --- a/xen/common/event_channel.c	Sat Jun 10 11:05:11 2006 +0100
     1.2 +++ b/xen/common/event_channel.c	Sat Jun 10 11:07:11 2006 +0100
     1.3 @@ -499,7 +499,7 @@ void evtchn_set_pending(struct vcpu *v, 
     1.4          evtchn_notify(v);
     1.5      }
     1.6      else if ( unlikely(test_bit(_VCPUF_blocked, &v->vcpu_flags) &&
     1.7 -                       v->vcpu_info->evtchn_upcall_mask) )
     1.8 +                       !local_event_delivery_is_enabled()) )
     1.9      {
    1.10          /*
    1.11           * Blocked and masked will usually mean that the VCPU executed 
     2.1 --- a/xen/common/schedule.c	Sat Jun 10 11:05:11 2006 +0100
     2.2 +++ b/xen/common/schedule.c	Sat Jun 10 11:07:11 2006 +0100
     2.3 @@ -199,11 +199,11 @@ static long do_block(void)
     2.4  {
     2.5      struct vcpu *v = current;
     2.6  
     2.7 -    v->vcpu_info->evtchn_upcall_mask = 0;
     2.8 +    local_event_delivery_enable();
     2.9      set_bit(_VCPUF_blocked, &v->vcpu_flags);
    2.10  
    2.11      /* Check for events /after/ blocking: avoids wakeup waiting race. */
    2.12 -    if ( event_pending(v) )
    2.13 +    if ( local_events_need_delivery() )
    2.14      {
    2.15          clear_bit(_VCPUF_blocked, &v->vcpu_flags);
    2.16      }
    2.17 @@ -230,8 +230,8 @@ static long do_poll(struct sched_poll *s
    2.18      if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
    2.19          return -EFAULT;
    2.20  
    2.21 -    /* Ensure that upcalls are disabled: tested by evtchn_set_pending(). */
    2.22 -    if ( !v->vcpu_info->evtchn_upcall_mask )
    2.23 +    /* Ensure that events are disabled: tested by evtchn_set_pending(). */
    2.24 +    if ( local_event_delivery_is_enabled() )
    2.25          return -EINVAL;
    2.26  
    2.27      set_bit(_VCPUF_blocked, &v->vcpu_flags);
    2.28 @@ -248,7 +248,7 @@ static long do_poll(struct sched_poll *s
    2.29              goto out;
    2.30  
    2.31          rc = 0;
    2.32 -        if ( evtchn_pending(v->domain, port) )
    2.33 +        if ( test_bit(port, v->domain->shared_info->evtchn_pending) )
    2.34              goto out;
    2.35      }
    2.36  
     3.1 --- a/xen/include/asm-ia64/event.h	Sat Jun 10 11:05:11 2006 +0100
     3.2 +++ b/xen/include/asm-ia64/event.h	Sat Jun 10 11:07:11 2006 +0100
     3.3 @@ -37,6 +37,26 @@ static inline void evtchn_notify(struct 
     3.4      (!!(v)->vcpu_info->evtchn_upcall_pending &  \
     3.5        !(v)->vcpu_info->evtchn_upcall_mask)
     3.6  
     3.7 +static inline int local_events_need_delivery(void)
     3.8 +{
     3.9 +    return event_pending(current);
    3.10 +}
    3.11 +
    3.12 +static inline int local_event_delivery_is_enabled(void)
    3.13 +{
    3.14 +    return !current->vcpu_info->evtchn_upcall_mask;
    3.15 +}
    3.16 +
    3.17 +static inline void local_event_delivery_disable(void)
    3.18 +{
    3.19 +    current->vcpu_info->evtchn_upcall_mask = 1;
    3.20 +}
    3.21 +
    3.22 +static inline void local_event_delivery_enable(void)
    3.23 +{
    3.24 +    current->vcpu_info->evtchn_upcall_mask = 0;
    3.25 +}
    3.26 +
    3.27  static inline int arch_virq_is_global(int virq)
    3.28  {
    3.29      int rc;
     4.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Sat Jun 10 11:05:11 2006 +0100
     4.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Sat Jun 10 11:07:11 2006 +0100
     4.3 @@ -488,6 +488,4 @@ vcpu_get_vhpt(VCPU *vcpu)
     4.4      return &vcpu->arch.vhpt;
     4.5  }
     4.6  
     4.7 -#define check_work_pending(v)	\
     4.8 -    (event_pending((v)) || ((v)->arch.irq_new_pending))
     4.9  #endif
     5.1 --- a/xen/include/asm-x86/event.h	Sat Jun 10 11:05:11 2006 +0100
     5.2 +++ b/xen/include/asm-x86/event.h	Sat Jun 10 11:07:11 2006 +0100
     5.3 @@ -26,10 +26,28 @@ static inline void evtchn_notify(struct 
     5.4          smp_send_event_check_cpu(v->processor);
     5.5  }
     5.6  
     5.7 -/* Note: Bitwise operations result in fast code with no branches. */
     5.8 -#define event_pending(v)                        \
     5.9 -    (!!(v)->vcpu_info->evtchn_upcall_pending &  \
    5.10 -      !(v)->vcpu_info->evtchn_upcall_mask)
    5.11 +static inline int local_events_need_delivery(void)
    5.12 +{
    5.13 +    struct vcpu *v = current;
    5.14 +    /* Note: Bitwise operations result in fast code with no branches. */
    5.15 +    return (!!v->vcpu_info->evtchn_upcall_pending &
    5.16 +             !v->vcpu_info->evtchn_upcall_mask);
    5.17 +}
    5.18 +
    5.19 +static inline int local_event_delivery_is_enabled(void)
    5.20 +{
    5.21 +    return !current->vcpu_info->evtchn_upcall_mask;
    5.22 +}
    5.23 +
    5.24 +static inline void local_event_delivery_disable(void)
    5.25 +{
    5.26 +    current->vcpu_info->evtchn_upcall_mask = 1;
    5.27 +}
    5.28 +
    5.29 +static inline void local_event_delivery_enable(void)
    5.30 +{
    5.31 +    current->vcpu_info->evtchn_upcall_mask = 0;
    5.32 +}
    5.33  
    5.34  /* No arch specific virq definition now. Default to global. */
    5.35  static inline int arch_virq_is_global(int virq)
     6.1 --- a/xen/include/xen/event.h	Sat Jun 10 11:05:11 2006 +0100
     6.2 +++ b/xen/include/xen/event.h	Sat Jun 10 11:07:11 2006 +0100
     6.3 @@ -38,9 +38,6 @@ extern void send_guest_global_virq(struc
     6.4   */
     6.5  extern void send_guest_pirq(struct domain *d, int pirq);
     6.6  
     6.7 -#define evtchn_pending(d, p)                    \
     6.8 -    (test_bit((p), &(d)->shared_info->evtchn_pending[0]))
     6.9 -
    6.10  /* Send a notification from a local event-channel port. */
    6.11  extern long evtchn_send(unsigned int lport);
    6.12  
     7.1 --- a/xen/include/xen/sched.h	Sat Jun 10 11:05:11 2006 +0100
     7.2 +++ b/xen/include/xen/sched.h	Sat Jun 10 11:07:11 2006 +0100
     7.3 @@ -318,7 +318,7 @@ unsigned long hypercall_create_continuat
     7.4  
     7.5  #define hypercall_preempt_check() (unlikely(    \
     7.6          softirq_pending(smp_processor_id()) |   \
     7.7 -        event_pending(current)                  \
     7.8 +        local_events_need_delivery()            \
     7.9      ))
    7.10  
    7.11  /* This domain_hash and domain_list are protected by the domlist_lock. */