ia64/xen-unstable

changeset 5308:3c5a200df2c1

bitkeeper revision 1.1664.1.1 (42a08862ToP8uoeBgUzDwAQBMXo4wg)

Event-channel CPU affinity. Currently all event channels still bind to
VCPU#0 at start of day, and have their binding automatically changed
when bound to a VIRQ or IPI source. XenLinux maintains a per-cpu
evtchn mask denoting which event channels are bound to each cpu.
Todo: Allow guests to change binding of non-ipi and non-virq evtchns.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Jun 03 16:42:10 2005 +0000 (2005-06-03)
parents d68bc64e2cfe
children fa68b7fb217f
files linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c xen/arch/x86/irq.c xen/common/domain.c xen/common/event_channel.c xen/include/public/event_channel.h xen/include/xen/event.h xen/include/xen/sched.h
line diff
     1.1 --- a/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c	Fri Jun 03 14:10:37 2005 +0000
     1.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c	Fri Jun 03 16:42:10 2005 +0000
     1.3 @@ -74,6 +74,33 @@ static int irq_bindcount[NR_IRQS];
     1.4  /* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
     1.5  static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
     1.6  
     1.7 +#ifdef CONFIG_SMP
     1.8 +
     1.9 +static u8  cpu_evtchn[NR_EVENT_CHANNELS];
    1.10 +static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];
    1.11 +
    1.12 +#define active_evtchns(cpu,sh,idx)              \
    1.13 +    ((sh)->evtchn_pending[idx] &                \
    1.14 +     cpu_evtchn_mask[cpu][idx] &                \
    1.15 +     ~(sh)->evtchn_mask[idx])
    1.16 +
    1.17 +static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
    1.18 +{
    1.19 +    clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
    1.20 +    set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
    1.21 +    cpu_evtchn[chn] = cpu;
    1.22 +}
    1.23 +
    1.24 +#else
    1.25 +
    1.26 +#define active_evtchns(cpu,sh,idx)              \
    1.27 +    ((sh)->evtchn_pending[idx] &                \
    1.28 +     ~(sh)->evtchn_mask[idx])
    1.29 +
    1.30 +#define bind_evtchn_to_cpu(chn,cpu) ((void)0)
    1.31 +
    1.32 +#endif
    1.33 +
    1.34  /* Upcall to generic IRQ layer. */
    1.35  #ifdef CONFIG_X86
    1.36  #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
    1.37 @@ -109,9 +136,9 @@ asmlinkage void evtchn_do_upcall(struct 
    1.38  {
    1.39      u32 	   l1, l2;
    1.40      unsigned int   l1i, l2i, port;
    1.41 -    int            irq;
    1.42 +    int            irq, cpu = smp_processor_id();
    1.43      shared_info_t *s = HYPERVISOR_shared_info;
    1.44 -    vcpu_info_t   *vcpu_info = &s->vcpu_data[smp_processor_id()];
    1.45 +    vcpu_info_t   *vcpu_info = &s->vcpu_data[cpu];
    1.46  
    1.47      vcpu_info->evtchn_upcall_pending = 0;
    1.48      
    1.49 @@ -122,7 +149,7 @@ asmlinkage void evtchn_do_upcall(struct 
    1.50          l1i = __ffs(l1);
    1.51          l1 &= ~(1 << l1i);
    1.52          
    1.53 -        while ( (l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0 )
    1.54 +        while ( (l2 = active_evtchns(cpu, s, l1i)) != 0 )
    1.55          {
    1.56              l2i = __ffs(l2);
    1.57              l2 &= ~(1 << l2i);
    1.58 @@ -171,6 +198,8 @@ int bind_virq_to_irq(int virq)
    1.59          irq_to_evtchn[irq]    = evtchn;
    1.60  
    1.61          per_cpu(virq_to_irq, cpu)[virq] = irq;
    1.62 +
    1.63 +        bind_evtchn_to_cpu(evtchn, cpu);
    1.64      }
    1.65  
    1.66      irq_bindcount[irq]++;
    1.67 @@ -225,8 +254,13 @@ int bind_ipi_on_cpu_to_irq(int cpu, int 
    1.68          irq_to_evtchn[irq]    = evtchn;
    1.69  
    1.70          per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
    1.71 -    } else
    1.72 +
    1.73 +        bind_evtchn_to_cpu(evtchn, cpu);
    1.74 +    } 
    1.75 +    else
    1.76 +    {
    1.77  	irq = evtchn_to_irq[evtchn];
    1.78 +    }
    1.79  
    1.80      irq_bindcount[irq]++;
    1.81  
    1.82 @@ -546,6 +580,11 @@ void __init init_IRQ(void)
    1.83  
    1.84      spin_lock_init(&irq_mapping_update_lock);
    1.85  
    1.86 +#ifdef CONFIG_SMP
    1.87 +    /* By default all event channels notify CPU#0. */
    1.88 +    memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
    1.89 +#endif
    1.90 +
    1.91      for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
    1.92  	/* No VIRQ -> IRQ mappings. */
    1.93  	for ( i = 0; i < NR_VIRQS; i++ )
     2.1 --- a/xen/arch/x86/irq.c	Fri Jun 03 14:10:37 2005 +0000
     2.2 +++ b/xen/arch/x86/irq.c	Fri Jun 03 16:42:10 2005 +0000
     2.3 @@ -184,22 +184,22 @@ typedef struct {
     2.4      u8 nr_guests;
     2.5      u8 in_flight;
     2.6      u8 shareable;
     2.7 -    struct vcpu *guest[IRQ_MAX_GUESTS];
     2.8 +    struct domain *guest[IRQ_MAX_GUESTS];
     2.9  } irq_guest_action_t;
    2.10  
    2.11  static void __do_IRQ_guest(int irq)
    2.12  {
    2.13      irq_desc_t         *desc = &irq_desc[irq];
    2.14      irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
    2.15 -    struct vcpu        *v;
    2.16 +    struct domain      *d;
    2.17      int                 i;
    2.18  
    2.19      for ( i = 0; i < action->nr_guests; i++ )
    2.20      {
    2.21 -        v = action->guest[i];
    2.22 -        if ( !test_and_set_bit(irq, &v->domain->pirq_mask) )
    2.23 +        d = action->guest[i];
    2.24 +        if ( !test_and_set_bit(irq, &d->pirq_mask) )
    2.25              action->in_flight++;
    2.26 -        send_guest_pirq(v, irq);
    2.27 +        send_guest_pirq(d, irq);
    2.28      }
    2.29  }
    2.30  
    2.31 @@ -294,7 +294,7 @@ int pirq_guest_bind(struct vcpu *v, int 
    2.32          goto out;
    2.33      }
    2.34  
    2.35 -    action->guest[action->nr_guests++] = v;
    2.36 +    action->guest[action->nr_guests++] = v->domain;
    2.37  
    2.38   out:
    2.39      spin_unlock_irqrestore(&desc->lock, flags);
    2.40 @@ -328,7 +328,7 @@ int pirq_guest_unbind(struct domain *d, 
    2.41      else
    2.42      {
    2.43          i = 0;
    2.44 -        while ( action->guest[i] && action->guest[i]->domain != d )
    2.45 +        while ( action->guest[i] && (action->guest[i] != d) )
    2.46              i++;
    2.47          memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
    2.48          action->nr_guests--;
     3.1 --- a/xen/common/domain.c	Fri Jun 03 14:10:37 2005 +0000
     3.2 +++ b/xen/common/domain.c	Fri Jun 03 16:42:10 2005 +0000
     3.3 @@ -54,9 +54,9 @@ struct domain *do_createdomain(domid_t d
     3.4          set_bit(_DOMF_idle_domain, &d->domain_flags);
     3.5  
     3.6      if ( !is_idle_task(d) &&
     3.7 -         ((init_event_channels(d) != 0) || (grant_table_create(d) != 0)) )
     3.8 +         ((evtchn_init(d) != 0) || (grant_table_create(d) != 0)) )
     3.9      {
    3.10 -        destroy_event_channels(d);
    3.11 +        evtchn_destroy(d);
    3.12          free_domain_struct(d);
    3.13          return NULL;
    3.14      }
    3.15 @@ -251,7 +251,7 @@ void domain_destruct(struct domain *d)
    3.16      *pd = d->next_in_hashbucket;
    3.17      write_unlock(&domlist_lock);
    3.18  
    3.19 -    destroy_event_channels(d);
    3.20 +    evtchn_destroy(d);
    3.21      grant_table_destroy(d);
    3.22  
    3.23      free_perdomain_pt(d);
     4.1 --- a/xen/common/event_channel.c	Fri Jun 03 14:10:37 2005 +0000
     4.2 +++ b/xen/common/event_channel.c	Fri Jun 03 16:42:10 2005 +0000
     4.3 @@ -27,50 +27,31 @@
     4.4  #include <public/xen.h>
     4.5  #include <public/event_channel.h>
     4.6  
     4.7 -#define INIT_EVENT_CHANNELS   16
     4.8 -#define MAX_EVENT_CHANNELS  1024
     4.9 -#define EVENT_CHANNELS_SPREAD 32
    4.10 +#define bucket_from_port(d,p) \
    4.11 +    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
    4.12 +#define port_is_valid(d,p)    \
    4.13 +    (((p) >= 0) && ((p) < MAX_EVTCHNS) && \
    4.14 +     (bucket_from_port(d,p) != NULL))
    4.15 +#define evtchn_from_port(d,p) \
    4.16 +    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])
    4.17  
    4.18 -
    4.19 -static int get_free_port(struct vcpu *v)
    4.20 +static int get_free_port(struct domain *d)
    4.21  {
    4.22 -    struct domain *d = v->domain;
    4.23 -    int max, port;
    4.24 -    event_channel_t *chn;
    4.25 -
    4.26 -    max = d->max_event_channel;
    4.27 -    chn = d->event_channel;
    4.28 -
    4.29 -    for ( port = v->vcpu_id * EVENT_CHANNELS_SPREAD; port < max; port++ )
    4.30 -        if ( chn[port].state == ECS_FREE )
    4.31 -            break;
    4.32 +    struct evtchn *chn;
    4.33 +    int            port;
    4.34  
    4.35 -    if ( port >= max )
    4.36 -    {
    4.37 -        if ( max == MAX_EVENT_CHANNELS )
    4.38 -            return -ENOSPC;
    4.39 +    for ( port = 0; port_is_valid(d, port); port++ )
    4.40 +        if ( evtchn_from_port(d, port)->state == ECS_FREE )
    4.41 +            return port;
    4.42  
    4.43 -        if ( port == 0 )
    4.44 -            max = INIT_EVENT_CHANNELS;
    4.45 -        else
    4.46 -            max = port + EVENT_CHANNELS_SPREAD;
    4.47 -        
    4.48 -        chn = xmalloc_array(event_channel_t, max);
    4.49 -        if ( unlikely(chn == NULL) )
    4.50 -            return -ENOMEM;
    4.51 -
    4.52 -        memset(chn, 0, max * sizeof(event_channel_t));
    4.53 +    if ( port == MAX_EVTCHNS )
    4.54 +        return -ENOSPC;
    4.55  
    4.56 -        if ( d->event_channel != NULL )
    4.57 -        {
    4.58 -            memcpy(chn, d->event_channel, d->max_event_channel *
    4.59 -                   sizeof(event_channel_t));
    4.60 -            xfree(d->event_channel);
    4.61 -        }
    4.62 -
    4.63 -        d->event_channel     = chn;
    4.64 -        d->max_event_channel = max;
    4.65 -    }
    4.66 +    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    4.67 +    if ( unlikely(chn == NULL) )
    4.68 +        return -ENOMEM;
    4.69 +    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    4.70 +    bucket_from_port(d, port) = chn;
    4.71  
    4.72      return port;
    4.73  }
    4.74 @@ -78,18 +59,20 @@ static int get_free_port(struct vcpu *v)
    4.75  
    4.76  static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
    4.77  {
    4.78 +    struct evtchn *chn;
    4.79      struct domain *d = current->domain;
    4.80      int            port;
    4.81  
    4.82 -    spin_lock(&d->event_channel_lock);
    4.83 +    spin_lock(&d->evtchn_lock);
    4.84  
    4.85 -    if ( (port = get_free_port(current)) >= 0 )
    4.86 +    if ( (port = get_free_port(d)) >= 0 )
    4.87      {
    4.88 -        d->event_channel[port].state = ECS_UNBOUND;
    4.89 -        d->event_channel[port].u.unbound.remote_domid = alloc->dom;
    4.90 +        chn = evtchn_from_port(d, port);
    4.91 +        chn->state = ECS_UNBOUND;
    4.92 +        chn->u.unbound.remote_domid = alloc->dom;
    4.93      }
    4.94  
    4.95 -    spin_unlock(&d->event_channel_lock);
    4.96 +    spin_unlock(&d->evtchn_lock);
    4.97  
    4.98      if ( port < 0 )
    4.99          return port;
   4.100 @@ -102,8 +85,8 @@ static long evtchn_alloc_unbound(evtchn_
   4.101  static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
   4.102  {
   4.103  #define ERROR_EXIT(_errno) do { rc = (_errno); goto out; } while ( 0 )
   4.104 +    struct evtchn *chn1, *chn2;
   4.105      struct domain *d1, *d2;
   4.106 -    struct vcpu   *v1, *v2;
   4.107      int            port1 = bind->port1, port2 = bind->port2;
   4.108      domid_t        dom1 = bind->dom1, dom2 = bind->dom2;
   4.109      long           rc = 0;
   4.110 @@ -111,9 +94,6 @@ static long evtchn_bind_interdomain(evtc
   4.111      if ( !IS_PRIV(current->domain) && (dom1 != DOMID_SELF) )
   4.112          return -EPERM;
   4.113  
   4.114 -    if ( (port1 < 0) || (port2 < 0) )
   4.115 -        return -EINVAL;
   4.116 -
   4.117      if ( dom1 == DOMID_SELF )
   4.118          dom1 = current->domain->domain_id;
   4.119      if ( dom2 == DOMID_SELF )
   4.120 @@ -127,63 +107,61 @@ static long evtchn_bind_interdomain(evtc
   4.121          return -ESRCH;
   4.122      }
   4.123  
   4.124 -    v1 = d1->vcpu[0];   /* XXX */
   4.125 -    v2 = d2->vcpu[0];   /* XXX */
   4.126 -
   4.127      /* Avoid deadlock by first acquiring lock of domain with smaller id. */
   4.128      if ( d1 < d2 )
   4.129      {
   4.130 -        spin_lock(&d1->event_channel_lock);
   4.131 -        spin_lock(&d2->event_channel_lock);
   4.132 +        spin_lock(&d1->evtchn_lock);
   4.133 +        spin_lock(&d2->evtchn_lock);
   4.134      }
   4.135      else
   4.136      {
   4.137          if ( d1 != d2 )
   4.138 -            spin_lock(&d2->event_channel_lock);
   4.139 -        spin_lock(&d1->event_channel_lock);
   4.140 +            spin_lock(&d2->evtchn_lock);
   4.141 +        spin_lock(&d1->evtchn_lock);
   4.142      }
   4.143  
   4.144      /* Obtain, or ensure that we already have, a valid <port1>. */
   4.145      if ( port1 == 0 )
   4.146      {
   4.147 -        if ( (port1 = get_free_port(v1)) < 0 )
   4.148 +        if ( (port1 = get_free_port(d1)) < 0 )
   4.149              ERROR_EXIT(port1);
   4.150      }
   4.151 -    else if ( port1 >= d1->max_event_channel )
   4.152 +    else if ( !port_is_valid(d1, port1) )
   4.153          ERROR_EXIT(-EINVAL);
   4.154 +    chn1 = evtchn_from_port(d1, port1);
   4.155  
   4.156      /* Obtain, or ensure that we already have, a valid <port2>. */
   4.157      if ( port2 == 0 )
   4.158      {
   4.159          /* Make port1 non-free while we allocate port2 (in case dom1==dom2). */
   4.160 -        u16 tmp = d1->event_channel[port1].state;
   4.161 -        d1->event_channel[port1].state = ECS_INTERDOMAIN;
   4.162 -        port2 = get_free_port(v2);
   4.163 -        d1->event_channel[port1].state = tmp;
   4.164 +        u16 state = chn1->state;
   4.165 +        chn1->state = ECS_INTERDOMAIN;
   4.166 +        port2 = get_free_port(d2);
   4.167 +        chn1->state = state;
   4.168          if ( port2 < 0 )
   4.169              ERROR_EXIT(port2);
   4.170      }
   4.171 -    else if ( port2 >= d2->max_event_channel )
   4.172 +    else if ( !port_is_valid(d2, port2) )
   4.173          ERROR_EXIT(-EINVAL);
   4.174 +    chn2 = evtchn_from_port(d2, port2);
   4.175  
   4.176      /* Validate <dom1,port1>'s current state. */
   4.177 -    switch ( d1->event_channel[port1].state )
   4.178 +    switch ( chn1->state )
   4.179      {
   4.180      case ECS_FREE:
   4.181          break;
   4.182  
   4.183      case ECS_UNBOUND:
   4.184 -        if ( d1->event_channel[port1].u.unbound.remote_domid != dom2 )
   4.185 +        if ( chn1->u.unbound.remote_domid != dom2 )
   4.186              ERROR_EXIT(-EINVAL);
   4.187          break;
   4.188  
   4.189      case ECS_INTERDOMAIN:
   4.190 -        if ( d1->event_channel[port1].u.interdomain.remote_dom != v2 )
   4.191 +        if ( chn1->u.interdomain.remote_dom != d2 )
   4.192              ERROR_EXIT(-EINVAL);
   4.193 -        if ( (d1->event_channel[port1].u.interdomain.remote_port != port2) &&
   4.194 -             (bind->port2 != 0) )
   4.195 +        if ( (chn1->u.interdomain.remote_port != port2) && (bind->port2 != 0) )
   4.196              ERROR_EXIT(-EINVAL);
   4.197 -        port2 = d1->event_channel[port1].u.interdomain.remote_port;
   4.198 +        port2 = chn1->u.interdomain.remote_port;
   4.199          goto out;
   4.200  
   4.201      default:
   4.202 @@ -191,7 +169,7 @@ static long evtchn_bind_interdomain(evtc
   4.203      }
   4.204  
   4.205      /* Validate <dom2,port2>'s current state. */
   4.206 -    switch ( d2->event_channel[port2].state )
   4.207 +    switch ( chn2->state )
   4.208      {
   4.209      case ECS_FREE:
   4.210          if ( !IS_PRIV(current->domain) && (dom2 != DOMID_SELF) )
   4.211 @@ -199,17 +177,16 @@ static long evtchn_bind_interdomain(evtc
   4.212          break;
   4.213  
   4.214      case ECS_UNBOUND:
   4.215 -        if ( d2->event_channel[port2].u.unbound.remote_domid != dom1 )
   4.216 +        if ( chn2->u.unbound.remote_domid != dom1 )
   4.217              ERROR_EXIT(-EINVAL);
   4.218          break;
   4.219  
   4.220      case ECS_INTERDOMAIN:
   4.221 -        if ( d2->event_channel[port2].u.interdomain.remote_dom != v1 )
   4.222 +        if ( chn2->u.interdomain.remote_dom != d1 )
   4.223              ERROR_EXIT(-EINVAL);
   4.224 -        if ( (d2->event_channel[port2].u.interdomain.remote_port != port1) &&
   4.225 -             (bind->port1 != 0) )
   4.226 +        if ( (chn2->u.interdomain.remote_port != port1) && (bind->port1 != 0) )
   4.227              ERROR_EXIT(-EINVAL);
   4.228 -        port1 = d2->event_channel[port2].u.interdomain.remote_port;
   4.229 +        port1 = chn2->u.interdomain.remote_port;
   4.230          goto out;
   4.231  
   4.232      default:
   4.233 @@ -220,18 +197,18 @@ static long evtchn_bind_interdomain(evtc
   4.234       * Everything checked out okay -- bind <dom1,port1> to <dom2,port2>.
   4.235       */
   4.236  
   4.237 -    d1->event_channel[port1].u.interdomain.remote_dom  = v2;
   4.238 -    d1->event_channel[port1].u.interdomain.remote_port = (u16)port2;
   4.239 -    d1->event_channel[port1].state                     = ECS_INTERDOMAIN;
   4.240 +    chn1->u.interdomain.remote_dom  = d2;
   4.241 +    chn1->u.interdomain.remote_port = (u16)port2;
   4.242 +    chn1->state                     = ECS_INTERDOMAIN;
   4.243      
   4.244 -    d2->event_channel[port2].u.interdomain.remote_dom  = v1;
   4.245 -    d2->event_channel[port2].u.interdomain.remote_port = (u16)port1;
   4.246 -    d2->event_channel[port2].state                     = ECS_INTERDOMAIN;
   4.247 +    chn2->u.interdomain.remote_dom  = d1;
   4.248 +    chn2->u.interdomain.remote_port = (u16)port1;
   4.249 +    chn2->state                     = ECS_INTERDOMAIN;
   4.250  
   4.251   out:
   4.252 -    spin_unlock(&d1->event_channel_lock);
   4.253 +    spin_unlock(&d1->evtchn_lock);
   4.254      if ( d1 != d2 )
   4.255 -        spin_unlock(&d2->event_channel_lock);
   4.256 +        spin_unlock(&d2->evtchn_lock);
   4.257      
   4.258      put_domain(d1);
   4.259      put_domain(d2);
   4.260 @@ -246,6 +223,7 @@ static long evtchn_bind_interdomain(evtc
   4.261  
   4.262  static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
   4.263  {
   4.264 +    struct evtchn *chn;
   4.265      struct vcpu   *v = current;
   4.266      struct domain *d = v->domain;
   4.267      int            port, virq = bind->virq;
   4.268 @@ -253,23 +231,25 @@ static long evtchn_bind_virq(evtchn_bind
   4.269      if ( virq >= ARRAY_SIZE(v->virq_to_evtchn) )
   4.270          return -EINVAL;
   4.271  
   4.272 -    spin_lock(&d->event_channel_lock);
   4.273 +    spin_lock(&d->evtchn_lock);
   4.274  
   4.275      /*
   4.276       * Port 0 is the fallback port for VIRQs that haven't been explicitly
   4.277       * bound yet.
   4.278       */
   4.279      if ( ((port = v->virq_to_evtchn[virq]) != 0) ||
   4.280 -         ((port = get_free_port(v)) < 0) )
   4.281 +         ((port = get_free_port(d)) < 0) )
   4.282          goto out;
   4.283  
   4.284 -    d->event_channel[port].state  = ECS_VIRQ;
   4.285 -    d->event_channel[port].u.virq = virq;
   4.286 +    chn = evtchn_from_port(d, port);
   4.287 +    chn->state          = ECS_VIRQ;
   4.288 +    chn->notify_vcpu_id = v->vcpu_id;
   4.289 +    chn->u.virq         = virq;
   4.290  
   4.291      v->virq_to_evtchn[virq] = port;
   4.292  
   4.293   out:
   4.294 -    spin_unlock(&d->event_channel_lock);
   4.295 +    spin_unlock(&d->evtchn_lock);
   4.296  
   4.297      if ( port < 0 )
   4.298          return port;
   4.299 @@ -278,24 +258,26 @@ static long evtchn_bind_virq(evtchn_bind
   4.300      return 0;
   4.301  }
   4.302  
   4.303 +
   4.304  static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
   4.305  {
   4.306 -    struct vcpu   *v = current;
   4.307 -    struct domain *d = v->domain;
   4.308 +    struct evtchn *chn;
   4.309 +    struct domain *d = current->domain;
   4.310      int            port, ipi_vcpu = bind->ipi_vcpu;
   4.311  
   4.312 -    if ( ipi_vcpu >= MAX_VIRT_CPUS )
   4.313 +    if ( (ipi_vcpu >= MAX_VIRT_CPUS) || (d->vcpu[ipi_vcpu] == NULL) )
   4.314          return -EINVAL;
   4.315  
   4.316 -    spin_lock(&d->event_channel_lock);
   4.317 +    spin_lock(&d->evtchn_lock);
   4.318  
   4.319 -    if ( (port = get_free_port(v)) >= 0 )
   4.320 +    if ( (port = get_free_port(d)) >= 0 )
   4.321      {
   4.322 -        d->event_channel[port].state      = ECS_IPI;
   4.323 -        d->event_channel[port].u.ipi_vcpu = ipi_vcpu;
   4.324 +        chn = evtchn_from_port(d, port);
   4.325 +        chn->state          = ECS_IPI;
   4.326 +        chn->notify_vcpu_id = ipi_vcpu;
   4.327      }
   4.328  
   4.329 -    spin_unlock(&d->event_channel_lock);
   4.330 +    spin_unlock(&d->evtchn_lock);
   4.331  
   4.332      if ( port < 0 )
   4.333          return port;
   4.334 @@ -307,20 +289,23 @@ static long evtchn_bind_ipi(evtchn_bind_
   4.335  
   4.336  static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
   4.337  {
   4.338 +    struct evtchn *chn;
   4.339      struct domain *d = current->domain;
   4.340      int            port, rc, pirq = bind->pirq;
   4.341  
   4.342      if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
   4.343          return -EINVAL;
   4.344  
   4.345 -    spin_lock(&d->event_channel_lock);
   4.346 +    spin_lock(&d->evtchn_lock);
   4.347  
   4.348      if ( ((rc = port = d->pirq_to_evtchn[pirq]) != 0) ||
   4.349 -         ((rc = port = get_free_port(current)) < 0) )
   4.350 +         ((rc = port = get_free_port(d)) < 0) )
   4.351          goto out;
   4.352  
   4.353 +    chn = evtchn_from_port(d, port);
   4.354 +
   4.355      d->pirq_to_evtchn[pirq] = port;
   4.356 -    rc = pirq_guest_bind(current, pirq, 
   4.357 +    rc = pirq_guest_bind(d->vcpu[chn->notify_vcpu_id], pirq, 
   4.358                           !!(bind->flags & BIND_PIRQ__WILL_SHARE));
   4.359      if ( rc != 0 )
   4.360      {
   4.361 @@ -328,11 +313,11 @@ static long evtchn_bind_pirq(evtchn_bind
   4.362          goto out;
   4.363      }
   4.364  
   4.365 -    d->event_channel[port].state  = ECS_PIRQ;
   4.366 -    d->event_channel[port].u.pirq = pirq;
   4.367 +    chn->state  = ECS_PIRQ;
   4.368 +    chn->u.pirq = pirq;
   4.369  
   4.370   out:
   4.371 -    spin_unlock(&d->event_channel_lock);
   4.372 +    spin_unlock(&d->evtchn_lock);
   4.373  
   4.374      if ( rc < 0 )
   4.375          return rc;
   4.376 @@ -344,24 +329,23 @@ static long evtchn_bind_pirq(evtchn_bind
   4.377  
   4.378  static long __evtchn_close(struct domain *d1, int port1)
   4.379  {
   4.380 -    struct domain   *d2 = NULL;
   4.381 -    struct vcpu *v;
   4.382 -    event_channel_t *chn1, *chn2;
   4.383 -    int              port2;
   4.384 -    long             rc = 0;
   4.385 +    struct domain *d2 = NULL;
   4.386 +    struct vcpu   *v;
   4.387 +    struct evtchn *chn1, *chn2;
   4.388 +    int            port2;
   4.389 +    long           rc = 0;
   4.390  
   4.391   again:
   4.392 -    spin_lock(&d1->event_channel_lock);
   4.393 +    spin_lock(&d1->evtchn_lock);
   4.394  
   4.395 -    chn1 = d1->event_channel;
   4.396 -
   4.397 -    if ( (port1 < 0) || (port1 >= d1->max_event_channel) )
   4.398 +    if ( !port_is_valid(d1, port1) )
   4.399      {
   4.400          rc = -EINVAL;
   4.401          goto out;
   4.402      }
   4.403  
   4.404 -    switch ( chn1[port1].state )
   4.405 +    chn1 = evtchn_from_port(d1, port1);
   4.406 +    switch ( chn1->state )
   4.407      {
   4.408      case ECS_FREE:
   4.409      case ECS_RESERVED:
   4.410 @@ -372,15 +356,14 @@ static long __evtchn_close(struct domain
   4.411          break;
   4.412  
   4.413      case ECS_PIRQ:
   4.414 -        if ( (rc = pirq_guest_unbind(d1, chn1[port1].u.pirq)) == 0 )
   4.415 -            d1->pirq_to_evtchn[chn1[port1].u.pirq] = 0;
   4.416 +        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
   4.417 +            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
   4.418          break;
   4.419  
   4.420      case ECS_VIRQ:
   4.421 -        /* XXX could store vcpu in chn1[port1].u */
   4.422          for_each_vcpu ( d1, v )
   4.423 -            if (v->virq_to_evtchn[chn1[port1].u.virq] == port1)
   4.424 -                v->virq_to_evtchn[chn1[port1].u.virq] = 0;
   4.425 +            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
   4.426 +                v->virq_to_evtchn[chn1->u.virq] = 0;
   4.427          break;
   4.428  
   4.429      case ECS_IPI:
   4.430 @@ -389,7 +372,7 @@ static long __evtchn_close(struct domain
   4.431      case ECS_INTERDOMAIN:
   4.432          if ( d2 == NULL )
   4.433          {
   4.434 -            d2 = chn1[port1].u.interdomain.remote_dom->domain;
   4.435 +            d2 = chn1->u.interdomain.remote_dom;
   4.436  
   4.437              /* If we unlock d1 then we could lose d2. Must get a reference. */
   4.438              if ( unlikely(!get_domain(d2)) )
   4.439 @@ -404,50 +387,47 @@ static long __evtchn_close(struct domain
   4.440  
   4.441              if ( d1 < d2 )
   4.442              {
   4.443 -                spin_lock(&d2->event_channel_lock);
   4.444 +                spin_lock(&d2->evtchn_lock);
   4.445              }
   4.446              else if ( d1 != d2 )
   4.447              {
   4.448 -                spin_unlock(&d1->event_channel_lock);
   4.449 -                spin_lock(&d2->event_channel_lock);
   4.450 +                spin_unlock(&d1->evtchn_lock);
   4.451 +                spin_lock(&d2->evtchn_lock);
   4.452                  goto again;
   4.453              }
   4.454          }
   4.455 -        else if ( d2 != chn1[port1].u.interdomain.remote_dom->domain )
   4.456 +        else if ( d2 != chn1->u.interdomain.remote_dom )
   4.457          {
   4.458              rc = -EINVAL;
   4.459              goto out;
   4.460          }
   4.461      
   4.462 -        chn2  = d2->event_channel;
   4.463 -        port2 = chn1[port1].u.interdomain.remote_port;
   4.464 +        port2 = chn1->u.interdomain.remote_port;
   4.465 +        BUG_ON(!port_is_valid(d2, port2));
   4.466  
   4.467 -        if ( port2 >= d2->max_event_channel )
   4.468 -            BUG();
   4.469 -        if ( chn2[port2].state != ECS_INTERDOMAIN )
   4.470 -            BUG();
   4.471 -        if ( chn2[port2].u.interdomain.remote_dom->domain != d1 )
   4.472 -            BUG();
   4.473 +        chn2 = evtchn_from_port(d2, port2);
   4.474 +        BUG_ON(chn2->state != ECS_INTERDOMAIN);
   4.475 +        BUG_ON(chn2->u.interdomain.remote_dom != d1);
   4.476  
   4.477 -        chn2[port2].state = ECS_UNBOUND;
   4.478 -        chn2[port2].u.unbound.remote_domid = d1->domain_id;
   4.479 +        chn2->state = ECS_UNBOUND;
   4.480 +        chn2->u.unbound.remote_domid = d1->domain_id;
   4.481          break;
   4.482  
   4.483      default:
   4.484          BUG();
   4.485      }
   4.486  
   4.487 -    chn1[port1].state = ECS_FREE;
   4.488 +    chn1->state = ECS_FREE;
   4.489  
   4.490   out:
   4.491      if ( d2 != NULL )
   4.492      {
   4.493          if ( d1 != d2 )
   4.494 -            spin_unlock(&d2->event_channel_lock);
   4.495 +            spin_unlock(&d2->evtchn_lock);
   4.496          put_domain(d2);
   4.497      }
   4.498      
   4.499 -    spin_unlock(&d1->event_channel_lock);
   4.500 +    spin_unlock(&d1->evtchn_lock);
   4.501  
   4.502      return rc;
   4.503  }
   4.504 @@ -476,50 +456,52 @@ static long evtchn_close(evtchn_close_t 
   4.505  
   4.506  long evtchn_send(int lport)
   4.507  {
   4.508 -    struct domain *ld = current->domain;
   4.509 -    struct vcpu *rd;
   4.510 +    struct evtchn *lchn, *rchn;
   4.511 +    struct domain *ld = current->domain, *rd;
   4.512      int            rport, ret = 0;
   4.513  
   4.514 -    spin_lock(&ld->event_channel_lock);
   4.515 +    spin_lock(&ld->evtchn_lock);
   4.516  
   4.517 -    if ( unlikely(lport < 0) ||
   4.518 -         unlikely(lport >= ld->max_event_channel))
   4.519 +    if ( unlikely(!port_is_valid(ld, lport)) )
   4.520      {
   4.521 -        spin_unlock(&ld->event_channel_lock);
   4.522 +        spin_unlock(&ld->evtchn_lock);
   4.523          return -EINVAL;
   4.524      }
   4.525  
   4.526 -    switch ( ld->event_channel[lport].state )
   4.527 +    lchn = evtchn_from_port(ld, lport);
   4.528 +    switch ( lchn->state )
   4.529      {
   4.530      case ECS_INTERDOMAIN:
   4.531 -        rd    = ld->event_channel[lport].u.interdomain.remote_dom;
   4.532 -        rport = ld->event_channel[lport].u.interdomain.remote_port;
   4.533 -
   4.534 -        evtchn_set_pending(rd, rport);
   4.535 +        rd    = lchn->u.interdomain.remote_dom;
   4.536 +        rport = lchn->u.interdomain.remote_port;
   4.537 +        rchn  = evtchn_from_port(rd, rport);
   4.538 +        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
   4.539          break;
   4.540      case ECS_IPI:
   4.541 -        rd = ld->vcpu[ld->event_channel[lport].u.ipi_vcpu];
   4.542 -        if ( rd  )
   4.543 -            evtchn_set_pending(rd, lport);
   4.544 -        else
   4.545 -            ret = -EINVAL;
   4.546 +        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
   4.547          break;
   4.548      default:
   4.549          ret = -EINVAL;
   4.550      }
   4.551  
   4.552 -    spin_unlock(&ld->event_channel_lock);
   4.553 +    spin_unlock(&ld->evtchn_lock);
   4.554  
   4.555      return ret;
   4.556  }
   4.557  
   4.558 +void send_guest_pirq(struct domain *d, int pirq)
   4.559 +{
   4.560 +    int port = d->pirq_to_evtchn[pirq];
   4.561 +    struct evtchn *chn = evtchn_from_port(d, port);
   4.562 +    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
   4.563 +}
   4.564  
   4.565  static long evtchn_status(evtchn_status_t *status)
   4.566  {
   4.567      struct domain   *d;
   4.568      domid_t          dom = status->dom;
   4.569      int              port = status->port;
   4.570 -    event_channel_t *chn;
   4.571 +    struct evtchn   *chn;
   4.572      long             rc = 0;
   4.573  
   4.574      if ( dom == DOMID_SELF )
   4.575 @@ -530,17 +512,16 @@ static long evtchn_status(evtchn_status_
   4.576      if ( (d = find_domain_by_id(dom)) == NULL )
   4.577          return -ESRCH;
   4.578  
   4.579 -    spin_lock(&d->event_channel_lock);
   4.580 +    spin_lock(&d->evtchn_lock);
   4.581  
   4.582 -    chn = d->event_channel;
   4.583 -
   4.584 -    if ( (port < 0) || (port >= d->max_event_channel) )
   4.585 +    if ( !port_is_valid(d, port) )
   4.586      {
   4.587          rc = -EINVAL;
   4.588          goto out;
   4.589      }
   4.590  
   4.591 -    switch ( chn[port].state )
   4.592 +    chn = evtchn_from_port(d, port);
   4.593 +    switch ( chn->state )
   4.594      {
   4.595      case ECS_FREE:
   4.596      case ECS_RESERVED:
   4.597 @@ -548,32 +529,32 @@ static long evtchn_status(evtchn_status_
   4.598          break;
   4.599      case ECS_UNBOUND:
   4.600          status->status = EVTCHNSTAT_unbound;
   4.601 -        status->u.unbound.dom = chn[port].u.unbound.remote_domid;
   4.602 +        status->u.unbound.dom = chn->u.unbound.remote_domid;
   4.603          break;
   4.604      case ECS_INTERDOMAIN:
   4.605          status->status = EVTCHNSTAT_interdomain;
   4.606          status->u.interdomain.dom  =
   4.607 -            chn[port].u.interdomain.remote_dom->domain->domain_id;
   4.608 -        status->u.interdomain.port = chn[port].u.interdomain.remote_port;
   4.609 +            chn->u.interdomain.remote_dom->domain_id;
   4.610 +        status->u.interdomain.port = chn->u.interdomain.remote_port;
   4.611          break;
   4.612      case ECS_PIRQ:
   4.613          status->status = EVTCHNSTAT_pirq;
   4.614 -        status->u.pirq = chn[port].u.pirq;
   4.615 +        status->u.pirq = chn->u.pirq;
   4.616          break;
   4.617      case ECS_VIRQ:
   4.618          status->status = EVTCHNSTAT_virq;
   4.619 -        status->u.virq = chn[port].u.virq;
   4.620 +        status->u.virq = chn->u.virq;
   4.621          break;
   4.622      case ECS_IPI:
   4.623          status->status     = EVTCHNSTAT_ipi;
   4.624 -        status->u.ipi_vcpu = chn[port].u.ipi_vcpu;
   4.625 +        status->u.ipi_vcpu = chn->notify_vcpu_id;
   4.626          break;
   4.627      default:
   4.628          BUG();
   4.629      }
   4.630  
   4.631   out:
   4.632 -    spin_unlock(&d->event_channel_lock);
   4.633 +    spin_unlock(&d->evtchn_lock);
   4.634      put_domain(d);
   4.635      return rc;
   4.636  }
   4.637 @@ -642,26 +623,26 @@ long do_event_channel_op(evtchn_op_t *uo
   4.638  }
   4.639  
   4.640  
   4.641 -int init_event_channels(struct domain *d)
   4.642 +int evtchn_init(struct domain *d)
   4.643  {
   4.644 -    spin_lock_init(&d->event_channel_lock);
   4.645 -    /* Call get_free_port to initialize d->event_channel */
   4.646 -    if ( get_free_port(d->vcpu[0]) != 0 )
   4.647 +    spin_lock_init(&d->evtchn_lock);
   4.648 +    if ( get_free_port(d) != 0 )
   4.649          return -EINVAL;
   4.650 -    d->event_channel[0].state = ECS_RESERVED;
   4.651 +    evtchn_from_port(d, 0)->state = ECS_RESERVED;
   4.652      return 0;
   4.653  }
   4.654  
   4.655  
   4.656 -void destroy_event_channels(struct domain *d)
   4.657 +void evtchn_destroy(struct domain *d)
   4.658  {
   4.659      int i;
   4.660 -    if ( d->event_channel != NULL )
   4.661 -    {
   4.662 -        for ( i = 0; i < d->max_event_channel; i++ )
   4.663 +
   4.664 +    for ( i = 0; port_is_valid(d, i); i++ )
   4.665              (void)__evtchn_close(d, i);
   4.666 -        xfree(d->event_channel);
   4.667 -    }
   4.668 +
   4.669 +    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
   4.670 +        if ( d->evtchn[i] != NULL )
   4.671 +            xfree(d->evtchn[i]);
   4.672  }
   4.673  
   4.674  /*
     5.1 --- a/xen/include/public/event_channel.h	Fri Jun 03 14:10:37 2005 +0000
     5.2 +++ b/xen/include/public/event_channel.h	Fri Jun 03 16:42:10 2005 +0000
     5.3 @@ -51,9 +51,11 @@ typedef struct {
     5.4  } PACKED evtchn_bind_interdomain_t; /* 12 bytes */
     5.5  
     5.6  /*
     5.7 - * EVTCHNOP_bind_virq: Bind a local event channel to IRQ <irq>.
     5.8 + * EVTCHNOP_bind_virq: Bind a local event channel to IRQ <irq> on calling vcpu.
     5.9   * NOTES:
    5.10 - *  1. A virtual IRQ may be bound to at most one event channel per domain.
    5.11 + *  1. A virtual IRQ may be bound to at most one event channel per vcpu.
    5.12 + *  2. The allocated event channel is bound to the calling vcpu. The binding
    5.13 + *     may not be changed.
    5.14   */
    5.15  #define EVTCHNOP_bind_virq        1
    5.16  typedef struct {
    5.17 @@ -80,6 +82,20 @@ typedef struct {
    5.18  } PACKED evtchn_bind_pirq_t; /* 12 bytes */
    5.19  
    5.20  /*
    5.21 + * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
    5.22 + * NOTES:
    5.23 + *  1. The allocated event channel is bound to the calling vcpu. The binding
    5.24 + *     may not be changed.
    5.25 + */
    5.26 +#define EVTCHNOP_bind_ipi         7
    5.27 +typedef struct {
    5.28 +    /* IN parameters. */
    5.29 +    u32 ipi_vcpu;                     /*  0 */
    5.30 +    /* OUT parameters. */
    5.31 +    u32 port;                         /*  4 */
    5.32 +} PACKED evtchn_bind_ipi_t; /* 8 bytes */
    5.33 +
    5.34 +/*
    5.35   * EVTCHNOP_close: Close the communication channel which has an endpoint at
    5.36   * <dom, port>. If the channel is interdomain then the remote end is placed in
    5.37   * the unbound state (EVTCHNSTAT_unbound), awaiting a new connection.
    5.38 @@ -145,18 +161,6 @@ typedef struct {
    5.39      } PACKED u;
    5.40  } PACKED evtchn_status_t; /* 20 bytes */
    5.41  
    5.42 -/*
    5.43 - * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
    5.44 - */
    5.45 -#define EVTCHNOP_bind_ipi         7
    5.46 -typedef struct {
    5.47 -    /* IN parameters. */
    5.48 -    u32 ipi_vcpu;                     /*  0 */
    5.49 -    /* OUT parameters. */
    5.50 -    u32 port;                         /*  4 */
    5.51 -} PACKED evtchn_bind_ipi_t; /* 8 bytes */
    5.52 -
    5.53 -
    5.54  typedef struct {
    5.55      u32 cmd; /* EVTCHNOP_* */         /*  0 */
    5.56      u32 __reserved;                   /*  4 */
    5.57 @@ -165,10 +169,10 @@ typedef struct {
    5.58          evtchn_bind_interdomain_t bind_interdomain;
    5.59          evtchn_bind_virq_t        bind_virq;
    5.60          evtchn_bind_pirq_t        bind_pirq;
    5.61 +        evtchn_bind_ipi_t         bind_ipi;
    5.62          evtchn_close_t            close;
    5.63          evtchn_send_t             send;
    5.64          evtchn_status_t           status;
    5.65 -        evtchn_bind_ipi_t         bind_ipi;
    5.66          u8                        __dummy[24];
    5.67      } PACKED u;
    5.68  } PACKED evtchn_op_t; /* 32 bytes */
     6.1 --- a/xen/include/xen/event.h	Fri Jun 03 14:10:37 2005 +0000
     6.2 +++ b/xen/include/xen/event.h	Fri Jun 03 16:42:10 2005 +0000
     6.3 @@ -53,7 +53,7 @@ static inline void evtchn_set_pending(st
     6.4  
     6.5  /*
     6.6   * send_guest_virq:
     6.7 - *  @d:        Domain to which virtual IRQ should be sent
     6.8 + *  @v:        VCPU to which virtual IRQ should be sent
     6.9   *  @virq:     Virtual IRQ number (VIRQ_*)
    6.10   */
    6.11  static inline void send_guest_virq(struct vcpu *v, int virq)
    6.12 @@ -69,10 +69,7 @@ static inline void send_guest_virq(struc
    6.13   *  @d:        Domain to which physical IRQ should be sent
    6.14   *  @pirq:     Physical IRQ number
    6.15   */
    6.16 -static inline void send_guest_pirq(struct vcpu *v, int pirq)
    6.17 -{
    6.18 -    evtchn_set_pending(v, v->domain->pirq_to_evtchn[pirq]);
    6.19 -}
    6.20 +extern void send_guest_pirq(struct domain *d, int pirq);
    6.21  
    6.22  #define event_pending(_d)                                     \
    6.23      ((_d)->vcpu_info->evtchn_upcall_pending && \
     7.1 --- a/xen/include/xen/sched.h	Fri Jun 03 14:10:37 2005 +0000
     7.2 +++ b/xen/include/xen/sched.h	Fri Jun 03 16:42:10 2005 +0000
     7.3 @@ -19,7 +19,11 @@ extern rwlock_t domlist_lock;
     7.4  /* A global pointer to the initial domain (DOM0). */
     7.5  extern struct domain *dom0;
     7.6  
     7.7 -typedef struct event_channel_st
     7.8 +#define MAX_EVTCHNS        1024
     7.9 +#define EVTCHNS_PER_BUCKET 128
    7.10 +#define NR_EVTCHN_BUCKETS  (MAX_EVTCHNS / EVTCHNS_PER_BUCKET)
    7.11 +
    7.12 +struct evtchn
    7.13  {
    7.14  #define ECS_FREE         0 /* Channel is available for use.                  */
    7.15  #define ECS_RESERVED     1 /* Channel is reserved.                           */
    7.16 @@ -28,24 +32,23 @@ typedef struct event_channel_st
    7.17  #define ECS_PIRQ         4 /* Channel is bound to a physical IRQ line.       */
    7.18  #define ECS_VIRQ         5 /* Channel is bound to a virtual IRQ line.        */
    7.19  #define ECS_IPI          6 /* Channel is bound to a virtual IPI line.        */
    7.20 -    u16 state;
    7.21 +    u16 state;             /* ECS_* */
    7.22 +    u16 notify_vcpu_id;    /* VCPU for local delivery notification */
    7.23      union {
    7.24          struct {
    7.25              domid_t remote_domid;
    7.26 -        } __attribute__ ((packed)) unbound; /* state == ECS_UNBOUND */
    7.27 +        } unbound;     /* state == ECS_UNBOUND */
    7.28          struct {
    7.29 -            u16                 remote_port;
    7.30 -            struct vcpu *remote_dom;
    7.31 -        } __attribute__ ((packed)) interdomain; /* state == ECS_INTERDOMAIN */
    7.32 -        u16 pirq; /* state == ECS_PIRQ */
    7.33 -        u16 virq; /* state == ECS_VIRQ */
    7.34 -        u32 ipi_vcpu; /* state == ECS_IPI */
    7.35 +            u16            remote_port;
    7.36 +            struct domain *remote_dom;
    7.37 +        } interdomain; /* state == ECS_INTERDOMAIN */
    7.38 +        u16 pirq;      /* state == ECS_PIRQ */
    7.39 +        u16 virq;      /* state == ECS_VIRQ */
    7.40      } u;
    7.41 -} event_channel_t;
    7.42 +};
    7.43  
    7.44 -int  init_event_channels(struct domain *d);
    7.45 -void destroy_event_channels(struct domain *d);
    7.46 -int  init_vcpu_event_channels(struct vcpu *v);
    7.47 +int  evtchn_init(struct domain *d);
    7.48 +void evtchn_destroy(struct domain *d);
    7.49  
    7.50  #define CPUMAP_RUNANYWHERE 0xFFFFFFFF
    7.51  
    7.52 @@ -109,9 +112,8 @@ struct domain
    7.53      struct domain   *next_in_hashbucket;
    7.54  
    7.55      /* Event channel information. */
    7.56 -    event_channel_t *event_channel;
    7.57 -    unsigned int     max_event_channel;
    7.58 -    spinlock_t       event_channel_lock;
    7.59 +    struct evtchn   *evtchn[NR_EVTCHN_BUCKETS];
    7.60 +    spinlock_t       evtchn_lock;
    7.61  
    7.62      grant_table_t   *grant_table;
    7.63