ia64/xen-unstable

changeset 1128:74915dc117d5

bitkeeper revision 1.751 (403ce3e1pBEv9Q629_q2HvL9RZ0ThA)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into labyrinth.cl.cam.ac.uk:/auto/homes/maw48/xeno-unstable-tbufs-new.bk
author maw48@labyrinth.cl.cam.ac.uk
date Wed Feb 25 18:05:21 2004 +0000 (2004-02-25)
parents bd4df43914c9 9ed81ff882d4
children acd0f2cab313
files .rootkeys BitKeeper/etc/logging_ok xen/arch/i386/apic.c xen/common/domain.c xen/common/event_channel.c xen/include/hypervisor-ifs/event_channel.h xen/include/hypervisor-ifs/hypervisor-if.h xen/include/xeno/sched.h
line diff
     1.1 --- a/.rootkeys	Wed Feb 25 15:36:47 2004 +0000
     1.2 +++ b/.rootkeys	Wed Feb 25 18:05:21 2004 +0000
     1.3 @@ -417,6 +417,7 @@ 400304fcmRQmDdFYEzDh0wcBba9alg xen/inclu
     1.4  3ddb79c2YTaZwOqWin9-QNgHge5RVw xen/include/hypervisor-ifs/block.h
     1.5  3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen/include/hypervisor-ifs/dom0_ops.h
     1.6  3e6377eaioRoNm0m_HSDEAd4Vqrq_w xen/include/hypervisor-ifs/dom_mem_ops.h
     1.7 +403cd194j2pyLqXD8FJ-ukvZzkPenw xen/include/hypervisor-ifs/event_channel.h
     1.8  3ddb79c25UE59iu4JJcbRalx95mvcg xen/include/hypervisor-ifs/hypervisor-if.h
     1.9  3ead095dE_VF-QA88rl_5cWYRWtRVQ xen/include/hypervisor-ifs/kbd.h
    1.10  3ddb79c2oRPrzClk3zbTkRHlpumzKA xen/include/hypervisor-ifs/network.h
     2.1 --- a/BitKeeper/etc/logging_ok	Wed Feb 25 15:36:47 2004 +0000
     2.2 +++ b/BitKeeper/etc/logging_ok	Wed Feb 25 18:05:21 2004 +0000
     2.3 @@ -21,6 +21,7 @@ kaf24@scramble.cl.cam.ac.uk
     2.4  kaf24@striker.cl.cam.ac.uk
     2.5  laudney@eclipse.(none)
     2.6  lynx@idefix.cl.cam.ac.uk
     2.7 +maw48@labyrinth.cl.cam.ac.uk
     2.8  mwilli2@equilibrium.research.intel-research.net
     2.9  rac61@labyrinth.cl.cam.ac.uk
    2.10  rgr22@boulderdash.cl.cam.ac.uk
     3.1 --- a/xen/arch/i386/apic.c	Wed Feb 25 15:36:47 2004 +0000
     3.2 +++ b/xen/arch/i386/apic.c	Wed Feb 25 18:05:21 2004 +0000
     3.3 @@ -659,7 +659,6 @@ void __init setup_APIC_clocks (void)
     3.4   */
     3.5  int reprogram_ac_timer(s_time_t timeout)
     3.6  {
     3.7 -    int         cpu = smp_processor_id();
     3.8      s_time_t    now;
     3.9      s_time_t    expire;
    3.10      u64         apic_tmict;
    3.11 @@ -669,7 +668,8 @@ int reprogram_ac_timer(s_time_t timeout)
    3.12       * cause an immediate interrupt). At least this is guaranteed to hold it
    3.13       * off for ages (esp. since the clock ticks on bus clock, not cpu clock!).
    3.14       */
    3.15 -    if (timeout == 0) {
    3.16 +    if ( timeout == 0 )
    3.17 +    {
    3.18          apic_tmict = 0xffffffff;
    3.19          goto reprogram;
    3.20      }
    3.21 @@ -677,10 +677,12 @@ int reprogram_ac_timer(s_time_t timeout)
    3.22      now = NOW();
    3.23      expire = timeout - now; /* value from now */
    3.24  
    3.25 -    if (expire <= 0) {
    3.26 +    if ( expire <= 0 )
    3.27 +    {
    3.28          Dprintk("APICT[%02d] Timeout in the past 0x%08X%08X > 0x%08X%08X\n", 
    3.29 -                cpu, (u32)(now>>32), (u32)now, (u32)(timeout>>32),(u32)timeout);
    3.30 -        return 0;       /* timeout value in the past */
    3.31 +                smp_processor_id(), (u32)(now>>32), 
    3.32 +                (u32)now, (u32)(timeout>>32),(u32)timeout);
    3.33 +        return 0;
    3.34      }
    3.35  
    3.36      /*
    3.37 @@ -693,12 +695,15 @@ int reprogram_ac_timer(s_time_t timeout)
    3.38      /* conversion to bus units */
    3.39      apic_tmict = (((u64)bus_scale) * expire)>>18;
    3.40  
    3.41 -    if (apic_tmict >= 0xffffffff) {
    3.42 -        Dprintk("APICT[%02d] Timeout value too large\n", cpu);
    3.43 +    if ( apic_tmict >= 0xffffffff )
    3.44 +    {
    3.45 +        Dprintk("APICT[%02d] Timeout value too large\n", smp_processor_id());
    3.46          apic_tmict = 0xffffffff;
    3.47      }
    3.48 -    if (apic_tmict == 0) {
    3.49 -        Dprintk("APICT[%02d] timeout value too small\n", cpu);
    3.50 +
    3.51 +    if ( apic_tmict == 0 )
    3.52 +    {
    3.53 +        Dprintk("APICT[%02d] timeout value too small\n", smp_processor_id());
    3.54          return 0;
    3.55      }
    3.56  
     4.1 --- a/xen/common/domain.c	Wed Feb 25 15:36:47 2004 +0000
     4.2 +++ b/xen/common/domain.c	Wed Feb 25 18:05:21 2004 +0000
     4.3 @@ -127,6 +127,8 @@ void kill_domain_with_errmsg(const char 
     4.4  
     4.5  void __kill_domain(struct task_struct *p)
     4.6  {
     4.7 +    extern void destroy_event_channels(struct task_struct *);
     4.8 +
     4.9      int i;
    4.10      struct task_struct **pp;
    4.11      unsigned long flags;
    4.12 @@ -149,6 +151,8 @@ void __kill_domain(struct task_struct *p
    4.13      for ( i = 0; i < MAX_DOMAIN_VIFS; i++ )
    4.14          unlink_net_vif(p->net_vif_list[i]);
    4.15  
    4.16 +    destroy_event_channels(p);
    4.17 +
    4.18      /*
    4.19       * Note this means that find_domain_by_id may fail, even when the caller
    4.20       * holds a reference to the domain being queried. Take care!
    4.21 @@ -467,8 +471,6 @@ unsigned int alloc_new_dom_mem(struct ta
    4.22  /* Release resources belonging to task @p. */
    4.23  void release_task(struct task_struct *p)
    4.24  {
    4.25 -    extern void destroy_event_channels(struct task_struct *);
    4.26 -
    4.27      ASSERT(p->state == TASK_DYING);
    4.28      ASSERT(!p->has_cpu);
    4.29  
    4.30 @@ -481,7 +483,6 @@ void release_task(struct task_struct *p)
    4.31      destroy_blkdev_info(p);
    4.32  
    4.33      /* Free all memory associated with this domain. */
    4.34 -    destroy_event_channels(p);
    4.35      free_page((unsigned long)p->mm.perdomain_pt);
    4.36      UNSHARE_PFN(virt_to_page(p->shared_info));
    4.37      free_all_dom_mem(p);
     5.1 --- a/xen/common/event_channel.c	Wed Feb 25 15:36:47 2004 +0000
     5.2 +++ b/xen/common/event_channel.c	Wed Feb 25 18:05:21 2004 +0000
     5.3 @@ -3,7 +3,7 @@
     5.4   * 
     5.5   * Event channels between domains.
     5.6   * 
     5.7 - * Copyright (c) 2003, K A Fraser.
     5.8 + * Copyright (c) 2003-2004, K A Fraser.
     5.9   * 
    5.10   * This program is distributed in the hope that it will be useful,
    5.11   * but WITHOUT ANY WARRANTY; without even the implied warranty of
    5.12 @@ -22,135 +22,147 @@
    5.13  #include <xeno/sched.h>
    5.14  #include <xeno/event.h>
    5.15  
    5.16 +#include <hypervisor-ifs/hypervisor-if.h>
    5.17 +#include <hypervisor-ifs/event_channel.h>
    5.18 +
    5.19  #define MAX_EVENT_CHANNELS 1024
    5.20  
    5.21 -static long event_channel_open(u16 target_dom)
    5.22 +static int get_free_port(struct task_struct *p)
    5.23  {
    5.24 -    struct task_struct *lp = current, *rp;
    5.25 -    int                 i, lmax, rmax, lid, rid;
    5.26 -    event_channel_t    *lchn, *rchn;
    5.27 -    shared_info_t      *rsi;
    5.28 +    int max, port;
    5.29 +    event_channel_t *chn;
    5.30 +
    5.31 +    max = p->max_event_channel;
    5.32 +    chn = p->event_channel;
    5.33 +
    5.34 +    for ( port = 0; port < max; port++ )
    5.35 +        if ( chn[port].state == ECS_FREE )
    5.36 +            break;
    5.37 +
    5.38 +    if ( port == max )
    5.39 +    {
    5.40 +        if ( max == MAX_EVENT_CHANNELS )
    5.41 +            return -ENOSPC;
    5.42 +        
    5.43 +        max = (max == 0) ? 4 : (max * 2);
    5.44 +        
    5.45 +        chn = kmalloc(max * sizeof(event_channel_t), GFP_KERNEL);
    5.46 +        if ( unlikely(chn == NULL) )
    5.47 +            return -ENOMEM;
    5.48 +
    5.49 +        memset(chn, 0, max * sizeof(event_channel_t));
    5.50 +
    5.51 +        if ( p->event_channel != NULL )
    5.52 +        {
    5.53 +            memcpy(chn, p->event_channel, (max/2) * sizeof(event_channel_t));
    5.54 +            kfree(p->event_channel);
    5.55 +        }
    5.56 +
    5.57 +        p->event_channel     = chn;
    5.58 +        p->max_event_channel = max;
    5.59 +    }
    5.60 +
    5.61 +    return port;
    5.62 +}
    5.63 +
    5.64 +static inline unsigned long set_event_pending(struct task_struct *p, int port)
    5.65 +{
    5.66 +    if ( !test_and_set_bit(port,    &p->shared_info->event_channel_pend[0]) &&
    5.67 +         !test_and_set_bit(port>>5, &p->shared_info->event_channel_pend_sel) )
    5.68 +        return mark_guest_event(p, _EVENT_EVTCHN);
    5.69 +    return 0;
    5.70 +}
    5.71 +
    5.72 +static inline unsigned long set_event_disc(struct task_struct *p, int port)
    5.73 +{
    5.74 +    if ( !test_and_set_bit(port,    &p->shared_info->event_channel_disc[0]) &&
    5.75 +         !test_and_set_bit(port>>5, &p->shared_info->event_channel_disc_sel) )
    5.76 +        return mark_guest_event(p, _EVENT_EVTCHN);
    5.77 +    return 0;
    5.78 +}
    5.79 +
    5.80 +static long event_channel_open(evtchn_open_t *open)
    5.81 +{
    5.82 +    struct task_struct *lp, *rp;
    5.83 +    int                 lport = 0, rport = 0;
    5.84      unsigned long       cpu_mask;
    5.85 +    domid_t             ldom = open->local_dom, rdom = open->remote_dom;
    5.86      long                rc = 0;
    5.87  
    5.88 -    rp = find_domain_by_id(target_dom);
    5.89 +    if ( !IS_PRIV(current) )
    5.90 +        return -EPERM;
    5.91 +
    5.92 +    /* 'local_dom' may be DOMID_SELF. 'remote_dom' cannot be. */
    5.93 +    if ( ldom == DOMID_SELF )
    5.94 +        ldom = current->domain;
    5.95 +
    5.96 +    /* Event channel must connect distinct domains. */
    5.97 +    if ( ldom == rdom )
    5.98 +        return -EINVAL;
    5.99  
   5.100 -    /*
   5.101 -     * We need locks at both ends to make a connection. We avoid deadlock
   5.102 -     * by acquiring the locks in address order.
   5.103 -     */
   5.104 -    if ( (unsigned long)lp < (unsigned long)rp )
   5.105 +    if ( ((lp = find_domain_by_id(ldom)) == NULL) ||
   5.106 +         ((rp = find_domain_by_id(rdom)) == NULL) )
   5.107 +    {
   5.108 +        if ( lp != NULL )
   5.109 +            put_task_struct(lp);
   5.110 +        return -ESRCH;
   5.111 +    }
   5.112 +
   5.113 +    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
   5.114 +    if ( ldom < rdom )
   5.115      {
   5.116          spin_lock(&lp->event_channel_lock);
   5.117          spin_lock(&rp->event_channel_lock);
   5.118      }
   5.119      else
   5.120      {
   5.121 -        if ( likely(rp != NULL) )
   5.122 -            spin_lock(&rp->event_channel_lock);
   5.123 +        spin_lock(&rp->event_channel_lock);
   5.124          spin_lock(&lp->event_channel_lock);
   5.125      }
   5.126  
   5.127 -    lmax = lp->max_event_channel;
   5.128 -    lchn = lp->event_channel;
   5.129 -    lid  = -1;
   5.130 -
   5.131 -    /*
   5.132 -     * Find the first unused event channel. Also ensure bo channel already
   5.133 -     * exists to the specified target domain.
   5.134 -     */
   5.135 -    for ( i = 0; i < lmax; i++ )
   5.136 -    {
   5.137 -        if ( (lid == -1) && !(lchn[i].flags & ECF_INUSE) )
   5.138 -        {
   5.139 -            lid = i;
   5.140 -        }
   5.141 -        else if ( unlikely(lchn[i].target_dom == target_dom) )
   5.142 -        {
   5.143 -            rc = -EEXIST;
   5.144 -            goto out;
   5.145 -        }
   5.146 -    }
   5.147 -    
   5.148 -    /* If there is no free slot we need to allocate a bigger channel list. */
   5.149 -    if ( unlikely(lid == -1) )
   5.150 +    if ( (lport = get_free_port(lp)) < 0 )
   5.151      {
   5.152 -        /* Reached maximum channel count? */
   5.153 -        if ( unlikely(lmax == MAX_EVENT_CHANNELS) )
   5.154 -        {
   5.155 -            rc = -ENOSPC;
   5.156 -            goto out;
   5.157 -        }
   5.158 -        
   5.159 -        lmax = (lmax == 0) ? 4 : (lmax * 2);
   5.160 -        
   5.161 -        lchn = kmalloc(lmax * sizeof(event_channel_t), GFP_KERNEL);
   5.162 -        if ( unlikely(lchn == NULL) )
   5.163 -        {
   5.164 -            rc = -ENOMEM;
   5.165 -            goto out;
   5.166 -        }
   5.167 +        rc = lport;
   5.168 +        goto out;
   5.169 +    }
   5.170  
   5.171 -        memset(lchn, 0, lmax * sizeof(event_channel_t));
   5.172 -        
   5.173 -        if ( likely(lp->event_channel != NULL) )
   5.174 -            kfree(lp->event_channel);
   5.175 -
   5.176 -        lp->event_channel     = lchn;
   5.177 -        lp->max_event_channel = lmax;
   5.178 +    if ( (rport = get_free_port(rp)) < 0 )
   5.179 +    {
   5.180 +        rc = rport;
   5.181 +        goto out;
   5.182      }
   5.183  
   5.184 -    lchn[lid].target_dom = target_dom;
   5.185 -    lchn[lid].flags      = ECF_INUSE;
   5.186 +    lp->event_channel[lport].remote_dom  = rp;
   5.187 +    lp->event_channel[lport].remote_port = (u16)rport;
   5.188 +    lp->event_channel[lport].state       = ECS_CONNECTED;
   5.189  
   5.190 -    if ( likely(rp != NULL) )
   5.191 -    {
   5.192 -        rchn = rp->event_channel;
   5.193 -        rmax = rp->max_event_channel;
   5.194 -        
   5.195 -        for ( rid = 0; rid < rmax; rid++ )
   5.196 -        {
   5.197 -            if ( (rchn[rid].target_dom == lp->domain) &&
   5.198 -                 (rchn[rid].flags & ECF_INUSE) )
   5.199 -            {
   5.200 -                /*
   5.201 -                 * The target was awaiting a connection. We make the connection
   5.202 -                 * and send a connection-made event to the remote end.
   5.203 -                 */
   5.204 -                rchn[rid].flags = ECF_INUSE | ECF_CONNECTED | lid;
   5.205 -                lchn[lid].flags = ECF_INUSE | ECF_CONNECTED | rid;
   5.206 +    rp->event_channel[rport].remote_dom  = lp;
   5.207 +    rp->event_channel[rport].remote_port = (u16)lport;
   5.208 +    rp->event_channel[rport].state       = ECS_CONNECTED;
   5.209  
   5.210 -                rsi = rp->shared_info;
   5.211 -                if ( !test_and_set_bit(rid,    &rsi->event_channel_pend[0]) &&
   5.212 -                     !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) )
   5.213 -                {
   5.214 -                    cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
   5.215 -                    guest_event_notify(cpu_mask);
   5.216 -                }
   5.217 -
   5.218 -                break;
   5.219 -            }
   5.220 -        }
   5.221 -    }
   5.222 +    cpu_mask  = set_event_pending(lp, lport);
   5.223 +    cpu_mask |= set_event_pending(rp, rport);
   5.224 +    guest_event_notify(cpu_mask);
   5.225      
   5.226   out:
   5.227      spin_unlock(&lp->event_channel_lock);
   5.228 -    if ( rp != NULL )
   5.229 -    {
   5.230 -        spin_unlock(&rp->event_channel_lock);
   5.231 -        put_task_struct(rp);
   5.232 -    }
   5.233 +    spin_unlock(&rp->event_channel_lock);
   5.234 +    
   5.235 +    put_task_struct(lp);
   5.236 +    put_task_struct(rp);
   5.237 +
   5.238 +    open->local_port  = lport;
   5.239 +    open->remote_port = rport;
   5.240  
   5.241      return rc;
   5.242  }
   5.243  
   5.244  
   5.245 -static long event_channel_close(u16 lid)
   5.246 +static long __event_channel_close(struct task_struct *lp, int lport)
   5.247  {
   5.248 -    struct task_struct *lp = current, *rp = NULL;
   5.249 +    struct task_struct *rp = NULL;
   5.250      event_channel_t    *lchn, *rchn;
   5.251 -    u16                 rid;
   5.252 -    shared_info_t      *rsi;
   5.253 +    int                 rport;
   5.254      unsigned long       cpu_mask;
   5.255      long                rc = 0;
   5.256  
   5.257 @@ -159,21 +171,21 @@ static long event_channel_close(u16 lid)
   5.258  
   5.259      lchn = lp->event_channel;
   5.260  
   5.261 -    if ( unlikely(lid >= lp->max_event_channel) || 
   5.262 -         unlikely(!(lchn[lid].flags & ECF_INUSE)) )
   5.263 +    if ( (lport < 0) || (lport >= lp->max_event_channel) || 
   5.264 +         (lchn[lport].state == ECS_FREE) )
   5.265      {
   5.266          rc = -EINVAL;
   5.267          goto out;
   5.268      }
   5.269  
   5.270 -    if ( lchn[lid].flags & ECF_CONNECTED )
   5.271 +    if ( lchn[lport].state == ECS_CONNECTED )
   5.272      {
   5.273          if ( rp == NULL )
   5.274          {
   5.275 -            rp = find_domain_by_id(lchn[lid].target_dom);
   5.276 -            ASSERT(rp != NULL);
   5.277 -            
   5.278 -            if ( (unsigned long)lp < (unsigned long)rp )
   5.279 +            rp = lchn[lport].remote_dom;
   5.280 +            get_task_struct(rp);
   5.281 +
   5.282 +            if ( lp->domain < rp->domain )
   5.283              {
   5.284                  spin_lock(&rp->event_channel_lock);
   5.285              }
   5.286 @@ -184,34 +196,39 @@ static long event_channel_close(u16 lid)
   5.287                  goto again;
   5.288              }
   5.289          }
   5.290 -        else if ( rp->domain != lchn[lid].target_dom )
   5.291 +        else if ( rp != lchn[lport].remote_dom )
   5.292          {
   5.293              rc = -EINVAL;
   5.294              goto out;
   5.295          }
   5.296          
   5.297 -        rchn = rp->event_channel;
   5.298 -        rid  = lchn[lid].flags & ECF_TARGET_ID;
   5.299 -        ASSERT(rid < rp->max_event_channel);
   5.300 -        ASSERT(rchn[rid].flags == (ECF_INUSE | ECF_CONNECTED | lid));
   5.301 -        ASSERT(rchn[rid].target_dom == lp->domain);
   5.302 -
   5.303 -        rchn[rid].flags = ECF_INUSE;
   5.304 +        rchn  = rp->event_channel;
   5.305 +        rport = lchn[lport].remote_port;
   5.306  
   5.307 -        rsi = rp->shared_info;
   5.308 -        if ( !test_and_set_bit(rid,    &rsi->event_channel_disc[0]) &&
   5.309 -             !test_and_set_bit(rid>>5, &rsi->event_channel_disc_sel) )
   5.310 -        {
   5.311 -            cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
   5.312 -            guest_event_notify(cpu_mask);
   5.313 -        }
   5.314 +        if ( rport >= rp->max_event_channel )
   5.315 +            BUG();
   5.316 +        if ( rchn[rport].state != ECS_CONNECTED )
   5.317 +            BUG();
   5.318 +        if ( rchn[rport].remote_dom != lp )
   5.319 +            BUG();
   5.320 +
   5.321 +        rchn[rport].state       = ECS_ZOMBIE;
   5.322 +        rchn[rport].remote_dom  = NULL;
   5.323 +        rchn[rport].remote_port = 0xFFFF;
   5.324 +
   5.325 +        cpu_mask  = set_event_disc(lp, lport);
   5.326 +        cpu_mask |= set_event_disc(rp, rport);
   5.327 +        guest_event_notify(cpu_mask);
   5.328      }
   5.329  
   5.330 -    lchn[lid].target_dom = 0;
   5.331 -    lchn[lid].flags      = 0;
   5.332 +    lchn[lport].state       = ECS_FREE;
   5.333 +    lchn[lport].remote_dom  = NULL;
   5.334 +    lchn[lport].remote_port = 0xFFFF;
   5.335      
   5.336   out:
   5.337      spin_unlock(&lp->event_channel_lock);
   5.340      if ( rp != NULL )
   5.341      {
   5.342          spin_unlock(&rp->event_channel_lock);
   5.343 @@ -222,87 +239,135 @@ static long event_channel_close(u16 lid)
   5.344  }
   5.345  
   5.346  
   5.347 -static long event_channel_send(u16 lid)
   5.348 +static long event_channel_close(evtchn_close_t *close)
   5.349 +{
   5.350 +    struct task_struct *lp;
   5.351 +    int                 lport = close->local_port;
   5.352 +    long                rc;
   5.353 +    domid_t             ldom = close->local_dom;
   5.354 +
   5.355 +    if ( ldom == DOMID_SELF )
   5.356 +        ldom = current->domain;
   5.357 +    else if ( !IS_PRIV(current) )
   5.358 +        return -EPERM;
   5.359 +
   5.360 +    if ( (lp = find_domain_by_id(ldom)) == NULL )
   5.361 +        return -ESRCH;
   5.362 +
   5.363 +    rc = __event_channel_close(lp, lport);
   5.364 +
   5.365 +    put_task_struct(lp);
   5.366 +    return rc;
   5.367 +}
   5.368 +
   5.369 +
   5.370 +static long event_channel_send(int lport)
   5.371  {
   5.372      struct task_struct *lp = current, *rp;
   5.373 -    u16                 rid, rdom;
   5.374 -    shared_info_t      *rsi;
   5.375 +    int                 rport;
   5.376      unsigned long       cpu_mask;
   5.377  
   5.378      spin_lock(&lp->event_channel_lock);
   5.379  
   5.380 -    if ( unlikely(lid >= lp->max_event_channel) || 
   5.381 -         unlikely(!(lp->event_channel[lid].flags & ECF_CONNECTED)) )
   5.382 +    if ( unlikely(lport < 0) ||
   5.383 +         unlikely(lport >= lp->max_event_channel) || 
   5.384 +         unlikely(lp->event_channel[lport].state != ECS_CONNECTED) )
   5.385      {
   5.386          spin_unlock(&lp->event_channel_lock);
   5.387          return -EINVAL;
   5.388      }
   5.389  
   5.390 -    rdom = lp->event_channel[lid].target_dom;
   5.391 -    rid  = lp->event_channel[lid].flags & ECF_TARGET_ID;
   5.392 +    rp    = lp->event_channel[lport].remote_dom;
   5.393 +    rport = lp->event_channel[lport].remote_port;
   5.394 +
   5.395 +    get_task_struct(rp);
   5.396  
   5.397      spin_unlock(&lp->event_channel_lock);
   5.398  
   5.399 -    if ( unlikely(rid >= MAX_EVENT_CHANNELS) || 
   5.400 -         unlikely ((rp = find_domain_by_id(rdom)) == NULL) )
   5.401 -        return -EINVAL;
   5.402 -
   5.403 -    rsi = rp->shared_info;
   5.404 -    if ( !test_and_set_bit(rid,    &rsi->event_channel_pend[0]) &&
   5.405 -         !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) )
   5.406 -    {
   5.407 -        cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
   5.408 -        guest_event_notify(cpu_mask);
   5.409 -    }
   5.410 +    cpu_mask = set_event_pending(rp, rport);
   5.411 +    guest_event_notify(cpu_mask);
   5.412  
   5.413      put_task_struct(rp);
   5.414 +
   5.415      return 0;
   5.416  }
   5.417  
   5.418  
   5.419 -static long event_channel_status(u16 lid)
   5.420 +static long event_channel_status(evtchn_status_t *status)
   5.421  {
   5.422 -    struct task_struct *lp = current;
   5.423 +    struct task_struct *lp;
   5.424 +    domid_t             ldom = status->local_dom;
   5.425 +    int                 lport = status->local_port;
   5.426      event_channel_t    *lchn;
   5.427 -    long                rc = EVTCHNSTAT_closed;
   5.428 +
   5.429 +    if ( ldom == DOMID_SELF )
   5.430 +        ldom = current->domain;
   5.431 +    else if ( !IS_PRIV(current) )
   5.432 +        return -EPERM;
   5.433 +
   5.434 +    if ( (lp = find_domain_by_id(ldom)) == NULL )
   5.435 +        return -ESRCH;
   5.436  
   5.437      spin_lock(&lp->event_channel_lock);
   5.438  
   5.439      lchn = lp->event_channel;
   5.440  
   5.441 -    if ( lid < lp->max_event_channel )
   5.442 +    if ( (lport < 0) || (lport >= lp->max_event_channel) )
   5.443 +    {
   5.444 +        spin_unlock(&lp->event_channel_lock);
   5.445 +        return -EINVAL;
   5.446 +    }
   5.447 +
   5.448 +    switch ( lchn[lport].state )
   5.449      {
   5.450 -        if ( lchn[lid].flags & ECF_CONNECTED )
   5.451 -            rc = EVTCHNSTAT_connected;        
   5.452 -        else if ( lchn[lid].flags & ECF_INUSE )
   5.453 -            rc = EVTCHNSTAT_disconnected;
   5.454 +    case ECS_FREE:
   5.455 +        status->status = EVTCHNSTAT_closed;
   5.456 +        break;
   5.457 +    case ECS_ZOMBIE:
   5.458 +        status->status = EVTCHNSTAT_disconnected;
   5.459 +        break;
   5.460 +    case ECS_CONNECTED:
   5.461 +        status->status = EVTCHNSTAT_connected;
   5.462 +        status->remote_dom  = lchn[lport].remote_dom->domain;
   5.463 +        status->remote_port = lchn[lport].remote_port;
   5.464 +        break;
   5.465 +    default:
   5.466 +        BUG();
   5.467      }
   5.468  
   5.469      spin_unlock(&lp->event_channel_lock);
   5.470 -    return rc;
   5.471 +    return 0;
   5.472  }
   5.473  
   5.474  
   5.475 -long do_event_channel_op(unsigned int cmd, unsigned int id)
   5.476 +long do_event_channel_op(evtchn_op_t *uop)
   5.477  {
   5.478      long rc;
   5.479 +    evtchn_op_t op;
   5.480  
   5.481 -    switch ( cmd )
   5.482 +    if ( copy_from_user(&op, uop, sizeof(op)) != 0 )
   5.483 +        return -EFAULT;
   5.484 +
   5.485 +    switch ( op.cmd )
   5.486      {
   5.487      case EVTCHNOP_open:
   5.488 -        rc = event_channel_open((u16)id);
   5.489 +        rc = event_channel_open(&op.u.open);
   5.490 +        if ( copy_to_user(uop, &op, sizeof(op)) != 0 )
   5.491 +            rc = -EFAULT; /* Cleaning up here would be a mess! */
   5.492          break;
   5.493  
   5.494      case EVTCHNOP_close:
   5.495 -        rc = event_channel_close((u16)id);
   5.496 +        rc = event_channel_close(&op.u.close);
   5.497          break;
   5.498  
   5.499      case EVTCHNOP_send:
   5.500 -        rc = event_channel_send((u16)id);
   5.501 +        rc = event_channel_send(op.u.send.local_port);
   5.502          break;
   5.503  
   5.504      case EVTCHNOP_status:
   5.505 -        rc = event_channel_status((u16)id);
   5.506 +        rc = event_channel_status(&op.u.status);
   5.507 +        if ( copy_to_user(uop, &op, sizeof(op)) != 0 )
   5.508 +            rc = -EFAULT;
   5.509          break;
   5.510  
   5.511      default:
   5.512 @@ -320,7 +385,7 @@ void destroy_event_channels(struct task_
   5.513      if ( p->event_channel != NULL )
   5.514      {
   5.515          for ( i = 0; i < p->max_event_channel; i++ )
   5.516 -            (void)event_channel_close((u16)i);
   5.517 +            (void)__event_channel_close(p, i);
   5.518          kfree(p->event_channel);
   5.519      }
   5.520  }
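
The set_event_pending()/set_event_disc() helpers above maintain a two-level
bitmap in shared_info: bit <port> in event_channel_pend[] flags the port
itself, and bit <port>>5 in event_channel_pend_sel flags the 32-bit word
containing it, so a guest need not scan all 1024 port bits on every
_EVENT_EVTCHN upcall. A minimal guest-side demux sketch follows; it is not
part of this changeset, and evtchn_handle_port() plus the usual
xchg()/__ffs() helpers are assumed:

    /* Sketch only: drain the two-level pending bitmap maintained by
     * set_event_pending(). Assumes 32-bit bitmap words, as on i386. */
    static void evtchn_demux(shared_info_t *si)
    {
        unsigned long sel, pend;
        int word, bit;

        sel = xchg(&si->event_channel_pend_sel, 0);
        while ( sel != 0 )
        {
            word = __ffs(sel);              /* lowest selector bit set */
            sel &= ~(1UL << word);

            pend = xchg(&si->event_channel_pend[word], 0);
            while ( pend != 0 )
            {
                bit   = __ffs(pend);
                pend &= ~(1UL << bit);
                evtchn_handle_port((word << 5) + bit);  /* hypothetical */
            }
        }
    }
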
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/include/hypervisor-ifs/event_channel.h	Wed Feb 25 18:05:21 2004 +0000
     6.3 @@ -0,0 +1,93 @@
     6.4 +/******************************************************************************
     6.5 + * event_channel.h
     6.6 + * 
     6.7 + * Event channels between domains.
     6.8 + * 
     6.9 + * Copyright (c) 2003-2004, K A Fraser.
    6.10 + */
    6.11 +
    6.12 +#ifndef __HYPERVISOR_IFS__EVENT_CHANNEL_H__
    6.13 +#define __HYPERVISOR_IFS__EVENT_CHANNEL_H__
    6.14 +
    6.15 +/*
    6.16 + * EVTCHNOP_open: Open a communication channel between <local_dom> and
    6.17 + * <remote_dom>.
    6.18 + * NOTES:
    6.19 + *  1. <local_dom> may be specified as DOMID_SELF.
    6.20 + *  2. Only a sufficiently-privileged domain may create an event channel.
    6.21 + *  3. <local_port> and <remote_port> are only supplied if the op succeeds.
    6.22 + */
    6.23 +#define EVTCHNOP_open           0
    6.24 +typedef struct evtchn_open
    6.25 +{
    6.26 +    /* IN parameters. */
    6.27 +    domid_t local_dom, remote_dom;
    6.28 +    /* OUT parameters. */
    6.29 +    int     local_port, remote_port;
    6.30 +} evtchn_open_t;
    6.31 +
    6.32 +/*
    6.33 + * EVTCHNOP_close: Close the communication channel which has an endpoint at
    6.34 + * <local_dom, local_port>.
    6.35 + * NOTES:
    6.36 + *  1. <local_dom> may be specified as DOMID_SELF.
    6.37 + *  2. Only a sufficiently-privileged domain may close an event channel
    6.38 + *     for which <local_dom> is not DOMID_SELF.
    6.39 + */
    6.40 +#define EVTCHNOP_close          1
    6.41 +typedef struct evtchn_close
    6.42 +{
    6.43 +    /* IN parameters. */
    6.44 +    domid_t local_dom;
    6.45 +    int     local_port;
    6.46 +    /* No OUT parameters. */
    6.47 +} evtchn_close_t;
    6.48 +
    6.49 +/*
    6.50 + * EVTCHNOP_send: Send an event to the remote end of the channel whose local
    6.51 + * endpoint is <DOMID_SELF, local_port>.
    6.52 + */
    6.53 +#define EVTCHNOP_send           2
    6.54 +typedef struct evtchn_send
    6.55 +{
    6.56 +    /* IN parameters. */
    6.57 +    int     local_port;
    6.58 +    /* No OUT parameters. */
    6.59 +} evtchn_send_t;
    6.60 +
    6.61 +/*
    6.62 + * EVTCHNOP_status: Get the current status of the communication channel which
    6.63 + * has an endpoint at <local_dom, local_port>.
    6.64 + * NOTES:
    6.65 + *  1. <local_dom> may be specified as DOMID_SELF.
    6.66 + *  2. Only a sufficiently-privileged domain may obtain the status of an event
    6.67 + *     channel for which <local_dom> is not DOMID_SELF.
    6.68 + *  3. <remote_dom, remote_port> is only supplied if status is 'connected'.
    6.69 + */
    6.70 +#define EVTCHNOP_status         3
    6.71 +typedef struct evtchn_status
    6.72 +{
    6.73 +    /* IN parameters */
    6.74 +    domid_t local_dom;
    6.75 +    int     local_port;
    6.76 +    /* OUT parameters */
    6.77 +    domid_t remote_dom;
    6.78 +    int     remote_port;
    6.79 +#define EVTCHNSTAT_closed       0  /* Channel is not in use.              */
    6.80 +#define EVTCHNSTAT_disconnected 1  /* Channel is not connected to remote. */
    6.81 +#define EVTCHNSTAT_connected    2  /* Channel is connected to remote.     */
    6.82 +    int     status;
    6.83 +} evtchn_status_t;
    6.84 +
    6.85 +typedef struct evtchn_op
    6.86 +{
    6.87 +    int cmd; /* EVTCHNOP_* */
    6.88 +    union {
    6.89 +        evtchn_open_t   open;
    6.90 +        evtchn_close_t  close;
    6.91 +        evtchn_send_t   send;
    6.92 +        evtchn_status_t status;
    6.93 +    } u;
    6.94 +} evtchn_op_t;
    6.95 +
    6.96 +#endif /* __HYPERVISOR_IFS__EVENT_CHANNEL_H__ */
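
As an illustration of the new multiplexed hypercall interface, a hedged
usage sketch (not part of this changeset): a privileged guest opens a
channel between itself and domain 5. HYPERVISOR_event_channel_op is assumed
to be the guest-side stub that lands in do_event_channel_op() above.

    /* Sketch only: open an event channel from this domain to domain 5. */
    static int open_channel_to_dom5(void)
    {
        evtchn_op_t op;
        int rc;

        op.cmd               = EVTCHNOP_open;
        op.u.open.local_dom  = DOMID_SELF;   /* resolved to our own domain */
        op.u.open.remote_dom = 5;            /* must differ from local_dom */

        rc = HYPERVISOR_event_channel_op(&op);   /* assumed hypercall stub */
        if ( rc == 0 )
            printk("local port %d <-> remote port %d\n",
                   op.u.open.local_port, op.u.open.remote_port);
        return rc;
    }
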
     7.1 --- a/xen/include/hypervisor-ifs/hypervisor-if.h	Wed Feb 25 15:36:47 2004 +0000
     7.2 +++ b/xen/include/hypervisor-ifs/hypervisor-if.h	Wed Feb 25 18:05:21 2004 +0000
     7.3 @@ -168,21 +168,6 @@
     7.4  #define SCHEDOP_exit            3   /* Exit and kill this domain.        */
     7.5  #define SCHEDOP_stop            4   /* Stop executing this domain.       */
     7.6  
     7.7 -/*
     7.8 - * EVTCHNOP_* - Event channel operations.
     7.9 - */
    7.10 -#define EVTCHNOP_open           0  /* Open channel to <target domain>.    */
    7.11 -#define EVTCHNOP_close          1  /* Close <channel id>.                 */
    7.12 -#define EVTCHNOP_send           2  /* Send event on <channel id>.         */
    7.13 -#define EVTCHNOP_status         3  /* Get status of <channel id>.         */
    7.14 -
    7.15 -/*
    7.16 - * EVTCHNSTAT_* - Non-error return values from EVTCHNOP_status.
    7.17 - */
    7.18 -#define EVTCHNSTAT_closed       0  /* Chennel is not in use.              */
    7.19 -#define EVTCHNSTAT_disconnected 1  /* Channel is not connected to remote. */
    7.20 -#define EVTCHNSTAT_connected    2  /* Channel is connected to remote.     */
    7.21 -
    7.22  
    7.23  #ifndef __ASSEMBLY__
    7.24  
     8.1 --- a/xen/include/xeno/sched.h	Wed Feb 25 15:36:47 2004 +0000
     8.2 +++ b/xen/include/xeno/sched.h	Wed Feb 25 18:05:21 2004 +0000
     8.3 @@ -45,13 +45,16 @@ extern struct mm_struct init_mm;
     8.4  
     8.5  #define IS_PRIV(_p) (test_bit(PF_PRIVILEGED, &(_p)->flags))
     8.6  
     8.7 +struct task_struct;
     8.8 +
     8.9  typedef struct event_channel_st
    8.10  {
    8.11 -    u16 target_dom; /* Target domain (i.e. domain at remote end). */
    8.12 -#define ECF_TARGET_ID ((1<<10)-1) /* Channel identifier at remote end.    */
    8.13 -#define ECF_INUSE     (1<<10)     /* Is this channel descriptor in use?   */
    8.14 -#define ECF_CONNECTED (1<<11)     /* Is this channel connected to remote? */
    8.15 -    u16 flags;
    8.16 +    struct task_struct *remote_dom;
    8.17 +    u16                 remote_port;
    8.18 +#define ECS_FREE      0 /* Available for use.                            */
    8.19 +#define ECS_ZOMBIE    1 /* Connection is closed. Remote is disconnected. */
    8.20 +#define ECS_CONNECTED 2 /* Connected to remote end.                      */
    8.21 +    u16                 state;
    8.22  } event_channel_t;
    8.23  
    8.24  struct task_struct
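
Taken with the event_channel.c changes above, the new state field gives each
port a small state machine. The transitions below are a sketch inferred from
event_channel_open() and __event_channel_close(), not a comment from the
source:

    /*
     * Per-port state transitions (sketch):
     *
     *   ECS_FREE      --EVTCHNOP_open (both endpoints)-->  ECS_CONNECTED
     *   ECS_CONNECTED --this endpoint closed----------->   ECS_FREE
     *   ECS_CONNECTED --remote endpoint closed--------->   ECS_ZOMBIE
     *   ECS_ZOMBIE    --this endpoint closed----------->   ECS_FREE
     */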