ia64/xen-unstable

changeset 955:5a9d5824dcb7

bitkeeper revision 1.610 (3fba5ba4BXSYNJAB1zZyNEpKGUOX1A)

Merge scramble.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xeno
author kaf24@scramble.cl.cam.ac.uk
date Tue Nov 18 17:49:24 2003 +0000 (2003-11-18)
parents 279e288dc4f0 70b42e624c5c
children 45296ed1d50d
files .rootkeys xen/arch/i386/entry.S xen/common/domain.c xen/common/event_channel.c xen/include/hypervisor-ifs/hypervisor-if.h xen/include/xeno/event.h xen/include/xeno/mm.h xen/include/xeno/sched.h
line diff
     1.1 --- a/.rootkeys	Tue Nov 18 14:42:49 2003 +0000
     1.2 +++ b/.rootkeys	Tue Nov 18 17:49:24 2003 +0000
     1.3 @@ -247,6 +247,7 @@ 3e6377e4i0c9GtKN65e99OtRbw3AZw xen/commo
     1.4  3ddb79bdYO5D8Av12NHqPeSviav7cg xen/common/domain.c
     1.5  3e32af9aRnYGl4GMOaDKp7JdfhOGhg xen/common/domain_page.c
     1.6  3ddb79bdeyutmaXEfpQvvxj7eQ0fCw xen/common/event.c
     1.7 +3fba5b96H0khoxNiKbjdi0inpXV-Pw xen/common/event_channel.c
     1.8  3ddb79bd9drcFPVxd4w2GPOIjLlXpA xen/common/kernel.c
     1.9  3e4cd9d8LAAghUY0hNIK72uc2ch_Nw xen/common/keyhandler.c
    1.10  3ddb79bduhSEZI8xa7IbGQCpap5y2A xen/common/lib.c
     2.1 --- a/xen/arch/i386/entry.S	Tue Nov 18 14:42:49 2003 +0000
     2.2 +++ b/xen/arch/i386/entry.S	Tue Nov 18 17:49:24 2003 +0000
     2.3 @@ -725,6 +725,7 @@ ENTRY(hypervisor_call_table)
     2.4          .long SYMBOL_NAME(do_multicall)
     2.5          .long SYMBOL_NAME(do_kbd_op)
     2.6          .long SYMBOL_NAME(do_update_va_mapping)
     2.7 +        .long SYMBOL_NAME(do_event_channel_op)
     2.8          .rept NR_syscalls-((.-hypervisor_call_table)/4)
     2.9          .long SYMBOL_NAME(sys_ni_syscall)
    2.10 -	.endr
    2.11 +        .endr
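
The new hypervisor_call_table entry above sits at index 20, matching the __HYPERVISOR_event_channel_op number defined later in this changeset. A minimal guest-side wrapper might look like the sketch below; it assumes the conventional 32-bit Xen calling sequence of the hypercall index in %eax and arguments in %ebx and %ecx before TRAP_INSTR ("int $0x82"), which this changeset does not itself spell out, so treat the register assignments as an assumption.

    /* Hypothetical guest-side wrapper for the new hypercall (sketch only). */
    static inline long HYPERVISOR_event_channel_op(unsigned int cmd, unsigned int id)
    {
        long ret;
        __asm__ __volatile__ (
            "int $0x82"    /* TRAP_INSTR */
            : "=a" (ret)
            : "0" (__HYPERVISOR_event_channel_op), /* hypercall index 20 */
              "b" (cmd), "c" (id)                  /* matches do_event_channel_op(cmd, id) */
            : "memory" );
        return ret;
    }
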
     3.1 --- a/xen/common/domain.c	Tue Nov 18 14:42:49 2003 +0000
     3.2 +++ b/xen/common/domain.c	Tue Nov 18 17:49:24 2003 +0000
     3.3 @@ -52,6 +52,7 @@ struct task_struct *do_createdomain(unsi
     3.4  
     3.5      spin_lock_init(&p->blk_ring_lock);
     3.6      spin_lock_init(&p->page_lock);
     3.7 +    spin_lock_init(&p->event_channel_lock);
     3.8  
     3.9      p->shared_info = (void *)get_free_page(GFP_KERNEL);
    3.10      memset(p->shared_info, 0, PAGE_SIZE);
    3.11 @@ -288,6 +289,8 @@ void free_all_dom_mem(struct task_struct
    3.12  /* Release resources belonging to task @p. */
    3.13  void release_task(struct task_struct *p)
    3.14  {
    3.15 +    extern void destroy_event_channels(struct task_struct *);
    3.16 +
    3.17      ASSERT(p->state == TASK_DYING);
    3.18      ASSERT(!p->has_cpu);
    3.19  
    3.20 @@ -300,6 +303,7 @@ void release_task(struct task_struct *p)
    3.21      destroy_blkdev_info(p);
    3.22  
    3.23      /* Free all memory associated with this domain. */
    3.24 +    destroy_event_channels(p);
    3.25      free_page((unsigned long)p->mm.perdomain_pt);
    3.26      UNSHARE_PFN(virt_to_page(p->shared_info));
    3.27      free_page((unsigned long)p->shared_info);
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/common/event_channel.c	Tue Nov 18 17:49:24 2003 +0000
     4.3 @@ -0,0 +1,338 @@
     4.4 +/******************************************************************************
     4.5 + * event_channel.c
     4.6 + * 
     4.7 + * Event channels between domains.
     4.8 + * 
     4.9 + * Copyright (c) 2003, K A Fraser.
    4.10 + * 
    4.11 + * This program is distributed in the hope that it will be useful,
    4.12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    4.13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    4.14 + * GNU General Public License for more details.
    4.15 + * 
    4.16 + * You should have received a copy of the GNU General Public License
    4.17 + * along with this program; if not, write to the Free Software
    4.18 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    4.19 + */
    4.20 +
    4.21 +#include <xeno/config.h>
    4.22 +#include <xeno/init.h>
    4.23 +#include <xeno/lib.h>
    4.24 +#include <xeno/errno.h>
    4.25 +#include <xeno/sched.h>
    4.26 +#include <xeno/event.h>
    4.27 +
    4.28 +
    4.29 +static long event_channel_open(u16 target_dom)
    4.30 +{
    4.31 +    struct task_struct *lp = current, *rp;
    4.32 +    int                 i, lmax, rmax, lid, rid;
    4.33 +    event_channel_t    *lchn, *rchn;
    4.34 +    shared_info_t      *rsi;
    4.35 +    unsigned long       cpu_mask;
    4.36 +    long                rc = 0;
    4.37 +
    4.38 +    rp = find_domain_by_id(target_dom);
    4.39 +
    4.40 +    /*
    4.41 +     * We need locks at both ends to make a connection. We avoid deadlock
    4.42 +     * by acquiring the locks in address order.
    4.43 +     */
    4.44 +    if ( (unsigned long)lp < (unsigned long)rp )
    4.45 +    {
    4.46 +        spin_lock(&lp->event_channel_lock);
    4.47 +        spin_lock(&rp->event_channel_lock);
    4.48 +    }
    4.49 +    else
    4.50 +    {
    4.51 +        if ( likely(rp != NULL) )
    4.52 +            spin_lock(&rp->event_channel_lock);
    4.53 +        spin_lock(&lp->event_channel_lock);
    4.54 +    }
    4.55 +
    4.56 +    lmax = lp->max_event_channel;
    4.57 +    lchn = lp->event_channel;
    4.58 +    lid  = -1;
    4.59 +
    4.60 +    /*
     4.61 +     * Find the first unused event channel. Also ensure no channel already
    4.62 +     * exists to the specified target domain.
    4.63 +     */
    4.64 +    for ( i = 0; i < lmax; i++ )
    4.65 +    {
    4.66 +        if ( (lid == -1) && !(lchn[i].flags & ECF_INUSE) )
    4.67 +        {
    4.68 +            lid = i;
    4.69 +        }
    4.70 +        else if ( unlikely(lchn[i].target_dom == target_dom) )
    4.71 +        {
    4.72 +            rc = -EEXIST;
    4.73 +            goto out;
    4.74 +        }
    4.75 +    }
    4.76 +    
    4.77 +    /* If there is no free slot we need to allocate a bigger channel list. */
    4.78 +    if ( unlikely(lid == -1) )
    4.79 +    {
    4.80 +        /* Reached maximum channel count? */
    4.81 +        if ( unlikely(lmax == 1024) )
    4.82 +        {
    4.83 +            rc = -ENOSPC;
    4.84 +            goto out;
    4.85 +        }
    4.86 +        
    4.87 +        lmax = (lmax == 0) ? 4 : (lmax * 2);
    4.88 +        
    4.89 +        lchn = kmalloc(lmax * sizeof(event_channel_t), GFP_KERNEL);
    4.90 +        if ( unlikely(lchn == NULL) )
    4.91 +        {
    4.92 +            rc = -ENOMEM;
    4.93 +            goto out;
    4.94 +        }
    4.95 +
    4.96 +        memset(lchn, 0, lmax * sizeof(event_channel_t));
    4.97 +        
    4.98 +        if ( likely(lp->event_channel != NULL) )
    4.99 +            kfree(lp->event_channel);
   4.100 +
   4.101 +        lp->event_channel     = lchn;
   4.102 +        lp->max_event_channel = lmax;
   4.103 +    }
   4.104 +
   4.105 +    lchn[lid].target_dom = target_dom;
   4.106 +    lchn[lid].flags      = ECF_INUSE;
   4.107 +
   4.108 +    if ( likely(rp != NULL) )
   4.109 +    {
   4.110 +        rchn = rp->event_channel;
   4.111 +        rmax = rp->max_event_channel;
   4.112 +        
   4.113 +        for ( rid = 0; rid < rmax; rid++ )
   4.114 +        {
   4.115 +            if ( (rchn[rid].target_dom == lp->domain) &&
   4.116 +                 (rchn[rid].flags & ECF_INUSE) )
   4.117 +            {
   4.118 +                /*
   4.119 +                 * The target was awaiting a connection. We make the connection
   4.120 +                 * and send a connection-made event to the remote end.
   4.121 +                 */
   4.122 +                rchn[rid].flags = ECF_INUSE | ECF_CONNECTED | lid;
   4.123 +                lchn[lid].flags = ECF_INUSE | ECF_CONNECTED | rid;
   4.124 +
   4.125 +                rsi = rp->shared_info;
   4.126 +                if ( !test_and_set_bit(rid,    &rsi->event_channel_pend[0]) &&
   4.127 +                     !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) )
   4.128 +                {
   4.129 +                    cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
   4.130 +                    guest_event_notify(cpu_mask);
   4.131 +                }
   4.132 +
   4.133 +                break;
   4.134 +            }
   4.135 +        }
   4.136 +    }
   4.137 +    
   4.138 + out:
   4.139 +    spin_unlock(&lp->event_channel_lock);
   4.140 +    if ( rp != NULL )
   4.141 +    {
   4.142 +        spin_unlock(&rp->event_channel_lock);
   4.143 +        put_task_struct(rp);
   4.144 +    }
   4.145 +
   4.146 +    return rc;
   4.147 +}
   4.148 +
   4.149 +
   4.150 +static long event_channel_close(u16 lid)
   4.151 +{
   4.152 +    struct task_struct *lp = current, *rp = NULL;
   4.153 +    event_channel_t    *lchn, *rchn;
   4.154 +    u16                 rid;
   4.155 +    shared_info_t      *rsi;
   4.156 +    unsigned long       cpu_mask;
   4.157 +    long                rc = 0;
   4.158 +
   4.159 + again:
   4.160 +    spin_lock(&lp->event_channel_lock);
   4.161 +
   4.162 +    lchn = lp->event_channel;
   4.163 +
   4.164 +    if ( unlikely(lid >= lp->max_event_channel) || 
   4.165 +         unlikely(!(lchn[lid].flags & ECF_INUSE)) )
   4.166 +    {
   4.167 +        rc = -EINVAL;
   4.168 +        goto out;
   4.169 +    }
   4.170 +
   4.171 +    if ( lchn[lid].flags & ECF_CONNECTED )
   4.172 +    {
   4.173 +        if ( rp == NULL )
   4.174 +        {
   4.175 +            rp = find_domain_by_id(lchn[lid].target_dom);
   4.176 +            ASSERT(rp != NULL);
   4.177 +            
   4.178 +            if ( (unsigned long)lp < (unsigned long)rp )
   4.179 +            {
   4.180 +                spin_lock(&rp->event_channel_lock);
   4.181 +            }
   4.182 +            else
   4.183 +            {
   4.184 +                spin_unlock(&lp->event_channel_lock);
   4.185 +                spin_lock(&rp->event_channel_lock);
   4.186 +                goto again;
   4.187 +            }
   4.188 +        }
   4.189 +        else if ( rp->domain != lchn[lid].target_dom )
   4.190 +        {
   4.191 +            rc = -EINVAL;
   4.192 +            goto out;
   4.193 +        }
   4.194 +        
   4.195 +        rchn = rp->event_channel;
   4.196 +        rid  = lchn[lid].flags & ECF_TARGET_ID;
   4.197 +        ASSERT(rid < rp->max_event_channel);
   4.198 +        ASSERT(rchn[rid].flags == (ECF_INUSE | ECF_CONNECTED | lid));
   4.199 +        ASSERT(rchn[rid].target_dom == lp->domain);
   4.200 +
   4.201 +        rchn[rid].flags = ECF_INUSE;
   4.202 +
   4.203 +        rsi = rp->shared_info;
   4.204 +        if ( !test_and_set_bit(rid,    &rsi->event_channel_disc[0]) &&
   4.205 +             !test_and_set_bit(rid>>5, &rsi->event_channel_disc_sel) )
   4.206 +        {
   4.207 +            cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
   4.208 +            guest_event_notify(cpu_mask);
   4.209 +        }
   4.210 +    }
   4.211 +
   4.212 +    lchn[lid].target_dom = 0;
   4.213 +    lchn[lid].flags      = 0;
   4.214 +    
   4.215 + out:
   4.216 +    spin_unlock(&lp->event_channel_lock);
   4.217 +    if ( rp != NULL )
   4.218 +    {
   4.219 +        spin_unlock(&rp->event_channel_lock);
   4.220 +        put_task_struct(rp);
   4.221 +    }
   4.222 +    
   4.223 +    return rc;
   4.224 +}
   4.225 +
   4.226 +
   4.227 +static long event_channel_send(u16 lid)
   4.228 +{
   4.229 +    struct task_struct *lp = current, *rp;
   4.230 +    event_channel_t    *lchn, *rchn;
   4.231 +    u16                 rid;
   4.232 +    shared_info_t      *rsi;
   4.233 +    unsigned long       cpu_mask;
   4.234 +
   4.235 +    spin_lock(&lp->event_channel_lock);
   4.236 +
   4.237 +    lchn = lp->event_channel;
   4.238 +
   4.239 +    if ( unlikely(lid >= lp->max_event_channel) || 
   4.240 +         unlikely((lchn[lid].flags & (ECF_INUSE|ECF_CONNECTED)) !=
   4.241 +                  (ECF_INUSE|ECF_CONNECTED)) )
   4.242 +    {
   4.243 +        spin_unlock(&lp->event_channel_lock);
   4.244 +        return -EINVAL;
   4.245 +    }
   4.246 +
   4.247 +    rid  = lchn[lid].flags & ECF_TARGET_ID;
   4.248 +    rp   = find_domain_by_id(lchn[lid].target_dom);
   4.249 +    ASSERT(rp != NULL);
   4.250 +
   4.251 +    spin_unlock(&lp->event_channel_lock);
   4.252 +
   4.253 +    spin_lock(&rp->event_channel_lock);
   4.254 +
   4.255 +    rchn = rp->event_channel;
   4.256 +
   4.257 +    if ( unlikely(rid >= rp->max_event_channel) )
   4.258 +    {
   4.259 +        spin_unlock(&rp->event_channel_lock);
   4.260 +        put_task_struct(rp);
   4.261 +        return -EINVAL;
   4.262 +    }
   4.263 +
   4.264 +    rsi = rp->shared_info;
   4.265 +    if ( !test_and_set_bit(rid,    &rsi->event_channel_pend[0]) &&
   4.266 +         !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) )
   4.267 +    {
   4.268 +        cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
   4.269 +        guest_event_notify(cpu_mask);
   4.270 +    }
   4.271 +
   4.272 +    spin_unlock(&rp->event_channel_lock);
   4.273 +    put_task_struct(rp);
   4.274 +    return 0;
   4.275 +}
   4.276 +
   4.277 +
   4.278 +static long event_channel_status(u16 lid)
   4.279 +{
   4.280 +    struct task_struct *lp = current;
   4.281 +    event_channel_t    *lchn;
   4.282 +    long                rc = EVTCHNSTAT_closed;
   4.283 +
   4.284 +    spin_lock(&lp->event_channel_lock);
   4.285 +
   4.286 +    lchn = lp->event_channel;
   4.287 +
   4.288 +    if ( lid < lp->max_event_channel )
   4.289 +    {
    4.290 +        if ( (lchn[lid].flags & (ECF_INUSE|ECF_CONNECTED)) == ECF_INUSE )
    4.291 +            rc = EVTCHNSTAT_disconnected;
    4.292 +        else if ( lchn[lid].flags & ECF_INUSE )
    4.293 +            rc = EVTCHNSTAT_connected;
   4.294 +    }
   4.295 +
   4.296 +    spin_unlock(&lp->event_channel_lock);
   4.297 +    return rc;
   4.298 +}
   4.299 +
   4.300 +
   4.301 +long do_event_channel_op(unsigned int cmd, unsigned int id)
   4.302 +{
   4.303 +    long rc;
   4.304 +
   4.305 +    switch ( cmd )
   4.306 +    {
   4.307 +    case EVTCHNOP_open:
   4.308 +        rc = event_channel_open((u16)id);
   4.309 +        break;
   4.310 +
   4.311 +    case EVTCHNOP_close:
   4.312 +        rc = event_channel_close((u16)id);
   4.313 +        break;
   4.314 +
   4.315 +    case EVTCHNOP_send:
   4.316 +        rc = event_channel_send((u16)id);
   4.317 +        break;
   4.318 +
   4.319 +    case EVTCHNOP_status:
   4.320 +        rc = event_channel_status((u16)id);
   4.321 +        break;
   4.322 +
   4.323 +    default:
   4.324 +        rc = -ENOSYS;
   4.325 +        break;
   4.326 +    }
   4.327 +
   4.328 +    return rc;
   4.329 +}
   4.330 +
   4.331 +
   4.332 +void destroy_event_channels(struct task_struct *p)
   4.333 +{
   4.334 +    int i;
   4.335 +    if ( p->event_channel != NULL )
   4.336 +    {
   4.337 +        for ( i = 0; i < p->max_event_channel; i++ )
   4.338 +            (void)event_channel_close((u16)i);
   4.339 +        kfree(p->event_channel);
   4.340 +    }
   4.341 +}
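
Taken together, the EVTCHNOP_* commands implement a simple open/status/send/close lifecycle dispatched by do_event_channel_op(). The sketch below shows how a guest might drive it, reusing the hypothetical HYPERVISOR_event_channel_op() wrapper sketched after the entry.S hunk. Note that EVTCHNOP_open as committed here returns 0 on success rather than the allocated channel id, so the sketch assumes the guest learns its local id some other way (for example by polling EVTCHNOP_status over small ids).

    /* Hypothetical guest-side sketch of the event-channel lifecycle. */
    static long kick_peer(u16 peer_dom, u16 chan_id)
    {
        long rc;

        /* Request a channel to peer_dom; at most one channel per peer exists. */
        rc = HYPERVISOR_event_channel_op(EVTCHNOP_open, peer_dom);
        if ( (rc != 0) && (rc != -EEXIST) )
            return rc;

        /* Only send once the remote end has opened its side as well. */
        if ( HYPERVISOR_event_channel_op(EVTCHNOP_status, chan_id) !=
             EVTCHNSTAT_connected )
            return -EAGAIN;

        /* Sets the sticky pending bit at the remote end and raises EVENT_EVTCHN. */
        rc = HYPERVISOR_event_channel_op(EVTCHNOP_send, chan_id);

        /* When finished with the channel:                        */
        /*   HYPERVISOR_event_channel_op(EVTCHNOP_close, chan_id); */
        return rc;
    }
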
     5.1 --- a/xen/include/hypervisor-ifs/hypervisor-if.h	Tue Nov 18 14:42:49 2003 +0000
     5.2 +++ b/xen/include/hypervisor-ifs/hypervisor-if.h	Tue Nov 18 17:49:24 2003 +0000
     5.3 @@ -60,6 +60,7 @@
     5.4  #define __HYPERVISOR_multicall            17
     5.5  #define __HYPERVISOR_kbd_op               18
     5.6  #define __HYPERVISOR_update_va_mapping    19
     5.7 +#define __HYPERVISOR_event_channel_op     20
     5.8  
     5.9  /* And the trap vector is... */
    5.10  #define TRAP_INSTR "int $0x82"
    5.11 @@ -91,6 +92,7 @@
    5.12  #define EVENT_NET      0x10 /* There are packets for transmission. */
    5.13  #define EVENT_PS2      0x20 /* PS/2 keyboard or mouse event(s) */
    5.14  #define EVENT_STOP     0x40 /* Prepare for stopping and possible pickling */
    5.15 +#define EVENT_EVTCHN   0x80 /* Event pending on an event channel */
    5.16  
    5.17  /* Bit offsets, as opposed to the above masks. */
    5.18  #define _EVENT_BLKDEV   0
    5.19 @@ -100,6 +102,7 @@
    5.20  #define _EVENT_NET      4
    5.21  #define _EVENT_PS2      5
    5.22  #define _EVENT_STOP     6
    5.23 +#define _EVENT_EVTCHN   7
    5.24  
    5.25  /*
    5.26   * Virtual addresses beyond this are not modifiable by guest OSes. The 
    5.27 @@ -160,7 +163,21 @@
    5.28  #define SCHEDOP_exit            1
    5.29  #define SCHEDOP_stop            2
    5.30  
    5.31 - 
    5.32 +/*
    5.33 + * EVTCHNOP_* - Event channel operations.
    5.34 + */
    5.35 +#define EVTCHNOP_open           0  /* Open channel to <target domain>.    */
    5.36 +#define EVTCHNOP_close          1  /* Close <channel id>.                 */
    5.37 +#define EVTCHNOP_send           2  /* Send event on <channel id>.         */
    5.38 +#define EVTCHNOP_status         3  /* Get status of <channel id>.         */
    5.39 +
    5.40 +/*
    5.41 + * EVTCHNSTAT_* - Non-error return values from EVTCHNOP_status.
    5.42 + */
     5.43 +#define EVTCHNSTAT_closed       0  /* Channel is not in use.              */
    5.44 +#define EVTCHNSTAT_disconnected 1  /* Channel is not connected to remote. */
    5.45 +#define EVTCHNSTAT_connected    2  /* Channel is connected to remote.     */
    5.46 +
    5.47  
    5.48  #ifndef __ASSEMBLY__
    5.49  
    5.50 @@ -238,6 +255,30 @@ typedef struct shared_info_st {
    5.51      unsigned long events_mask;
    5.52  
    5.53      /*
    5.54 +     * A domain can have up to 1024 bidirectional event channels to/from other
    5.55 +     * domains. Domains must agree out-of-band to set up a connection, and then
    5.56 +     * each must explicitly request a connection to the other. When both have
    5.57 +     * made the request the channel is fully allocated and set up.
    5.58 +     * 
    5.59 +     * An event channel is a single sticky 'bit' of information. Setting the
    5.60 +     * sticky bit also causes an upcall into the target domain. In this way
    5.61 +     * events can be seen as an IPI [Inter-Process(or) Interrupt].
    5.62 +     * 
    5.63 +     * A guest can see which of its event channels are pending by reading the
    5.64 +     * 'event_channel_pend' bitfield. To avoid a linear scan of the entire
    5.65 +     * bitfield there is a 'selector' which indicates which words in the
    5.66 +     * bitfield contain at least one set bit.
    5.67 +     * 
    5.68 +     * There is a similar bitfield to indicate which event channels have been
    5.69 +     * disconnected by the remote end. There is also a 'selector' for this
    5.70 +     * field.
    5.71 +     */
    5.72 +    u32 event_channel_pend[32];
    5.73 +    u32 event_channel_pend_sel;
    5.74 +    u32 event_channel_disc[32];
    5.75 +    u32 event_channel_disc_sel;
    5.76 +
    5.77 +    /*
    5.78       * Time: The following abstractions are exposed: System Time, Clock Time,
    5.79       * Domain Virtual Time. Domains can access Cycle counter time directly.
    5.80       */
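
The shared_info comment above describes a two-level structure: 1024 pending bits in event_channel_pend[] plus a 32-bit selector with one bit per word, and an analogous pair for disconnect notifications. A guest handler for the new EVENT_EVTCHN event could scan it roughly as sketched below. process_channel() is a hypothetical per-channel callback, and the clear-then-scan ordering is an assumption about reasonable guest behaviour, not something this changeset mandates.

    /* Hypothetical guest-side scan of pending event channels (sketch only). */
    static void evtchn_upcall(shared_info_t *si)
    {
        unsigned int word, bit, chan;
        u32 sel, pend;

        /* Snapshot and clear the selector, then walk each set bit in it. */
        sel = __atomic_exchange_n(&si->event_channel_pend_sel, 0, __ATOMIC_ACQ_REL);
        while ( sel != 0 )
        {
            word = __builtin_ctz(sel);
            sel &= sel - 1;

            /* Snapshot and clear the 32 channels covered by this selector bit. */
            pend = __atomic_exchange_n(&si->event_channel_pend[word], 0,
                                       __ATOMIC_ACQ_REL);
            while ( pend != 0 )
            {
                bit   = __builtin_ctz(pend);
                pend &= pend - 1;
                chan  = (word << 5) + bit;
                process_channel(chan);   /* hypothetical per-channel handler */
            }
        }
    }
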
     6.1 --- a/xen/include/xeno/event.h	Tue Nov 18 14:42:49 2003 +0000
     6.2 +++ b/xen/include/xeno/event.h	Tue Nov 18 17:49:24 2003 +0000
     6.3 @@ -28,7 +28,8 @@
     6.4   */
     6.5  static inline unsigned long mark_guest_event(struct task_struct *p, int event)
     6.6  {
     6.7 -    set_bit(event, &p->shared_info->events);
     6.8 +    if ( test_and_set_bit(event, &p->shared_info->events) )
     6.9 +        return 0;
    6.10  
    6.11      /*
    6.12       * No need for the runqueue_lock! The check below does not race
    6.13 @@ -46,7 +47,8 @@ static inline unsigned long mark_guest_e
    6.14  /* As above, but hyp_events are handled within the hypervisor. */
    6.15  static inline unsigned long mark_hyp_event(struct task_struct *p, int event)
    6.16  {
    6.17 -    set_bit(event, &p->hyp_events);
     6.18 +    if ( test_and_set_bit(event, &p->hyp_events) )
    6.19 +        return 0;
    6.20      smp_mb();
    6.21      if ( p->state == TASK_INTERRUPTIBLE ) wake_up(p);
    6.22      reschedule(p);
    6.23 @@ -64,17 +66,21 @@ static inline void guest_event_notify(un
    6.24  
    6.25  static inline unsigned long mark_guest_event(struct task_struct *p, int event)
    6.26  {
    6.27 -    set_bit(event, &p->shared_info->events);
    6.28 -    if ( p->state == TASK_INTERRUPTIBLE ) wake_up(p);
    6.29 -    reschedule(p);
    6.30 +    if ( !test_and_set_bit(event, &p->shared_info->events) )
    6.31 +    {
    6.32 +        if ( p->state == TASK_INTERRUPTIBLE ) wake_up(p);
    6.33 +        reschedule(p);
    6.34 +    }
    6.35      return 0;
    6.36  }
    6.37  
    6.38  static inline unsigned long mark_hyp_event(struct task_struct *p, int event)
    6.39  {
    6.40 -    set_bit(event, &p->hyp_events);
    6.41 -    if ( p->state == TASK_INTERRUPTIBLE ) wake_up(p);
    6.42 -    reschedule(p);
    6.43 +    if ( !test_and_set_bit(event, &p->hyp_events) )
    6.44 +    {
    6.45 +        if ( p->state == TASK_INTERRUPTIBLE ) wake_up(p);
    6.46 +        reschedule(p);
    6.47 +    }
    6.48      return 0;
    6.49  }
    6.50  
     7.1 --- a/xen/include/xeno/mm.h	Tue Nov 18 14:42:49 2003 +0000
     7.2 +++ b/xen/include/xeno/mm.h	Tue Nov 18 17:49:24 2003 +0000
     7.3 @@ -78,7 +78,7 @@ typedef struct pfn_info {
     7.4  #define page_type_count(p)	 ((p)->type_count)
     7.5  #define set_page_type_count(p,v) ((p)->type_count = v)
     7.6  
     7.7 -#define PG_domain_mask 0x00ffffff /* owning domain (24 bits) */
     7.8 +#define PG_domain_mask MAX_DOMAIN_ID /* owning domain (16 bits) */
     7.9  /* hypervisor flags (domain == 0) */
    7.10  #define PG_slab	       24
    7.11  /* domain flags (domain != 0) */
     8.1 --- a/xen/include/xeno/sched.h	Tue Nov 18 14:42:49 2003 +0000
     8.2 +++ b/xen/include/xeno/sched.h	Tue Nov 18 17:49:24 2003 +0000
     8.3 @@ -43,6 +43,18 @@ extern struct mm_struct init_mm;
     8.4  /* SMH: replace below when have explicit 'priv' flag or bitmask */
     8.5  #define IS_PRIV(_p) ((_p)->domain == 0) 
     8.6  
     8.7 +#define DOMAIN_ID_BITS (16)
     8.8 +#define MAX_DOMAIN_ID  ((1<<(DOMAIN_ID_BITS))-1)
     8.9 +
    8.10 +typedef struct event_channel_st
    8.11 +{
    8.12 +    u16 target_dom; /* Target domain (i.e. domain at remote end). */
    8.13 +#define ECF_TARGET_ID ((1<<10)-1) /* Channel identifier at remote end.    */
    8.14 +#define ECF_INUSE     (1<<10)     /* Is this channel descriptor in use?   */
    8.15 +#define ECF_CONNECTED (1<<11)     /* Is this channel connected to remote? */
    8.16 +    u16 flags;
    8.17 +} event_channel_t;
    8.18 +
    8.19  struct task_struct 
    8.20  {
    8.21      /*
    8.22 @@ -129,6 +141,11 @@ struct task_struct
    8.23      struct thread_struct thread;
    8.24      struct task_struct *prev_task, *next_task, *next_hash;
    8.25      
    8.26 +    /* Event channel information. */
    8.27 +    event_channel_t *event_channel;
    8.28 +    unsigned int     max_event_channel;
    8.29 +    spinlock_t       event_channel_lock;
    8.30 +
    8.31      unsigned long flags;
    8.32  
    8.33      atomic_t refcnt;
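
Finally, the event_channel_t flags word in sched.h packs three things into 16 bits: the remote end's channel id in the low ten bits (ECF_TARGET_ID), an in-use bit (ECF_INUSE, bit 10) and a connected bit (ECF_CONNECTED, bit 11). That is why event_channel.c can write expressions such as ECF_INUSE | ECF_CONNECTED | rid and recover the remote id with flags & ECF_TARGET_ID. Hypothetical helpers making the packing explicit:

    /* Hypothetical helpers illustrating the flags encoding (sketch only). */
    static inline u16 ecf_make_connected(u16 remote_id)
    {
        /* remote_id must fit in the 10-bit ECF_TARGET_ID field (0..1023). */
        return ECF_INUSE | ECF_CONNECTED | (remote_id & ECF_TARGET_ID);
    }

    static inline u16 ecf_remote_id(u16 flags)
    {
        return flags & ECF_TARGET_ID;   /* as used by event_channel_close/send */
    }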