ia64/xen-unstable

annotate xen/common/event_channel.c @ 956:45296ed1d50d

bitkeeper revision 1.611 (3fbb410b63xMuUN6Q1y1PYhhAi94Lw)

event_channel.c:
Cleanups for event channels.
author kaf24@scramble.cl.cam.ac.uk
date Wed Nov 19 10:08:11 2003 +0000 (2003-11-19)
parents 5a9d5824dcb7
children 9ed81ff882d4 74915dc117d5
rev   line source
kaf24@954 1 /******************************************************************************
kaf24@954 2 * event_channel.c
kaf24@954 3 *
kaf24@954 4 * Event channels between domains.
kaf24@954 5 *
kaf24@954 6 * Copyright (c) 2003, K A Fraser.
kaf24@954 7 *
kaf24@954 8 * This program is distributed in the hope that it will be useful,
kaf24@954 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
kaf24@954 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
kaf24@954 11 * GNU General Public License for more details.
kaf24@954 12 *
kaf24@954 13 * You should have received a copy of the GNU General Public License
kaf24@954 14 * along with this program; if not, write to the Free Software
kaf24@954 15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
kaf24@954 16 */
kaf24@954 17
kaf24@954 18 #include <xeno/config.h>
kaf24@954 19 #include <xeno/init.h>
kaf24@954 20 #include <xeno/lib.h>
kaf24@954 21 #include <xeno/errno.h>
kaf24@954 22 #include <xeno/sched.h>
kaf24@954 23 #include <xeno/event.h>
kaf24@954 24
kaf24@956 25 #define MAX_EVENT_CHANNELS 1024
kaf24@954 26
kaf24@954 27 static long event_channel_open(u16 target_dom)
kaf24@954 28 {
kaf24@954 29 struct task_struct *lp = current, *rp;
kaf24@954 30 int i, lmax, rmax, lid, rid;
kaf24@954 31 event_channel_t *lchn, *rchn;
kaf24@954 32 shared_info_t *rsi;
kaf24@954 33 unsigned long cpu_mask;
kaf24@954 34 long rc = 0;
kaf24@954 35
kaf24@954 36 rp = find_domain_by_id(target_dom);
kaf24@954 37
kaf24@954 38 /*
kaf24@954 39 * We need locks at both ends to make a connection. We avoid deadlock
kaf24@954 40 * by acquiring the locks in address order.
kaf24@954 41 */
kaf24@954 42 if ( (unsigned long)lp < (unsigned long)rp )
kaf24@954 43 {
kaf24@954 44 spin_lock(&lp->event_channel_lock);
kaf24@954 45 spin_lock(&rp->event_channel_lock);
kaf24@954 46 }
kaf24@954 47 else
kaf24@954 48 {
kaf24@954 49 if ( likely(rp != NULL) )
kaf24@954 50 spin_lock(&rp->event_channel_lock);
kaf24@954 51 spin_lock(&lp->event_channel_lock);
kaf24@954 52 }
kaf24@954 53
kaf24@954 54 lmax = lp->max_event_channel;
kaf24@954 55 lchn = lp->event_channel;
kaf24@954 56 lid = -1;
kaf24@954 57
kaf24@954 58 /*
kaf24@954 59 * Find the first unused event channel. Also ensure bo channel already
kaf24@954 60 * exists to the specified target domain.
kaf24@954 61 */
kaf24@954 62 for ( i = 0; i < lmax; i++ )
kaf24@954 63 {
kaf24@954 64 if ( (lid == -1) && !(lchn[i].flags & ECF_INUSE) )
kaf24@954 65 {
kaf24@954 66 lid = i;
kaf24@954 67 }
kaf24@954 68 else if ( unlikely(lchn[i].target_dom == target_dom) )
kaf24@954 69 {
kaf24@954 70 rc = -EEXIST;
kaf24@954 71 goto out;
kaf24@954 72 }
kaf24@954 73 }
kaf24@954 74
kaf24@954 75 /* If there is no free slot we need to allocate a bigger channel list. */
kaf24@954 76 if ( unlikely(lid == -1) )
kaf24@954 77 {
kaf24@954 78 /* Reached maximum channel count? */
kaf24@956 79 if ( unlikely(lmax == MAX_EVENT_CHANNELS) )
kaf24@954 80 {
kaf24@954 81 rc = -ENOSPC;
kaf24@954 82 goto out;
kaf24@954 83 }
kaf24@954 84
kaf24@954 85 lmax = (lmax == 0) ? 4 : (lmax * 2);
kaf24@954 86
kaf24@954 87 lchn = kmalloc(lmax * sizeof(event_channel_t), GFP_KERNEL);
kaf24@954 88 if ( unlikely(lchn == NULL) )
kaf24@954 89 {
kaf24@954 90 rc = -ENOMEM;
kaf24@954 91 goto out;
kaf24@954 92 }
kaf24@954 93
kaf24@954 94 memset(lchn, 0, lmax * sizeof(event_channel_t));
kaf24@954 95
kaf24@954 96 if ( likely(lp->event_channel != NULL) )
kaf24@954 97 kfree(lp->event_channel);
kaf24@954 98
kaf24@954 99 lp->event_channel = lchn;
kaf24@954 100 lp->max_event_channel = lmax;
kaf24@954 101 }
kaf24@954 102
kaf24@954 103 lchn[lid].target_dom = target_dom;
kaf24@954 104 lchn[lid].flags = ECF_INUSE;
kaf24@954 105
kaf24@954 106 if ( likely(rp != NULL) )
kaf24@954 107 {
kaf24@954 108 rchn = rp->event_channel;
kaf24@954 109 rmax = rp->max_event_channel;
kaf24@954 110
kaf24@954 111 for ( rid = 0; rid < rmax; rid++ )
kaf24@954 112 {
kaf24@954 113 if ( (rchn[rid].target_dom == lp->domain) &&
kaf24@954 114 (rchn[rid].flags & ECF_INUSE) )
kaf24@954 115 {
kaf24@954 116 /*
kaf24@954 117 * The target was awaiting a connection. We make the connection
kaf24@954 118 * and send a connection-made event to the remote end.
kaf24@954 119 */
kaf24@954 120 rchn[rid].flags = ECF_INUSE | ECF_CONNECTED | lid;
kaf24@954 121 lchn[lid].flags = ECF_INUSE | ECF_CONNECTED | rid;
kaf24@954 122
kaf24@954 123 rsi = rp->shared_info;
kaf24@954 124 if ( !test_and_set_bit(rid, &rsi->event_channel_pend[0]) &&
kaf24@954 125 !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) )
kaf24@954 126 {
kaf24@954 127 cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
kaf24@954 128 guest_event_notify(cpu_mask);
kaf24@954 129 }
kaf24@954 130
kaf24@954 131 break;
kaf24@954 132 }
kaf24@954 133 }
kaf24@954 134 }
kaf24@954 135
kaf24@954 136 out:
kaf24@954 137 spin_unlock(&lp->event_channel_lock);
kaf24@954 138 if ( rp != NULL )
kaf24@954 139 {
kaf24@954 140 spin_unlock(&rp->event_channel_lock);
kaf24@954 141 put_task_struct(rp);
kaf24@954 142 }
kaf24@954 143
kaf24@954 144 return rc;
kaf24@954 145 }
kaf24@954 146
kaf24@954 147
/*
 * Close local event channel 'lid' and, if it is connected, disconnect the
 * remote end too (notifying the remote domain via its 'disc' bitmap).
 *
 * Returns 0 on success, or -EINVAL if 'lid' is out of range, not in use,
 * or the peer changed identity while the local lock was dropped.
 *
 * NOTE(review): operates on 'current'. Any caller tearing down another
 * domain's channels (e.g. destroy_event_channels) must therefore run in
 * that domain's context -- confirm the teardown call site.
 *
 * NOTE(review): if a channel is connected back to our own domain
 * (rp == lp), the lock-ordering dance below acquires the same spinlock
 * twice -- potential self-deadlock; verify self-connections cannot reach
 * this path.
 */
static long event_channel_close(u16 lid)
{
    struct task_struct *lp = current, *rp = NULL;
    event_channel_t *lchn, *rchn;
    u16 rid;
    shared_info_t *rsi;
    unsigned long cpu_mask;
    long rc = 0;

    /*
     * Retry point: we may have to drop the local lock to acquire the
     * remote lock in the correct (address) order, then revalidate.
     */
 again:
    spin_lock(&lp->event_channel_lock);

    lchn = lp->event_channel;

    /* Channel must exist and be allocated. */
    if ( unlikely(lid >= lp->max_event_channel) ||
         unlikely(!(lchn[lid].flags & ECF_INUSE)) )
    {
        rc = -EINVAL;
        goto out;
    }

    if ( lchn[lid].flags & ECF_CONNECTED )
    {
        if ( rp == NULL )
        {
            /* First pass: look up and lock the remote end. */
            rp = find_domain_by_id(lchn[lid].target_dom);
            ASSERT(rp != NULL);

            if ( (unsigned long)lp < (unsigned long)rp )
            {
                /* Correct address order already: just take the remote lock. */
                spin_lock(&rp->event_channel_lock);
            }
            else
            {
                /*
                 * Wrong order: drop the local lock, take the remote lock,
                 * then restart and revalidate everything under both locks.
                 */
                spin_unlock(&lp->event_channel_lock);
                spin_lock(&rp->event_channel_lock);
                goto again;
            }
        }
        else if ( rp->domain != lchn[lid].target_dom )
        {
            /* Channel was retargeted while the local lock was dropped. */
            rc = -EINVAL;
            goto out;
        }

        /* Both ends locked: sanity-check the cross-references. */
        rchn = rp->event_channel;
        rid  = lchn[lid].flags & ECF_TARGET_ID;
        ASSERT(rid < rp->max_event_channel);
        ASSERT(rchn[rid].flags == (ECF_INUSE | ECF_CONNECTED | lid));
        ASSERT(rchn[rid].target_dom == lp->domain);

        /* Remote end reverts to half-open (awaiting a new connection). */
        rchn[rid].flags = ECF_INUSE;

        /* Tell the remote guest it was disconnected (edge-triggered). */
        rsi = rp->shared_info;
        if ( !test_and_set_bit(rid, &rsi->event_channel_disc[0]) &&
             !test_and_set_bit(rid>>5, &rsi->event_channel_disc_sel) )
        {
            cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
            guest_event_notify(cpu_mask);
        }
    }

    /* Free the local slot entirely. */
    lchn[lid].target_dom = 0;
    lchn[lid].flags      = 0;

 out:
    spin_unlock(&lp->event_channel_lock);
    if ( rp != NULL )
    {
        spin_unlock(&rp->event_channel_lock);
        put_task_struct(rp);
    }

    return rc;
}
kaf24@954 223
kaf24@954 224
kaf24@954 225 static long event_channel_send(u16 lid)
kaf24@954 226 {
kaf24@954 227 struct task_struct *lp = current, *rp;
kaf24@956 228 u16 rid, rdom;
kaf24@954 229 shared_info_t *rsi;
kaf24@954 230 unsigned long cpu_mask;
kaf24@954 231
kaf24@954 232 spin_lock(&lp->event_channel_lock);
kaf24@954 233
kaf24@954 234 if ( unlikely(lid >= lp->max_event_channel) ||
kaf24@956 235 unlikely(!(lp->event_channel[lid].flags & ECF_CONNECTED)) )
kaf24@954 236 {
kaf24@954 237 spin_unlock(&lp->event_channel_lock);
kaf24@954 238 return -EINVAL;
kaf24@954 239 }
kaf24@954 240
kaf24@956 241 rdom = lp->event_channel[lid].target_dom;
kaf24@956 242 rid = lp->event_channel[lid].flags & ECF_TARGET_ID;
kaf24@954 243
kaf24@954 244 spin_unlock(&lp->event_channel_lock);
kaf24@954 245
kaf24@956 246 if ( unlikely(rid >= MAX_EVENT_CHANNELS) ||
kaf24@956 247 unlikely ((rp = find_domain_by_id(rdom)) == NULL) )
kaf24@954 248 return -EINVAL;
kaf24@954 249
kaf24@954 250 rsi = rp->shared_info;
kaf24@954 251 if ( !test_and_set_bit(rid, &rsi->event_channel_pend[0]) &&
kaf24@954 252 !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) )
kaf24@954 253 {
kaf24@954 254 cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
kaf24@954 255 guest_event_notify(cpu_mask);
kaf24@954 256 }
kaf24@954 257
kaf24@954 258 put_task_struct(rp);
kaf24@954 259 return 0;
kaf24@954 260 }
kaf24@954 261
kaf24@954 262
kaf24@954 263 static long event_channel_status(u16 lid)
kaf24@954 264 {
kaf24@954 265 struct task_struct *lp = current;
kaf24@954 266 event_channel_t *lchn;
kaf24@954 267 long rc = EVTCHNSTAT_closed;
kaf24@954 268
kaf24@954 269 spin_lock(&lp->event_channel_lock);
kaf24@954 270
kaf24@954 271 lchn = lp->event_channel;
kaf24@954 272
kaf24@954 273 if ( lid < lp->max_event_channel )
kaf24@954 274 {
kaf24@956 275 if ( lchn[lid].flags & ECF_CONNECTED )
kaf24@954 276 rc = EVTCHNSTAT_connected;
kaf24@954 277 else if ( lchn[lid].flags & ECF_INUSE )
kaf24@954 278 rc = EVTCHNSTAT_disconnected;
kaf24@954 279 }
kaf24@954 280
kaf24@954 281 spin_unlock(&lp->event_channel_lock);
kaf24@954 282 return rc;
kaf24@954 283 }
kaf24@954 284
kaf24@954 285
kaf24@954 286 long do_event_channel_op(unsigned int cmd, unsigned int id)
kaf24@954 287 {
kaf24@954 288 long rc;
kaf24@954 289
kaf24@954 290 switch ( cmd )
kaf24@954 291 {
kaf24@954 292 case EVTCHNOP_open:
kaf24@954 293 rc = event_channel_open((u16)id);
kaf24@954 294 break;
kaf24@954 295
kaf24@954 296 case EVTCHNOP_close:
kaf24@954 297 rc = event_channel_close((u16)id);
kaf24@954 298 break;
kaf24@954 299
kaf24@954 300 case EVTCHNOP_send:
kaf24@954 301 rc = event_channel_send((u16)id);
kaf24@954 302 break;
kaf24@954 303
kaf24@954 304 case EVTCHNOP_status:
kaf24@954 305 rc = event_channel_status((u16)id);
kaf24@954 306 break;
kaf24@954 307
kaf24@954 308 default:
kaf24@954 309 rc = -ENOSYS;
kaf24@954 310 break;
kaf24@954 311 }
kaf24@954 312
kaf24@954 313 return rc;
kaf24@954 314 }
kaf24@954 315
kaf24@954 316
/*
 * Tear down every event channel belonging to task 'p', then free the
 * channel table itself.
 *
 * NOTE(review): event_channel_close() operates on 'current', not on 'p';
 * this loop only closes p's channels correctly when invoked from p's own
 * context -- confirm the teardown call site, otherwise it closes the
 * wrong domain's channels while freeing p's table.
 */
void destroy_event_channels(struct task_struct *p)
{
    int i;
    if ( p->event_channel != NULL )
    {
        /* Close each slot so any connected remote ends get disconnected. */
        for ( i = 0; i < p->max_event_channel; i++ )
            (void)event_channel_close((u16)i);
        kfree(p->event_channel);
    }
}