ia64/xen-unstable

annotate xen/common/event_channel.c @ 954:70b42e624c5c

bitkeeper revision 1.608.1.1 (3fba5b99WMvlBA7JwJeGU5vakf_qWg)

event_channel.c:
new file
sched.h, mm.h, event.h, hypervisor-if.h, domain.c, entry.S:
Event channels between domains. Also do not reschedule a domain if a particular guest event is already pending.
author kaf24@scramble.cl.cam.ac.uk
date Tue Nov 18 17:49:13 2003 +0000 (2003-11-18)
parents
children 5a9d5824dcb7
rev   line source
kaf24@954 1 /******************************************************************************
kaf24@954 2 * event_channel.c
kaf24@954 3 *
kaf24@954 4 * Event channels between domains.
kaf24@954 5 *
kaf24@954 6 * Copyright (c) 2003, K A Fraser.
kaf24@954 7 *
kaf24@954 8 * This program is distributed in the hope that it will be useful,
kaf24@954 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
kaf24@954 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
kaf24@954 11 * GNU General Public License for more details.
kaf24@954 12 *
kaf24@954 13 * You should have received a copy of the GNU General Public License
kaf24@954 14 * along with this program; if not, write to the Free Software
kaf24@954 15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
kaf24@954 16 */
kaf24@954 17
kaf24@954 18 #include <xeno/config.h>
kaf24@954 19 #include <xeno/init.h>
kaf24@954 20 #include <xeno/lib.h>
kaf24@954 21 #include <xeno/errno.h>
kaf24@954 22 #include <xeno/sched.h>
kaf24@954 23 #include <xeno/event.h>
kaf24@954 24
kaf24@954 25
kaf24@954 26 static long event_channel_open(u16 target_dom)
kaf24@954 27 {
kaf24@954 28 struct task_struct *lp = current, *rp;
kaf24@954 29 int i, lmax, rmax, lid, rid;
kaf24@954 30 event_channel_t *lchn, *rchn;
kaf24@954 31 shared_info_t *rsi;
kaf24@954 32 unsigned long cpu_mask;
kaf24@954 33 long rc = 0;
kaf24@954 34
kaf24@954 35 rp = find_domain_by_id(target_dom);
kaf24@954 36
kaf24@954 37 /*
kaf24@954 38 * We need locks at both ends to make a connection. We avoid deadlock
kaf24@954 39 * by acquiring the locks in address order.
kaf24@954 40 */
kaf24@954 41 if ( (unsigned long)lp < (unsigned long)rp )
kaf24@954 42 {
kaf24@954 43 spin_lock(&lp->event_channel_lock);
kaf24@954 44 spin_lock(&rp->event_channel_lock);
kaf24@954 45 }
kaf24@954 46 else
kaf24@954 47 {
kaf24@954 48 if ( likely(rp != NULL) )
kaf24@954 49 spin_lock(&rp->event_channel_lock);
kaf24@954 50 spin_lock(&lp->event_channel_lock);
kaf24@954 51 }
kaf24@954 52
kaf24@954 53 lmax = lp->max_event_channel;
kaf24@954 54 lchn = lp->event_channel;
kaf24@954 55 lid = -1;
kaf24@954 56
kaf24@954 57 /*
kaf24@954 58 * Find the first unused event channel. Also ensure bo channel already
kaf24@954 59 * exists to the specified target domain.
kaf24@954 60 */
kaf24@954 61 for ( i = 0; i < lmax; i++ )
kaf24@954 62 {
kaf24@954 63 if ( (lid == -1) && !(lchn[i].flags & ECF_INUSE) )
kaf24@954 64 {
kaf24@954 65 lid = i;
kaf24@954 66 }
kaf24@954 67 else if ( unlikely(lchn[i].target_dom == target_dom) )
kaf24@954 68 {
kaf24@954 69 rc = -EEXIST;
kaf24@954 70 goto out;
kaf24@954 71 }
kaf24@954 72 }
kaf24@954 73
kaf24@954 74 /* If there is no free slot we need to allocate a bigger channel list. */
kaf24@954 75 if ( unlikely(lid == -1) )
kaf24@954 76 {
kaf24@954 77 /* Reached maximum channel count? */
kaf24@954 78 if ( unlikely(lmax == 1024) )
kaf24@954 79 {
kaf24@954 80 rc = -ENOSPC;
kaf24@954 81 goto out;
kaf24@954 82 }
kaf24@954 83
kaf24@954 84 lmax = (lmax == 0) ? 4 : (lmax * 2);
kaf24@954 85
kaf24@954 86 lchn = kmalloc(lmax * sizeof(event_channel_t), GFP_KERNEL);
kaf24@954 87 if ( unlikely(lchn == NULL) )
kaf24@954 88 {
kaf24@954 89 rc = -ENOMEM;
kaf24@954 90 goto out;
kaf24@954 91 }
kaf24@954 92
kaf24@954 93 memset(lchn, 0, lmax * sizeof(event_channel_t));
kaf24@954 94
kaf24@954 95 if ( likely(lp->event_channel != NULL) )
kaf24@954 96 kfree(lp->event_channel);
kaf24@954 97
kaf24@954 98 lp->event_channel = lchn;
kaf24@954 99 lp->max_event_channel = lmax;
kaf24@954 100 }
kaf24@954 101
kaf24@954 102 lchn[lid].target_dom = target_dom;
kaf24@954 103 lchn[lid].flags = ECF_INUSE;
kaf24@954 104
kaf24@954 105 if ( likely(rp != NULL) )
kaf24@954 106 {
kaf24@954 107 rchn = rp->event_channel;
kaf24@954 108 rmax = rp->max_event_channel;
kaf24@954 109
kaf24@954 110 for ( rid = 0; rid < rmax; rid++ )
kaf24@954 111 {
kaf24@954 112 if ( (rchn[rid].target_dom == lp->domain) &&
kaf24@954 113 (rchn[rid].flags & ECF_INUSE) )
kaf24@954 114 {
kaf24@954 115 /*
kaf24@954 116 * The target was awaiting a connection. We make the connection
kaf24@954 117 * and send a connection-made event to the remote end.
kaf24@954 118 */
kaf24@954 119 rchn[rid].flags = ECF_INUSE | ECF_CONNECTED | lid;
kaf24@954 120 lchn[lid].flags = ECF_INUSE | ECF_CONNECTED | rid;
kaf24@954 121
kaf24@954 122 rsi = rp->shared_info;
kaf24@954 123 if ( !test_and_set_bit(rid, &rsi->event_channel_pend[0]) &&
kaf24@954 124 !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) )
kaf24@954 125 {
kaf24@954 126 cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
kaf24@954 127 guest_event_notify(cpu_mask);
kaf24@954 128 }
kaf24@954 129
kaf24@954 130 break;
kaf24@954 131 }
kaf24@954 132 }
kaf24@954 133 }
kaf24@954 134
kaf24@954 135 out:
kaf24@954 136 spin_unlock(&lp->event_channel_lock);
kaf24@954 137 if ( rp != NULL )
kaf24@954 138 {
kaf24@954 139 spin_unlock(&rp->event_channel_lock);
kaf24@954 140 put_task_struct(rp);
kaf24@954 141 }
kaf24@954 142
kaf24@954 143 return rc;
kaf24@954 144 }
kaf24@954 145
kaf24@954 146
/*
 * Close local event channel 'lid'.  If the channel is connected, the remote
 * end is reset to the unconnected (INUSE-only) state and sent a disconnect
 * event; the local slot is then freed.
 *
 * Returns 0 on success, -EINVAL if 'lid' is out of range, not in use, or the
 * remote end changed identity while the locks were being reacquired.
 */
static long event_channel_close(u16 lid)
{
    struct task_struct *lp = current, *rp = NULL;
    event_channel_t *lchn, *rchn;
    u16 rid;
    shared_info_t *rsi;
    unsigned long cpu_mask;
    long rc = 0;

 again:
    spin_lock(&lp->event_channel_lock);

    lchn = lp->event_channel;

    /* Revalidate on every pass: state may change while the lock is dropped. */
    if ( unlikely(lid >= lp->max_event_channel) ||
         unlikely(!(lchn[lid].flags & ECF_INUSE)) )
    {
        rc = -EINVAL;
        goto out;
    }

    if ( lchn[lid].flags & ECF_CONNECTED )
    {
        if ( rp == NULL )
        {
            rp = find_domain_by_id(lchn[lid].target_dom);
            /*
             * NOTE(review): assumes the remote domain still exists whenever a
             * channel is connected to it — confirm against domain teardown.
             */
            ASSERT(rp != NULL);

            /*
             * Locks are acquired in address order to avoid deadlock (see
             * event_channel_open).  If lp is the higher address we must drop
             * our lock first and retry from scratch with rp's lock held.
             */
            if ( (unsigned long)lp < (unsigned long)rp )
            {
                spin_lock(&rp->event_channel_lock);
            }
            else
            {
                spin_unlock(&lp->event_channel_lock);
                spin_lock(&rp->event_channel_lock);
                goto again;
            }
        }
        else if ( rp->domain != lchn[lid].target_dom )
        {
            /* Channel was re-targeted while we were reacquiring locks. */
            rc = -EINVAL;
            goto out;
        }

        /* Low bits of the flags word encode the peer's channel index. */
        rchn = rp->event_channel;
        rid  = lchn[lid].flags & ECF_TARGET_ID;
        ASSERT(rid < rp->max_event_channel);
        ASSERT(rchn[rid].flags == (ECF_INUSE | ECF_CONNECTED | lid));
        ASSERT(rchn[rid].target_dom == lp->domain);

        /* Remote end reverts to 'awaiting connection'. */
        rchn[rid].flags = ECF_INUSE;

        /* Signal the disconnect to the remote guest, once per edge. */
        rsi = rp->shared_info;
        if ( !test_and_set_bit(rid, &rsi->event_channel_disc[0]) &&
             !test_and_set_bit(rid>>5, &rsi->event_channel_disc_sel) )
        {
            cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
            guest_event_notify(cpu_mask);
        }
    }

    /* Free the local slot (zeroed target_dom marks it reusable). */
    lchn[lid].target_dom = 0;
    lchn[lid].flags      = 0;

 out:
    spin_unlock(&lp->event_channel_lock);
    if ( rp != NULL )
    {
        spin_unlock(&rp->event_channel_lock);
        put_task_struct(rp);
    }

    return rc;
}
kaf24@954 222
kaf24@954 223
kaf24@954 224 static long event_channel_send(u16 lid)
kaf24@954 225 {
kaf24@954 226 struct task_struct *lp = current, *rp;
kaf24@954 227 event_channel_t *lchn, *rchn;
kaf24@954 228 u16 rid;
kaf24@954 229 shared_info_t *rsi;
kaf24@954 230 unsigned long cpu_mask;
kaf24@954 231
kaf24@954 232 spin_lock(&lp->event_channel_lock);
kaf24@954 233
kaf24@954 234 lchn = lp->event_channel;
kaf24@954 235
kaf24@954 236 if ( unlikely(lid >= lp->max_event_channel) ||
kaf24@954 237 unlikely((lchn[lid].flags & (ECF_INUSE|ECF_CONNECTED)) !=
kaf24@954 238 (ECF_INUSE|ECF_CONNECTED)) )
kaf24@954 239 {
kaf24@954 240 spin_unlock(&lp->event_channel_lock);
kaf24@954 241 return -EINVAL;
kaf24@954 242 }
kaf24@954 243
kaf24@954 244 rid = lchn[lid].flags & ECF_TARGET_ID;
kaf24@954 245 rp = find_domain_by_id(lchn[lid].target_dom);
kaf24@954 246 ASSERT(rp != NULL);
kaf24@954 247
kaf24@954 248 spin_unlock(&lp->event_channel_lock);
kaf24@954 249
kaf24@954 250 spin_lock(&rp->event_channel_lock);
kaf24@954 251
kaf24@954 252 rchn = rp->event_channel;
kaf24@954 253
kaf24@954 254 if ( unlikely(rid >= rp->max_event_channel) )
kaf24@954 255 {
kaf24@954 256 spin_unlock(&rp->event_channel_lock);
kaf24@954 257 put_task_struct(rp);
kaf24@954 258 return -EINVAL;
kaf24@954 259 }
kaf24@954 260
kaf24@954 261 rsi = rp->shared_info;
kaf24@954 262 if ( !test_and_set_bit(rid, &rsi->event_channel_pend[0]) &&
kaf24@954 263 !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) )
kaf24@954 264 {
kaf24@954 265 cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
kaf24@954 266 guest_event_notify(cpu_mask);
kaf24@954 267 }
kaf24@954 268
kaf24@954 269 spin_unlock(&rp->event_channel_lock);
kaf24@954 270 put_task_struct(rp);
kaf24@954 271 return 0;
kaf24@954 272 }
kaf24@954 273
kaf24@954 274
kaf24@954 275 static long event_channel_status(u16 lid)
kaf24@954 276 {
kaf24@954 277 struct task_struct *lp = current;
kaf24@954 278 event_channel_t *lchn;
kaf24@954 279 long rc = EVTCHNSTAT_closed;
kaf24@954 280
kaf24@954 281 spin_lock(&lp->event_channel_lock);
kaf24@954 282
kaf24@954 283 lchn = lp->event_channel;
kaf24@954 284
kaf24@954 285 if ( lid < lp->max_event_channel )
kaf24@954 286 {
kaf24@954 287 if ( (lchn[lid].flags & (ECF_INUSE|ECF_CONNECTED)) == ECF_INUSE )
kaf24@954 288 rc = EVTCHNSTAT_connected;
kaf24@954 289 else if ( lchn[lid].flags & ECF_INUSE )
kaf24@954 290 rc = EVTCHNSTAT_disconnected;
kaf24@954 291 }
kaf24@954 292
kaf24@954 293 spin_unlock(&lp->event_channel_lock);
kaf24@954 294 return rc;
kaf24@954 295 }
kaf24@954 296
kaf24@954 297
kaf24@954 298 long do_event_channel_op(unsigned int cmd, unsigned int id)
kaf24@954 299 {
kaf24@954 300 long rc;
kaf24@954 301
kaf24@954 302 switch ( cmd )
kaf24@954 303 {
kaf24@954 304 case EVTCHNOP_open:
kaf24@954 305 rc = event_channel_open((u16)id);
kaf24@954 306 break;
kaf24@954 307
kaf24@954 308 case EVTCHNOP_close:
kaf24@954 309 rc = event_channel_close((u16)id);
kaf24@954 310 break;
kaf24@954 311
kaf24@954 312 case EVTCHNOP_send:
kaf24@954 313 rc = event_channel_send((u16)id);
kaf24@954 314 break;
kaf24@954 315
kaf24@954 316 case EVTCHNOP_status:
kaf24@954 317 rc = event_channel_status((u16)id);
kaf24@954 318 break;
kaf24@954 319
kaf24@954 320 default:
kaf24@954 321 rc = -ENOSYS;
kaf24@954 322 break;
kaf24@954 323 }
kaf24@954 324
kaf24@954 325 return rc;
kaf24@954 326 }
kaf24@954 327
kaf24@954 328
kaf24@954 329 void destroy_event_channels(struct task_struct *p)
kaf24@954 330 {
kaf24@954 331 int i;
kaf24@954 332 if ( p->event_channel != NULL )
kaf24@954 333 {
kaf24@954 334 for ( i = 0; i < p->max_event_channel; i++ )
kaf24@954 335 (void)event_channel_close((u16)i);
kaf24@954 336 kfree(p->event_channel);
kaf24@954 337 }
kaf24@954 338 }