ia64/xen-unstable

view xen/common/event_channel.c @ 2919:fe5933507ca5

bitkeeper revision 1.1159.1.393 (4190a145cbZFKzGdkH5xPlOlxNNPnw)

Split struct exec_domain out of struct domain.
author cl349@freefall.cl.cam.ac.uk
date Tue Nov 09 10:51:49 2004 +0000 (2004-11-09)
parents 3f929065a1d1
children 7ed93ab784b6
line source
1 /******************************************************************************
2 * event_channel.c
3 *
4 * Event notifications from VIRQs, PIRQs, and other domains.
5 *
6 * Copyright (c) 2003-2004, K A Fraser.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
18 #include <xen/config.h>
19 #include <xen/init.h>
20 #include <xen/lib.h>
21 #include <xen/errno.h>
22 #include <xen/sched.h>
23 #include <xen/event.h>
24 #include <xen/irq.h>
26 #include <public/xen.h>
27 #include <public/event_channel.h>
29 #define INIT_EVENT_CHANNELS 16
30 #define MAX_EVENT_CHANNELS 1024
33 static int get_free_port(struct domain *d)
34 {
35 int max, port;
36 event_channel_t *chn;
38 max = d->max_event_channel;
39 chn = d->event_channel;
41 for ( port = 0; port < max; port++ )
42 if ( chn[port].state == ECS_FREE )
43 break;
45 if ( port == max )
46 {
47 if ( max == MAX_EVENT_CHANNELS )
48 return -ENOSPC;
50 max *= 2;
52 chn = xmalloc(max * sizeof(event_channel_t));
53 if ( unlikely(chn == NULL) )
54 return -ENOMEM;
56 memset(chn, 0, max * sizeof(event_channel_t));
58 if ( d->event_channel != NULL )
59 {
60 memcpy(chn, d->event_channel, (max/2) * sizeof(event_channel_t));
61 xfree(d->event_channel);
62 }
64 d->event_channel = chn;
65 d->max_event_channel = max;
66 }
68 return port;
69 }
72 static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
73 {
74 struct domain *d = current->domain;
75 int port;
77 spin_lock(&d->event_channel_lock);
79 if ( (port = get_free_port(d)) >= 0 )
80 {
81 d->event_channel[port].state = ECS_UNBOUND;
82 d->event_channel[port].u.unbound.remote_domid = alloc->dom;
83 }
85 spin_unlock(&d->event_channel_lock);
87 if ( port < 0 )
88 return port;
90 alloc->port = port;
91 return 0;
92 }
/*
 * Bind <dom1,port1> to <dom2,port2> as an interdomain event channel.
 * Either port may be passed as 0, meaning "allocate a free port for me".
 * Unprivileged callers may only name themselves as dom1 (DOMID_SELF).
 * On success the chosen port numbers are written back through
 * bind->port1/bind->port2. Handles dom1 == dom2 (self-binding).
 */
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
#define ERROR_EXIT(_errno) do { rc = (_errno); goto out; } while ( 0 )
    struct domain *d1, *d2;
    int            port1 = bind->port1, port2 = bind->port2;
    domid_t        dom1 = bind->dom1, dom2 = bind->dom2;
    long           rc = 0;

    if ( !IS_PRIV(current->domain) && (dom1 != DOMID_SELF) )
        return -EPERM;

    if ( (port1 < 0) || (port2 < 0) )
        return -EINVAL;

    if ( dom1 == DOMID_SELF )
        dom1 = current->domain->id;
    if ( dom2 == DOMID_SELF )
        dom2 = current->domain->id;

    /* Take a reference on both domains; drop partial refs on failure. */
    if ( ((d1 = find_domain_by_id(dom1)) == NULL) ||
         ((d2 = find_domain_by_id(dom2)) == NULL) )
    {
        if ( d1 != NULL )
            put_domain(d1);
        return -ESRCH;
    }

    /*
     * Avoid deadlock by always acquiring the two locks in a fixed global
     * order. NOTE(review): despite older comments mentioning "smaller id",
     * the order used here is by struct-domain pointer value — an equally
     * valid total order. Take care to lock only once when d1 == d2.
     */
    if ( d1 < d2 )
    {
        spin_lock(&d1->event_channel_lock);
        spin_lock(&d2->event_channel_lock);
    }
    else
    {
        if ( d1 != d2 )
            spin_lock(&d2->event_channel_lock);
        spin_lock(&d1->event_channel_lock);
    }

    /* Obtain, or ensure that we already have, a valid <port1>. */
    if ( port1 == 0 )
    {
        if ( (port1 = get_free_port(d1)) < 0 )
            ERROR_EXIT(port1);
    }
    else if ( port1 >= d1->max_event_channel )
        ERROR_EXIT(-EINVAL);

    /* Obtain, or ensure that we already have, a valid <port2>. */
    if ( port2 == 0 )
    {
        /* Make port1 non-free while we allocate port2 (in case dom1==dom2). */
        u16 tmp = d1->event_channel[port1].state;
        d1->event_channel[port1].state = ECS_INTERDOMAIN;
        port2 = get_free_port(d2);
        d1->event_channel[port1].state = tmp;
        if ( port2 < 0 )
            ERROR_EXIT(port2);
    }
    else if ( port2 >= d2->max_event_channel )
        ERROR_EXIT(-EINVAL);

    /* Validate <dom1,port1>'s current state. */
    switch ( d1->event_channel[port1].state )
    {
    case ECS_FREE:
        break;

    case ECS_UNBOUND:
        /* A pre-reserved port must have been reserved for this peer. */
        if ( d1->event_channel[port1].u.unbound.remote_domid != dom2 )
            ERROR_EXIT(-EINVAL);
        break;

    case ECS_INTERDOMAIN:
        /*
         * Already bound: succeed idempotently only if it is bound to the
         * same peer (and same remote port, when the caller specified one).
         */
        if ( d1->event_channel[port1].u.interdomain.remote_dom != d2 )
            ERROR_EXIT(-EINVAL);
        if ( (d1->event_channel[port1].u.interdomain.remote_port != port2) &&
             (bind->port2 != 0) )
            ERROR_EXIT(-EINVAL);
        port2 = d1->event_channel[port1].u.interdomain.remote_port;
        goto out;

    default:
        ERROR_EXIT(-EINVAL);
    }

    /* Validate <dom2,port2>'s current state. */
    switch ( d2->event_channel[port2].state )
    {
    case ECS_FREE:
        /* Grabbing a free port in another domain requires privilege. */
        if ( !IS_PRIV(current->domain) && (dom2 != DOMID_SELF) )
            ERROR_EXIT(-EPERM);
        break;

    case ECS_UNBOUND:
        if ( d2->event_channel[port2].u.unbound.remote_domid != dom1 )
            ERROR_EXIT(-EINVAL);
        break;

    case ECS_INTERDOMAIN:
        if ( d2->event_channel[port2].u.interdomain.remote_dom != d1 )
            ERROR_EXIT(-EINVAL);
        if ( (d2->event_channel[port2].u.interdomain.remote_port != port1) &&
             (bind->port1 != 0) )
            ERROR_EXIT(-EINVAL);
        port1 = d2->event_channel[port2].u.interdomain.remote_port;
        goto out;

    default:
        ERROR_EXIT(-EINVAL);
    }

    /*
     * Everything checked out okay -- bind <dom1,port1> to <dom2,port2>.
     */

    d1->event_channel[port1].u.interdomain.remote_dom  = d2;
    d1->event_channel[port1].u.interdomain.remote_port = (u16)port2;
    d1->event_channel[port1].state                     = ECS_INTERDOMAIN;

    d2->event_channel[port2].u.interdomain.remote_dom  = d1;
    d2->event_channel[port2].u.interdomain.remote_port = (u16)port1;
    d2->event_channel[port2].state                     = ECS_INTERDOMAIN;

 out:
    /* Unlock in reverse, drop both domain references, report the ports. */
    spin_unlock(&d1->event_channel_lock);
    if ( d1 != d2 )
        spin_unlock(&d2->event_channel_lock);

    put_domain(d1);
    put_domain(d2);

    bind->port1 = port1;
    bind->port2 = port2;

    return rc;
#undef ERROR_EXIT
}
236 static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
237 {
238 struct domain *d = current->domain;
239 int port, virq = bind->virq;
241 if ( virq >= ARRAY_SIZE(d->virq_to_evtchn) )
242 return -EINVAL;
244 spin_lock(&d->event_channel_lock);
246 /*
247 * Port 0 is the fallback port for VIRQs that haven't been explicitly
248 * bound yet. The exception is the 'misdirect VIRQ', which is permanently
249 * bound to port 0.
250 */
251 if ( ((port = d->virq_to_evtchn[virq]) != 0) ||
252 (virq == VIRQ_MISDIRECT) ||
253 ((port = get_free_port(d)) < 0) )
254 goto out;
256 d->event_channel[port].state = ECS_VIRQ;
257 d->event_channel[port].u.virq = virq;
259 d->virq_to_evtchn[virq] = port;
261 out:
262 spin_unlock(&d->event_channel_lock);
264 if ( port < 0 )
265 return port;
267 bind->port = port;
268 return 0;
269 }
272 static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
273 {
274 struct domain *d = current->domain;
275 int port, rc, pirq = bind->pirq;
277 if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
278 return -EINVAL;
280 spin_lock(&d->event_channel_lock);
282 if ( ((rc = port = d->pirq_to_evtchn[pirq]) != 0) ||
283 ((rc = port = get_free_port(d)) < 0) )
284 goto out;
286 d->pirq_to_evtchn[pirq] = port;
287 rc = pirq_guest_bind(d, pirq,
288 !!(bind->flags & BIND_PIRQ__WILL_SHARE));
289 if ( rc != 0 )
290 {
291 d->pirq_to_evtchn[pirq] = 0;
292 goto out;
293 }
295 d->event_channel[port].state = ECS_PIRQ;
296 d->event_channel[port].u.pirq = pirq;
298 out:
299 spin_unlock(&d->event_channel_lock);
301 if ( rc < 0 )
302 return rc;
304 bind->port = port;
305 return 0;
306 }
/*
 * Close event channel <port1> in domain <d1>, releasing any PIRQ/VIRQ
 * binding and, for interdomain channels, demoting the remote end back to
 * ECS_UNBOUND. Returns 0 on success or a negative errno. Acquires
 * d1->event_channel_lock (and the remote domain's lock for interdomain
 * channels) internally; the caller must NOT hold either lock.
 */
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain   *d2 = NULL;
    event_channel_t *chn1, *chn2;
    int              port2;
    long             rc = 0;

 again:
    spin_lock(&d1->event_channel_lock);

    chn1 = d1->event_channel;

    /* NB. Port 0 is special (VIRQ_MISDIRECT). Never let it be closed. */
    if ( (port1 <= 0) || (port1 >= d1->max_event_channel) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1[port1].state )
    {
    case ECS_FREE:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        /* Only clear the PIRQ mapping if the guest unbind succeeded. */
        if ( (rc = pirq_guest_unbind(d1, chn1[port1].u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1[port1].u.pirq] = 0;
        break;

    case ECS_VIRQ:
        d1->virq_to_evtchn[chn1[port1].u.virq] = 0;
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1[port1].u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
            {
                /*
                 * Failed to obtain a reference. No matter: d2 must be dying
                 * and so will close this event channel for us.
                 */
                d2 = NULL;
                goto out;
            }

            /*
             * Lock both ends in the global (pointer) order. If d2 orders
             * before d1 we must drop d1's lock, take d2's, and retry from
             * the top — the channel may have changed state meanwhile, hence
             * the re-validation on the 'again' pass (d2 stays referenced
             * and locked across the retry).
             */
            if ( d1 < d2 )
            {
                spin_lock(&d2->event_channel_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->event_channel_lock);
                spin_lock(&d2->event_channel_lock);
                goto again;
            }
        }
        else if ( d2 != chn1[port1].u.interdomain.remote_dom )
        {
            /* The channel was rebound to a different peer while we slept. */
            rc = -EINVAL;
            goto out;
        }

        chn2  = d2->event_channel;
        port2 = chn1[port1].u.interdomain.remote_port;

        /* Both ends are locked: the back-pointers must be consistent. */
        if ( port2 >= d2->max_event_channel )
            BUG();
        if ( chn2[port2].state != ECS_INTERDOMAIN )
            BUG();
        if ( chn2[port2].u.interdomain.remote_dom != d1 )
            BUG();

        /* Demote the remote end: it may be re-bound later. */
        chn2[port2].state = ECS_UNBOUND;
        chn2[port2].u.unbound.remote_domid = d1->id;
        break;

    default:
        BUG();
    }

    chn1[port1].state = ECS_FREE;

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->event_channel_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->event_channel_lock);

    return rc;
}
413 static long evtchn_close(evtchn_close_t *close)
414 {
415 struct domain *d;
416 long rc;
417 domid_t dom = close->dom;
419 if ( dom == DOMID_SELF )
420 dom = current->domain->id;
421 else if ( !IS_PRIV(current->domain) )
422 return -EPERM;
424 if ( (d = find_domain_by_id(dom)) == NULL )
425 return -ESRCH;
427 rc = __evtchn_close(d, close->port);
429 put_domain(d);
430 return rc;
431 }
434 static long evtchn_send(int lport)
435 {
436 struct domain *ld = current->domain, *rd;
437 int rport;
439 spin_lock(&ld->event_channel_lock);
441 if ( unlikely(lport < 0) ||
442 unlikely(lport >= ld->max_event_channel) ||
443 unlikely(ld->event_channel[lport].state != ECS_INTERDOMAIN) )
444 {
445 spin_unlock(&ld->event_channel_lock);
446 return -EINVAL;
447 }
449 rd = ld->event_channel[lport].u.interdomain.remote_dom;
450 rport = ld->event_channel[lport].u.interdomain.remote_port;
452 evtchn_set_pending(rd, rport);
454 spin_unlock(&ld->event_channel_lock);
456 return 0;
457 }
460 static long evtchn_status(evtchn_status_t *status)
461 {
462 struct domain *d;
463 domid_t dom = status->dom;
464 int port = status->port;
465 event_channel_t *chn;
466 long rc = 0;
468 if ( dom == DOMID_SELF )
469 dom = current->domain->id;
470 else if ( !IS_PRIV(current->domain) )
471 return -EPERM;
473 if ( (d = find_domain_by_id(dom)) == NULL )
474 return -ESRCH;
476 spin_lock(&d->event_channel_lock);
478 chn = d->event_channel;
480 if ( (port < 0) || (port >= d->max_event_channel) )
481 {
482 rc = -EINVAL;
483 goto out;
484 }
486 switch ( chn[port].state )
487 {
488 case ECS_FREE:
489 status->status = EVTCHNSTAT_closed;
490 break;
491 case ECS_UNBOUND:
492 status->status = EVTCHNSTAT_unbound;
493 status->u.unbound.dom = chn[port].u.unbound.remote_domid;
494 break;
495 case ECS_INTERDOMAIN:
496 status->status = EVTCHNSTAT_interdomain;
497 status->u.interdomain.dom = chn[port].u.interdomain.remote_dom->id;
498 status->u.interdomain.port = chn[port].u.interdomain.remote_port;
499 break;
500 case ECS_PIRQ:
501 status->status = EVTCHNSTAT_pirq;
502 status->u.pirq = chn[port].u.pirq;
503 break;
504 case ECS_VIRQ:
505 status->status = EVTCHNSTAT_virq;
506 status->u.virq = chn[port].u.virq;
507 break;
508 default:
509 BUG();
510 }
512 out:
513 spin_unlock(&d->event_channel_lock);
514 put_domain(d);
515 return rc;
516 }
519 long do_event_channel_op(evtchn_op_t *uop)
520 {
521 long rc;
522 evtchn_op_t op;
524 if ( copy_from_user(&op, uop, sizeof(op)) != 0 )
525 return -EFAULT;
527 switch ( op.cmd )
528 {
529 case EVTCHNOP_alloc_unbound:
530 rc = evtchn_alloc_unbound(&op.u.alloc_unbound);
531 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
532 rc = -EFAULT; /* Cleaning up here would be a mess! */
533 break;
535 case EVTCHNOP_bind_interdomain:
536 rc = evtchn_bind_interdomain(&op.u.bind_interdomain);
537 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
538 rc = -EFAULT; /* Cleaning up here would be a mess! */
539 break;
541 case EVTCHNOP_bind_virq:
542 rc = evtchn_bind_virq(&op.u.bind_virq);
543 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
544 rc = -EFAULT; /* Cleaning up here would be a mess! */
545 break;
547 case EVTCHNOP_bind_pirq:
548 rc = evtchn_bind_pirq(&op.u.bind_pirq);
549 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
550 rc = -EFAULT; /* Cleaning up here would be a mess! */
551 break;
553 case EVTCHNOP_close:
554 rc = evtchn_close(&op.u.close);
555 break;
557 case EVTCHNOP_send:
558 rc = evtchn_send(op.u.send.local_port);
559 break;
561 case EVTCHNOP_status:
562 rc = evtchn_status(&op.u.status);
563 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
564 rc = -EFAULT;
565 break;
567 default:
568 rc = -ENOSYS;
569 break;
570 }
572 return rc;
573 }
576 int init_event_channels(struct domain *d)
577 {
578 spin_lock_init(&d->event_channel_lock);
579 d->event_channel = xmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t));
580 if ( unlikely(d->event_channel == NULL) )
581 return -ENOMEM;
582 d->max_event_channel = INIT_EVENT_CHANNELS;
583 memset(d->event_channel, 0, INIT_EVENT_CHANNELS * sizeof(event_channel_t));
584 d->event_channel[0].state = ECS_VIRQ;
585 d->event_channel[0].u.virq = VIRQ_MISDIRECT;
586 return 0;
587 }
590 void destroy_event_channels(struct domain *d)
591 {
592 int i;
593 if ( d->event_channel != NULL )
594 {
595 for ( i = 0; i < d->max_event_channel; i++ )
596 (void)__evtchn_close(d, i);
597 xfree(d->event_channel);
598 }
599 }