ia64/xen-unstable

view xen/common/event_channel.c @ 3297:53a0cc27ab17

bitkeeper revision 1.1159.205.1 (41c1b63cODLXVNkV2OsvJtzvIRxtKg)

Remove VIRQ_MISDIRECT altogether.
author cl349@arcadians.cl.cam.ac.uk
date Thu Dec 16 16:22:20 2004 +0000 (2004-12-16)
parents 37cb59b9ddfd
children cd90fe329149

/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2004, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>

#include <public/xen.h>
#include <public/event_channel.h>

#define INIT_EVENT_CHANNELS   16
#define MAX_EVENT_CHANNELS  1024
#define EVENT_CHANNELS_SPREAD 32

/*
 * Find a free event-channel port for <ed>, growing the domain's
 * event-channel table on demand (failing with -ENOSPC once it has
 * reached MAX_EVENT_CHANNELS entries). The search starts at
 * ed->eid * EVENT_CHANNELS_SPREAD, giving each exec domain its own
 * starting region of the port space.
 */
static int get_free_port(struct exec_domain *ed)
{
    struct domain *d = ed->domain;
    int max, port;
    event_channel_t *chn;

    max = d->max_event_channel;
    chn = d->event_channel;

    for ( port = ed->eid * EVENT_CHANNELS_SPREAD; port < max; port++ )
        if ( chn[port].state == ECS_FREE )
            break;

    if ( port >= max )
    {
        if ( max == MAX_EVENT_CHANNELS )
            return -ENOSPC;

        if ( port == 0 )
            max = INIT_EVENT_CHANNELS;
        else
            max = port + EVENT_CHANNELS_SPREAD;

        chn = xmalloc(max * sizeof(event_channel_t));
        if ( unlikely(chn == NULL) )
            return -ENOMEM;

        memset(chn, 0, max * sizeof(event_channel_t));

        if ( d->event_channel != NULL )
        {
            memcpy(chn, d->event_channel, d->max_event_channel *
                   sizeof(event_channel_t));
            xfree(d->event_channel);
        }

        d->event_channel = chn;
        d->max_event_channel = max;
    }

    return port;
}

static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct domain *d = current->domain;
    int port;

    spin_lock(&d->event_channel_lock);

    if ( (port = get_free_port(current)) >= 0 )
    {
        d->event_channel[port].state = ECS_UNBOUND;
        d->event_channel[port].u.unbound.remote_domid = alloc->dom;
    }

    spin_unlock(&d->event_channel_lock);

    if ( port < 0 )
        return port;

    alloc->port = port;
    return 0;
}

static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
#define ERROR_EXIT(_errno) do { rc = (_errno); goto out; } while ( 0 )
    struct domain *d1, *d2;
    struct exec_domain *ed1, *ed2;
    int port1 = bind->port1, port2 = bind->port2;
    domid_t dom1 = bind->dom1, dom2 = bind->dom2;
    long rc = 0;

    if ( !IS_PRIV(current->domain) && (dom1 != DOMID_SELF) )
        return -EPERM;

    if ( (port1 < 0) || (port2 < 0) )
        return -EINVAL;

    if ( dom1 == DOMID_SELF )
        dom1 = current->domain->id;
    if ( dom2 == DOMID_SELF )
        dom2 = current->domain->id;

    if ( ((d1 = find_domain_by_id(dom1)) == NULL) ||
         ((d2 = find_domain_by_id(dom2)) == NULL) )
    {
        if ( d1 != NULL )
            put_domain(d1);
        return -ESRCH;
    }

    ed1 = d1->exec_domain[0];   /* XXX */
    ed2 = d2->exec_domain[0];   /* XXX */

    /* Avoid deadlock by first acquiring the lock of the domain with the
     * smaller address. */
    if ( d1 < d2 )
    {
        spin_lock(&d1->event_channel_lock);
        spin_lock(&d2->event_channel_lock);
    }
    else
    {
        if ( d1 != d2 )
            spin_lock(&d2->event_channel_lock);
        spin_lock(&d1->event_channel_lock);
    }

    /* Obtain, or ensure that we already have, a valid <port1>. */
    if ( port1 == 0 )
    {
        if ( (port1 = get_free_port(ed1)) < 0 )
            ERROR_EXIT(port1);
    }
    else if ( port1 >= d1->max_event_channel )
        ERROR_EXIT(-EINVAL);

    /* Obtain, or ensure that we already have, a valid <port2>. */
    if ( port2 == 0 )
    {
        /* Make port1 non-free while we allocate port2 (in case dom1==dom2). */
        u16 tmp = d1->event_channel[port1].state;
        d1->event_channel[port1].state = ECS_INTERDOMAIN;
        port2 = get_free_port(ed2);
        d1->event_channel[port1].state = tmp;
        if ( port2 < 0 )
            ERROR_EXIT(port2);
    }
    else if ( port2 >= d2->max_event_channel )
        ERROR_EXIT(-EINVAL);

    /* Validate <dom1,port1>'s current state. */
    switch ( d1->event_channel[port1].state )
    {
    case ECS_FREE:
        break;

    case ECS_UNBOUND:
        if ( d1->event_channel[port1].u.unbound.remote_domid != dom2 )
            ERROR_EXIT(-EINVAL);
        break;

    case ECS_INTERDOMAIN:
        if ( d1->event_channel[port1].u.interdomain.remote_dom != ed2 )
            ERROR_EXIT(-EINVAL);
        if ( (d1->event_channel[port1].u.interdomain.remote_port != port2) &&
             (bind->port2 != 0) )
            ERROR_EXIT(-EINVAL);
        port2 = d1->event_channel[port1].u.interdomain.remote_port;
        goto out;

    default:
        ERROR_EXIT(-EINVAL);
    }

    /* Validate <dom2,port2>'s current state. */
    switch ( d2->event_channel[port2].state )
    {
    case ECS_FREE:
        if ( !IS_PRIV(current->domain) && (dom2 != DOMID_SELF) )
            ERROR_EXIT(-EPERM);
        break;

    case ECS_UNBOUND:
        if ( d2->event_channel[port2].u.unbound.remote_domid != dom1 )
            ERROR_EXIT(-EINVAL);
        break;

    case ECS_INTERDOMAIN:
        if ( d2->event_channel[port2].u.interdomain.remote_dom != ed1 )
            ERROR_EXIT(-EINVAL);
        if ( (d2->event_channel[port2].u.interdomain.remote_port != port1) &&
             (bind->port1 != 0) )
            ERROR_EXIT(-EINVAL);
        port1 = d2->event_channel[port2].u.interdomain.remote_port;
        goto out;

    default:
        ERROR_EXIT(-EINVAL);
    }

    /*
     * Everything checked out okay -- bind <dom1,port1> to <dom2,port2>.
     */

    d1->event_channel[port1].u.interdomain.remote_dom = ed2;
    d1->event_channel[port1].u.interdomain.remote_port = (u16)port2;
    d1->event_channel[port1].state = ECS_INTERDOMAIN;

    d2->event_channel[port2].u.interdomain.remote_dom = ed1;
    d2->event_channel[port2].u.interdomain.remote_port = (u16)port1;
    d2->event_channel[port2].state = ECS_INTERDOMAIN;

 out:
    spin_unlock(&d1->event_channel_lock);
    if ( d1 != d2 )
        spin_unlock(&d2->event_channel_lock);

    put_domain(d1);
    put_domain(d2);

    bind->port1 = port1;
    bind->port2 = port2;

    return rc;
#undef ERROR_EXIT
}

static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct exec_domain *ed = current;
    struct domain *d = ed->domain;
    int port, virq = bind->virq;

    if ( virq >= ARRAY_SIZE(ed->virq_to_evtchn) )
        return -EINVAL;

    spin_lock(&d->event_channel_lock);

    /*
     * Port 0 is the fallback port for VIRQs that haven't been explicitly
     * bound yet.
     */
    if ( ((port = ed->virq_to_evtchn[virq]) != 0) ||
         ((port = get_free_port(ed)) < 0) )
        goto out;

    d->event_channel[port].state = ECS_VIRQ;
    d->event_channel[port].u.virq = virq;

    ed->virq_to_evtchn[virq] = port;

 out:
    spin_unlock(&d->event_channel_lock);

    if ( port < 0 )
        return port;

    bind->port = port;
    return 0;
}

static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct exec_domain *ed = current;
    struct domain *d = ed->domain;
    int port, ipi_edom = bind->ipi_edom;

    spin_lock(&d->event_channel_lock);

    if ( (port = get_free_port(ed)) >= 0 )
    {
        d->event_channel[port].state = ECS_IPI;
        d->event_channel[port].u.ipi_edom = ipi_edom;
    }

    spin_unlock(&d->event_channel_lock);

    if ( port < 0 )
        return port;

    bind->port = port;
    return 0;
}

static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct domain *d = current->domain;
    int port, rc, pirq = bind->pirq;

    if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
        return -EINVAL;

    spin_lock(&d->event_channel_lock);

    if ( ((rc = port = d->pirq_to_evtchn[pirq]) != 0) ||
         ((rc = port = get_free_port(current)) < 0) )
        goto out;

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(current, pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    d->event_channel[port].state = ECS_PIRQ;
    d->event_channel[port].u.pirq = pirq;

 out:
    spin_unlock(&d->event_channel_lock);

    if ( rc < 0 )
        return rc;

    bind->port = port;
    return 0;
}

/*
 * Close event channel <port1> of domain <d1>: PIRQ and VIRQ bindings are
 * torn down, and the remote end of an interdomain channel reverts to
 * ECS_UNBOUND.
 */
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct exec_domain *ed;
    event_channel_t *chn1, *chn2;
    int port2;
    long rc = 0;

 again:
    spin_lock(&d1->event_channel_lock);

    chn1 = d1->event_channel;

    if ( (port1 < 0) || (port1 >= d1->max_event_channel) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1[port1].state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        if ( (rc = pirq_guest_unbind(d1, chn1[port1].u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1[port1].u.pirq] = 0;
        break;

    case ECS_VIRQ:
        /* XXX could store exec_domain in chn1[port1].u */
        for_each_exec_domain(d1, ed)
            if ( ed->virq_to_evtchn[chn1[port1].u.virq] == port1 )
                ed->virq_to_evtchn[chn1[port1].u.virq] = 0;
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1[port1].u.interdomain.remote_dom->domain;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
            {
                /*
                 * Failed to obtain a reference. No matter: d2 must be dying
                 * and so will close this event channel for us.
                 */
                d2 = NULL;
                goto out;
            }

            if ( d1 < d2 )
            {
                spin_lock(&d2->event_channel_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->event_channel_lock);
                spin_lock(&d2->event_channel_lock);
                goto again;
            }
        }
        else if ( d2 != chn1[port1].u.interdomain.remote_dom->domain )
        {
            rc = -EINVAL;
            goto out;
        }

        chn2 = d2->event_channel;
        port2 = chn1[port1].u.interdomain.remote_port;

        if ( port2 >= d2->max_event_channel )
            BUG();
        if ( chn2[port2].state != ECS_INTERDOMAIN )
            BUG();
        if ( chn2[port2].u.interdomain.remote_dom->domain != d1 )
            BUG();

        chn2[port2].state = ECS_UNBOUND;
        chn2[port2].u.unbound.remote_domid = d1->id;
        break;

    default:
        BUG();
    }

    chn1[port1].state = ECS_FREE;

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->event_channel_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->event_channel_lock);

    return rc;
}

static long evtchn_close(evtchn_close_t *close)
{
    struct domain *d;
    long rc;
    domid_t dom = close->dom;

    if ( dom == DOMID_SELF )
        dom = current->domain->id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return -ESRCH;

    rc = __evtchn_close(d, close->port);

    put_domain(d);
    return rc;
}

/*
 * Send an event on local port <lport>. Only ECS_INTERDOMAIN and ECS_IPI
 * channels can be signalled this way; other channel types return -EINVAL.
 */
long evtchn_send(int lport)
{
    struct domain *ld = current->domain;
    struct exec_domain *rd;
    int rport, ret = 0;

    spin_lock(&ld->event_channel_lock);

    if ( unlikely(lport < 0) ||
         unlikely(lport >= ld->max_event_channel) )
    {
        spin_unlock(&ld->event_channel_lock);
        return -EINVAL;
    }

    switch ( ld->event_channel[lport].state )
    {
    case ECS_INTERDOMAIN:
        rd = ld->event_channel[lport].u.interdomain.remote_dom;
        rport = ld->event_channel[lport].u.interdomain.remote_port;

        evtchn_set_pending(rd, rport);
        break;
    case ECS_IPI:
        rd = ld->exec_domain[ld->event_channel[lport].u.ipi_edom];
        if ( rd )
            evtchn_set_pending(rd, lport);
        else
            ret = -EINVAL;
        break;
    default:
        ret = -EINVAL;
    }

    spin_unlock(&ld->event_channel_lock);

    return ret;
}

static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t dom = status->dom;
    int port = status->port;
    event_channel_t *chn;
    long rc = 0;

    if ( dom == DOMID_SELF )
        dom = current->domain->id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->event_channel_lock);

    chn = d->event_channel;

    if ( (port < 0) || (port >= d->max_event_channel) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn[port].state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn[port].u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom =
            chn[port].u.interdomain.remote_dom->domain->id;
        status->u.interdomain.port = chn[port].u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn[port].u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn[port].u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        status->u.ipi_edom = chn[port].u.ipi_edom;
        break;
    default:
        BUG();
    }

 out:
    spin_unlock(&d->event_channel_lock);
    put_domain(d);
    return rc;
}

/* Hypercall entry point: dispatch an EVTCHNOP_* request from the guest. */
long do_event_channel_op(evtchn_op_t *uop)
{
    long rc;
    evtchn_op_t op;

    if ( copy_from_user(&op, uop, sizeof(op)) != 0 )
        return -EFAULT;

    switch ( op.cmd )
    {
    case EVTCHNOP_alloc_unbound:
        rc = evtchn_alloc_unbound(&op.u.alloc_unbound);
        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_bind_interdomain:
        rc = evtchn_bind_interdomain(&op.u.bind_interdomain);
        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_bind_virq:
        rc = evtchn_bind_virq(&op.u.bind_virq);
        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_bind_ipi:
        rc = evtchn_bind_ipi(&op.u.bind_ipi);
        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_bind_pirq:
        rc = evtchn_bind_pirq(&op.u.bind_pirq);
        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_close:
        rc = evtchn_close(&op.u.close);
        break;

    case EVTCHNOP_send:
        rc = evtchn_send(op.u.send.local_port);
        break;

    case EVTCHNOP_status:
        rc = evtchn_status(&op.u.status);
        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
            rc = -EFAULT;
        break;

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

int init_event_channels(struct domain *d)
{
    spin_lock_init(&d->event_channel_lock);
    /* Call get_free_port to initialize d->event_channel. */
    if ( get_free_port(d->exec_domain[0]) != 0 )
        return -EINVAL;
    d->event_channel[0].state = ECS_RESERVED;
    return 0;
}

void destroy_event_channels(struct domain *d)
{
    int i;
    if ( d->event_channel != NULL )
    {
        for ( i = 0; i < d->max_event_channel; i++ )
            (void)__evtchn_close(d, i);
        xfree(d->event_channel);
    }
}
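
For reference, guests and control tools reach this code through the EVTCHNOP_* commands dispatched by do_event_channel_op() above. The fragment below is a minimal caller-side sketch only: it assumes the public/event_channel.h definitions used in this file plus a guest-side HYPERVISOR_event_channel_op() hypercall wrapper, and example_alloc_and_notify() and remote_dom are illustrative names rather than anything defined in Xen.

/* Hypothetical guest-side helper (sketch): allocate an unbound port that
 * <remote_dom> may later bind with EVTCHNOP_bind_interdomain, then notify it. */
static int example_alloc_and_notify(domid_t remote_dom)
{
    evtchn_op_t op;
    int local_port;

    op.cmd = EVTCHNOP_alloc_unbound;
    op.u.alloc_unbound.dom = remote_dom;      /* domain expected to bind the other end */
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
        return -1;
    local_port = op.u.alloc_unbound.port;     /* port chosen by evtchn_alloc_unbound() */

    /* ... once the peer has bound its end of the channel ... */
    op.cmd = EVTCHNOP_send;
    op.u.send.local_port = local_port;
    return HYPERVISOR_event_channel_op(&op);  /* handled by evtchn_send() above */
}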