ia64/xen-unstable: xen/common/event_channel.c @ 9776:72f9c751d3ea

Replace &foo[0] with foo where the latter seems cleaner
(which is usually, and particularly when it's an argument
to one of the bitops functions).

Signed-off-by: Keir Fraser <keir@xensource.com>
author   kaf24@firebug.cl.cam.ac.uk
date     Wed Apr 19 18:32:20 2006 +0100
parents  050ad9813cdb
children 4e1b8be54311
/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/guest_access.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <acm/acm_hooks.h>
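
/*
 * Event channels are stored in buckets of EVTCHNS_PER_BUCKET entries;
 * d->evtchn[] holds bucket pointers, and buckets are allocated on demand
 * by get_free_port(), so a port is only valid once its bucket exists.
 */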
#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
#define port_is_valid(d,p)    \
    (((p) >= 0) && ((p) < MAX_EVTCHNS) && \
     (bucket_from_port(d,p) != NULL))
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])

#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        DPRINTK("EVTCHNOP failure: domain %d, error %d, line %d\n", \
                current->domain->domain_id, (_errno), __LINE__);    \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )
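
/*
 * Per-vcpu VIRQs (the cases returning 0 below) are bound to a particular
 * vcpu and cannot be re-bound via EVTCHNOP_bind_vcpu; all other VIRQs are
 * global: they must initially be bound on vcpu0 but may be re-bound later.
 */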
static int virq_is_global(int virq)
{
    int rc;

    ASSERT((virq >= 0) && (virq < NR_VIRQS));

    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
        rc = 0;
        break;
    default:
        rc = 1;
        break;
    }

    return rc;
}
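
/*
 * Find the lowest free port in @d, allocating and zeroing a fresh bucket
 * of EVTCHNS_PER_BUCKET channels if every existing port is in use.
 * Returns -ENOSPC once all MAX_EVTCHNS ports exist and are taken.
 */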
static int get_free_port(struct domain *d)
{
    struct evtchn *chn;
    int port;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    if ( port == MAX_EVTCHNS )
        return -ENOSPC;

    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    bucket_from_port(d, port) = chn;

    return port;
}
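
/*
 * EVTCHNOP_alloc_unbound: allocate a port in <dom> that <remote_dom> may
 * later connect to via EVTCHNOP_bind_interdomain. Allocating on behalf
 * of another domain requires privilege.
 */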
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int port;
    domid_t dom = alloc->dom;
    long rc = 0;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    put_domain(d);

    return rc;
}
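
/*
 * EVTCHNOP_bind_interdomain: connect a fresh local port to an ECS_UNBOUND
 * port in the remote domain that was allocated for us, moving both ends
 * to ECS_INTERDOMAIN.
 */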
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int lport, rport = bind->remote_port;
    domid_t rdom = bind->remote_dom;
    long rc = 0;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = find_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring the lock of the domain with the
     * smaller address. */
    if ( ld < rd )
    {
        spin_lock(&ld->evtchn_lock);
        spin_lock(&rd->evtchn_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->evtchn_lock);
        spin_lock(&ld->evtchn_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT(-EINVAL);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT(-EINVAL);

    lchn->u.interdomain.remote_dom = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->evtchn_lock);
    if ( ld != rd )
        spin_unlock(&rd->evtchn_lock);

    put_domain(rd);

    return rc;
}
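
/*
 * EVTCHNOP_bind_virq: bind a fresh local port to VIRQ <virq> on vcpu
 * <vcpu>. Each vcpu may have at most one binding per VIRQ, and global
 * VIRQs must initially be bound on vcpu0.
 */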
static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu *v;
    struct domain *d = current->domain;
    int port, virq = bind->virq, vcpu = bind->vcpu;
    long rc = 0;

    if ( virq >= ARRAY_SIZE(v->virq_to_evtchn) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq = virq;

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}
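
/*
 * EVTCHNOP_bind_ipi: allocate a port for notifications between vcpus of
 * the calling domain, delivered to vcpu <vcpu>.
 */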
static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int port, vcpu = bind->vcpu;
    long rc = 0;

    if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state = ECS_IPI;
    chn->notify_vcpu_id = vcpu;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}
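
/*
 * EVTCHNOP_bind_pirq: bind a fresh local port to physical IRQ <pirq>.
 * The domain must have been granted access to the IRQ; delivery initially
 * goes to vcpu0.
 */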
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int port, pirq = bind->pirq;
    long rc;

    if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->evtchn_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}
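
/*
 * Close <port1> in <d1>, returning it to ECS_FREE. An interdomain channel
 * also needs its remote end reset to ECS_UNBOUND, which requires both
 * domains' locks; they are taken in address order, retrying from 'again'
 * (and re-validating the port) whenever d1's lock had to be dropped.
 */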
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu *v;
    struct evtchn *chn1, *chn2;
    int port2;
    long rc = 0;

 again:
    spin_lock(&d1->evtchn_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);
    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
                v->virq_to_evtchn[chn1->u.virq] = 0;
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
            {
                /*
                 * Failed to obtain a reference. No matter: d2 must be dying
                 * and so will close this event channel for us.
                 */
                d2 = NULL;
                goto out;
            }

            if ( d1 < d2 )
            {
                spin_lock(&d2->evtchn_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->evtchn_lock);
                spin_lock(&d2->evtchn_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_FREE. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            BUG_ON(d1 != current->domain);
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state = ECS_FREE;
    chn1->notify_vcpu_id = 0;

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->evtchn_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->evtchn_lock);

    return rc;
}

/* EVTCHNOP_close: close a single event channel in the calling domain. */
static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}
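
/*
 * EVTCHNOP_send: raise an event on local port <lport>. An interdomain
 * channel notifies the vcpu bound at the remote end, an IPI channel the
 * local bound vcpu; a send on an unbound port is silently dropped.
 */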
long evtchn_send(unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int rport, ret = 0;

    spin_lock(&ld->evtchn_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);
    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn = evtchn_from_port(rd, rport);
        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* silently drop the notification */
        break;
    default:
        ret = -EINVAL;
    }

    spin_unlock(&ld->evtchn_lock);

    return ret;
}
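
/*
 * Mark <port> pending for <v> and kick the vcpu unless the event was
 * already pending, the port is masked, or an upcall is already pending.
 * A vcpu found blocked with upcalls masked may be in SCHEDOP_poll, so it
 * is unblocked instead.
 */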
void evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    shared_info_t *s = d->shared_info;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, s->evtchn_pending) )
        return;

    if ( !test_bit        (port, s->evtchn_mask) &&
         !test_and_set_bit(port / BITS_PER_LONG,
                           &v->vcpu_info->evtchn_pending_sel) &&
         !test_and_set_bit(0, &v->vcpu_info->evtchn_upcall_pending) )
    {
        evtchn_notify(v);
    }
    else if ( unlikely(test_bit(_VCPUF_blocked, &v->vcpu_flags) &&
                       v->vcpu_info->evtchn_upcall_mask) )
    {
        /*
         * Blocked and masked will usually mean that the VCPU executed
         * SCHEDOP_poll. Kick the VCPU in case this port is in its poll list.
         */
        vcpu_unblock(v);
    }
}
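
/* Deliver a per-vcpu VIRQ to <v> via the port <v> has bound to it, if any. */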
void send_guest_vcpu_virq(struct vcpu *v, int virq)
{
    int port;

    ASSERT(!virq_is_global(virq));

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    evtchn_set_pending(v, port);
}
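
/*
 * Deliver a global VIRQ to <d>. The binding is recorded against vcpu0,
 * but delivery honours the channel's current notify_vcpu_id.
 */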
void send_guest_global_virq(struct domain *d, int virq)
{
    int port;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    port = d->vcpu[0]->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}
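
/* Deliver the event bound to physical IRQ <pirq>; a binding must exist. */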
void send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    ASSERT(port != 0);

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}
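
/*
 * EVTCHNOP_status: report the state of <port> in <dom>, plus the vcpu it
 * notifies. Querying another domain requires privilege.
 */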
static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t dom = status->dom;
    int port = status->port;
    struct evtchn *chn;
    long rc = 0;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);
    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->evtchn_lock);
    put_domain(d);
    return rc;
}
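
/*
 * EVTCHNOP_bind_vcpu: redirect notifications for <port> to <vcpu_id>.
 * Allowed for unbound, interdomain, pirq, and global-virq channels;
 * per-vcpu VIRQ and IPI channels stay on their original vcpu.
 */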
long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long rc = 0;

    if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);
    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
    case ECS_PIRQ:
        chn->notify_vcpu_id = vcpu_id;
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->evtchn_lock);
    return rc;
}
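
/*
 * EVTCHNOP_unmask: clear the mask bit for <port> and, if an event was
 * pending while masked, replay its delivery to the notified vcpu.
 */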
static long evtchn_unmask(evtchn_unmask_t *unmask)
{
    struct domain *d = current->domain;
    shared_info_t *s = d->shared_info;
    int port = unmask->port;
    struct vcpu *v;

    spin_lock(&d->evtchn_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->evtchn_lock);
        return -EINVAL;
    }

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending().
     */
    if ( test_and_clear_bit(port, s->evtchn_mask) &&
         test_bit          (port, s->evtchn_pending) &&
         !test_and_set_bit (port / BITS_PER_LONG,
                            &v->vcpu_info->evtchn_pending_sel) &&
         !test_and_set_bit (0, &v->vcpu_info->evtchn_upcall_pending) )
    {
        evtchn_notify(v);
    }

    spin_unlock(&d->evtchn_lock);

    return 0;
}
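
/*
 * Hypercall entry point: copy the argument block in from the guest, run
 * the ACM access-control hook, dispatch on op.cmd, and copy results back
 * to the guest for subcommands that return data.
 */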
long do_event_channel_op(GUEST_HANDLE(evtchn_op_t) uop)
{
    long rc;
    struct evtchn_op op;

    if ( copy_from_guest(&op, uop, 1) != 0 )
        return -EFAULT;

    if ( acm_pre_event_channel(&op) )
        return -EACCES;

    switch ( op.cmd )
    {
    case EVTCHNOP_alloc_unbound:
        rc = evtchn_alloc_unbound(&op.u.alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_bind_interdomain:
        rc = evtchn_bind_interdomain(&op.u.bind_interdomain);
        if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_bind_virq:
        rc = evtchn_bind_virq(&op.u.bind_virq);
        if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_bind_ipi:
        rc = evtchn_bind_ipi(&op.u.bind_ipi);
        if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_bind_pirq:
        rc = evtchn_bind_pirq(&op.u.bind_pirq);
        if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_close:
        rc = evtchn_close(&op.u.close);
        break;

    case EVTCHNOP_send:
        rc = evtchn_send(op.u.send.port);
        break;

    case EVTCHNOP_status:
        rc = evtchn_status(&op.u.status);
        if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
            rc = -EFAULT;
        break;

    case EVTCHNOP_bind_vcpu:
        rc = evtchn_bind_vcpu(op.u.bind_vcpu.port, op.u.bind_vcpu.vcpu);
        break;

    case EVTCHNOP_unmask:
        rc = evtchn_unmask(&op.u.unmask);
        break;

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
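
/* Raise an event on a hypervisor-reserved (ECS_RESERVED) port. */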
void evtchn_notify_reserved_port(struct domain *d, int port)
{
    struct evtchn *chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}
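
/*
 * Domain-creation setup: port 0 is allocated up front and reserved for
 * hypervisor use.
 */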
int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->evtchn_lock);
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;
    return 0;
}
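
/*
 * Domain-destruction teardown: close every valid port, then free the
 * buckets themselves.
 */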
void evtchn_destroy(struct domain *d)
{
    int i;

    for ( i = 0; port_is_valid(d, i); i++ )
        (void)__evtchn_close(d, i);

    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
        xfree(d->evtchn[i]);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */