ia64/xen-unstable: xen/common/event_channel.c @ 15927:b7eb2bb9b625

IRQ injection changes for HVM PCI passthru.
Signed-off-by: Allen Kay <allen.m.kay@intel.com>
Signed-off-by: Guy Zana <guy@neocleus.com>

author    kfraser@localhost.localdomain
date      Tue Sep 18 16:09:19 2007 +0100 (2007-09-18)
parents   fa4d44c9d9f6
children  8d5517355aa8
/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/compat.h>
#include <xen/guest_access.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <xsm/xsm.h>
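
/*
 * The per-domain event-channel table is an array of buckets, each holding
 * EVTCHNS_PER_BUCKET struct evtchn entries.  Buckets are allocated lazily
 * by get_free_port(), so a port is valid only once its bucket exists.
 */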
#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
#define port_is_valid(d,p)    \
    (((p) >= 0) && ((p) < MAX_EVTCHNS(d)) && \
     (bucket_from_port(d,p) != NULL))
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])

#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                 "EVTCHNOP failure: domain %d, error %d, line %d\n", \
                 current->domain->domain_id, (_errno), __LINE__);   \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )
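
/*
 * A global VIRQ is initially bound to VCPU0 and may later be re-targeted
 * with EVTCHNOP_bind_vcpu; a per-VCPU VIRQ (e.g. VIRQ_TIMER) binds
 * directly to the requested VCPU and cannot be re-targeted.
 */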
static int virq_is_global(int virq)
{
    int rc;

    ASSERT((virq >= 0) && (virq < NR_VIRQS));

    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
        rc = 0;
        break;
    case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
        rc = arch_virq_is_global(virq);
        break;
    default:
        rc = 1;
        break;
    }

    return rc;
}
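
/*
 * Find the lowest free port in @d, allocating a fresh bucket (and its
 * XSM security labels) if every existing port is in use.  The caller
 * must prevent concurrent allocation, normally by holding d->evtchn_lock.
 */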
static int get_free_port(struct domain *d)
{
    struct evtchn *chn;
    int            port;
    int            i, j;

    if ( d->is_dying )
        return -EINVAL;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    if ( port == MAX_EVTCHNS(d) )
        return -ENOSPC;

    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    bucket_from_port(d, port) = chn;

    for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
    {
        if ( xsm_alloc_security_evtchn(&chn[i]) )
        {
            for ( j = 0; j < i; j++ )
            {
                xsm_free_security_evtchn(&chn[j]);
            }
            xfree(chn);
            return -ENOMEM;
        }
    }

    return port;
}
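
/*
 * EVTCHNOP_alloc_unbound: allocate a port in <dom> that a specified
 * remote domain may later connect to with EVTCHNOP_bind_interdomain.
 * Only a privileged domain may allocate on behalf of another domain.
 */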
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int            port;
    domid_t        dom = alloc->dom;
    long           rc;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);
    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_unbound(d, chn, alloc->remote_dom);
    if ( rc )
        goto out;

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->evtchn_lock);
    rcu_unlock_domain(d);

    return rc;
}
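
/*
 * EVTCHNOP_bind_interdomain: connect a new local port to an ECS_UNBOUND
 * remote port that was allocated for this domain.  Both domains'
 * event-channel locks are taken, in a fixed order to avoid deadlock.
 */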
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            lport, rport = bind->remote_port;
    domid_t        rdom = bind->remote_dom;
    long           rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring lock of domain with smaller address. */
    if ( ld < rd )
    {
        spin_lock(&ld->evtchn_lock);
        spin_lock(&rd->evtchn_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->evtchn_lock);
        spin_lock(&ld->evtchn_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT(-EINVAL);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT(-EINVAL);

    rc = xsm_evtchn_interdomain(ld, lchn, rd, rchn);
    if ( rc )
        goto out;

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state                     = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->evtchn_lock);
    if ( ld != rd )
        spin_unlock(&rd->evtchn_lock);

    rcu_unlock_domain(rd);

    return rc;
}
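
/*
 * EVTCHNOP_bind_virq: bind a port to a virtual IRQ on the calling
 * domain.  Each (VCPU, VIRQ) pair may be bound to at most one port.
 */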
static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu   *v;
    struct domain *d = current->domain;
    int            port, virq = bind->virq, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq         = virq;

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}
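
/*
 * EVTCHNOP_bind_ipi: allocate a port for notifying a specific VCPU of
 * the calling domain (used as an inter-VCPU "IPI" mechanism).
 */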
static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_IPI;
    chn->notify_vcpu_id = vcpu;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}
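
/*
 * EVTCHNOP_bind_pirq: bind a port to a physical IRQ.  The domain must
 * have been granted access to the IRQ, and delivery is initially routed
 * to VCPU0 via pirq_guest_bind().
 */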
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= ARRAY_SIZE(d->pirq_to_evtchn)) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->evtchn_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}
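
/*
 * Close @port1 in @d1, undoing whatever binding it currently has.  For
 * an interdomain channel the remote end is reset to ECS_UNBOUND, which
 * requires taking both domains' locks in address order (hence the
 * unlock/retry dance below).
 */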
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->evtchn_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(chn1->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
                v->virq_to_evtchn[chn1->u.virq] = 0;
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
                BUG();

            if ( d1 < d2 )
            {
                spin_lock(&d2->evtchn_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->evtchn_lock);
                spin_lock(&d2->evtchn_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

    xsm_evtchn_close_post(chn1);

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->evtchn_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->evtchn_lock);

    return rc;
}

static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}
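
/*
 * EVTCHNOP_send: raise a notification on a local port.  Interdomain
 * channels set the remote end pending; IPI channels notify a local
 * VCPU; sends on unbound ports are silently dropped.
 */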
long evtchn_send(unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    struct vcpu   *rvcpu;
    int            rport, ret = 0;

    spin_lock(&ld->evtchn_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);

    /* Guest cannot send via a Xen-attached event channel. */
    if ( unlikely(lchn->consumer_is_xen) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    ret = xsm_evtchn_send(ld, lchn);
    if ( ret )
        goto out;

    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        rvcpu = rd->vcpu[rchn->notify_vcpu_id];
        if ( rchn->consumer_is_xen )
        {
            /* Xen consumers need notification only if they are blocked. */
            if ( test_and_clear_bit(_VPF_blocked_in_xen,
                                    &rvcpu->pause_flags) )
                vcpu_wake(rvcpu);
        }
        else
        {
            evtchn_set_pending(rvcpu, rport);
        }
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* silently drop the notification */
        break;
    default:
        ret = -EINVAL;
    }

 out:
    spin_unlock(&ld->evtchn_lock);

    return ret;
}
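
/*
 * Mark @port pending for @v using the two-level shared-info bitmaps:
 * first the per-port evtchn_pending bit, then (if the port is not
 * masked) the VCPU's evtchn_pending_sel selector word and its global
 * event flag via vcpu_mark_events_pending().
 */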
void evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    shared_info_t *s = d->shared_info;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, __shared_info_addr(d, s, evtchn_pending)) )
        return;

    if ( !test_bit        (port, __shared_info_addr(d, s, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_GUEST_LONG(d),
                           vcpu_info_addr(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    /* Check if some VCPU might be polling for this event. */
    if ( unlikely(d->is_polling) )
    {
        d->is_polling = 0;
        smp_mb(); /* check vcpu poll-flags /after/ clearing domain poll-flag */
        for_each_vcpu ( d, v )
        {
            if ( !v->is_polling )
                continue;
            v->is_polling = 0;
            vcpu_unblock(v);
        }
    }
}
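
/*
 * VIRQ delivery.  Per-VCPU VIRQs go directly to the bound VCPU; global
 * VIRQs are looked up through VCPU0's binding table and delivered to
 * whichever VCPU the channel currently notifies.
 */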
void send_guest_vcpu_virq(struct vcpu *v, int virq)
{
    int port;

    ASSERT(!virq_is_global(virq));

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    evtchn_set_pending(v, port);
}

void send_guest_global_virq(struct domain *d, int virq)
{
    int port;
    struct vcpu *v;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    if ( unlikely(d == NULL) )
        return;

    v = d->vcpu[0];
    if ( unlikely(v == NULL) )
        return;

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}

void send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    ASSERT(port != 0);

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}
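
/*
 * EVTCHNOP_status: report the current binding of a port.  Querying
 * another domain's table requires privilege.
 */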
static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t        dom = status->dom;
    int            port = status->port;
    struct evtchn *chn;
    long           rc = 0;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_status(d, chn);
    if ( rc )
        goto out;

    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom  =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->evtchn_lock);
    rcu_unlock_domain(d);
    return rc;
}
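
/*
 * EVTCHNOP_bind_vcpu: re-target notifications for an existing port to a
 * different VCPU.  Only unbound, interdomain, PIRQ, and global-VIRQ
 * channels may be re-targeted.
 */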
long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long           rc = 0;

    if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    /* Guest cannot re-bind a Xen-attached event channel. */
    if ( unlikely(chn->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
    case ECS_PIRQ:
        chn->notify_vcpu_id = vcpu_id;
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->evtchn_lock);
    return rc;
}
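
/*
 * EVTCHNOP_unmask: clear a port's mask bit and, if an event was pending
 * while masked, re-run the delivery logic so the bound VCPU sees it.
 */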
static long evtchn_unmask(evtchn_unmask_t *unmask)
{
    struct domain *d = current->domain;
    shared_info_t *s = d->shared_info;
    int            port = unmask->port;
    struct vcpu   *v;

    spin_lock(&d->evtchn_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->evtchn_lock);
        return -EINVAL;
    }

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending().
     */
    if ( test_and_clear_bit(port, __shared_info_addr(d, s, evtchn_mask)) &&
         test_bit          (port, __shared_info_addr(d, s, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_GUEST_LONG(d),
                            vcpu_info_addr(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    spin_unlock(&d->evtchn_lock);

    return 0;
}
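
/*
 * EVTCHNOP_reset: close every open port of a domain (port 0, which is
 * reserved, survives the sweep).
 */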
static long evtchn_reset(evtchn_reset_t *r)
{
    domid_t dom = r->dom;
    struct domain *d;
    int i;
    int rc;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    rc = xsm_evtchn_reset(current->domain, d);
    if ( rc )
    {
        rcu_unlock_domain(d);
        return rc;
    }

    for ( i = 0; port_is_valid(d, i); i++ )
        (void)__evtchn_close(d, i);

    rcu_unlock_domain(d);

    return 0;
}
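
/*
 * Hypercall entry point.  Each sub-op copies its argument structure in
 * from guest memory, dispatches to the handler above, and copies any
 * output fields back out.
 */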
long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(&close);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(&unmask);
        break;
    }

    case EVTCHNOP_reset: {
        struct evtchn_reset reset;
        if ( copy_from_guest(&reset, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_reset(&reset);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
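
/*
 * Xen-attached event channels: Xen itself is the local consumer (used,
 * e.g., for HVM device-model notification ports).  Guests cannot close,
 * re-bind, or send on such ports; Xen notifies the remote end directly
 * via notify_via_xen_event_channel() below.
 */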
int alloc_unbound_xen_event_channel(
    struct vcpu *local_vcpu, domid_t remote_domid)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;
    int            port;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        goto out;
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    chn->consumer_is_xen = 1;
    chn->notify_vcpu_id = local_vcpu->vcpu_id;
    chn->u.unbound.remote_domid = remote_domid;

 out:
    spin_unlock(&d->evtchn_lock);

    return port;
}

void free_xen_event_channel(
    struct vcpu *local_vcpu, int port)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;

    spin_lock(&d->evtchn_lock);
    chn = evtchn_from_port(d, port);
    BUG_ON(!chn->consumer_is_xen);
    chn->consumer_is_xen = 0;
    spin_unlock(&d->evtchn_lock);

    (void)__evtchn_close(d, port);
}

void notify_via_xen_event_channel(int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            rport;

    spin_lock(&ld->evtchn_lock);

    ASSERT(port_is_valid(ld, lport));
    lchn = evtchn_from_port(ld, lport);
    ASSERT(lchn->consumer_is_xen);

    if ( likely(lchn->state == ECS_INTERDOMAIN) )
    {
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
    }

    spin_unlock(&ld->evtchn_lock);
}
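
/*
 * Domain lifecycle: evtchn_init() reserves port 0 so that it can never
 * be handed out to a guest; evtchn_destroy() tears down all remaining
 * channels once the domain is dying.
 */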
int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->evtchn_lock);
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;
    return 0;
}
void evtchn_destroy(struct domain *d)
{
    int i;

    /* After this barrier no new event-channel allocations can occur. */
    BUG_ON(!d->is_dying);
    spin_barrier(&d->evtchn_lock);

    /* Close all existing event channels. */
    for ( i = 0; port_is_valid(d, i); i++ )
    {
        /* Clear so __evtchn_close() will not refuse Xen-attached ports. */
        evtchn_from_port(d, i)->consumer_is_xen = 0;
        (void)__evtchn_close(d, i);
    }

    /* Free all event-channel buckets. */
    spin_lock(&d->evtchn_lock);
    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
    {
        xsm_free_security_evtchn(d->evtchn[i]);
        xfree(d->evtchn[i]);
    }
    spin_unlock(&d->evtchn_lock);
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */