xen/common/event_channel.c @ 14196:9d36026b1b43 (ia64/xen-unstable)

xen: Cleanups and bug fixes after the rcu_lock_domain patch.
Signed-off-by: Keir Fraser <keir@xensource.com>

Author: kfraser@localhost.localdomain
Date:   Thu Mar 01 11:38:55 2007 +0000

/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/compat.h>
#include <xen/guest_access.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <acm/acm_hooks.h>

#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
#define port_is_valid(d,p)    \
    (((p) >= 0) && ((p) < MAX_EVTCHNS(d)) && \
     (bucket_from_port(d,p) != NULL))
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])
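
/*
 * Worked example of the mapping (illustrative; the real value of
 * EVTCHNS_PER_BUCKET is defined in the headers and is assumed here to be
 * 128, a power of two): port 300 lives in bucket 300/128 = 2, at slot
 * 300 & 127 = 44 within that bucket. Buckets are allocated lazily by
 * get_free_port() below, which is why port_is_valid() must also check
 * that the bucket pointer is non-NULL.
 */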

#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                 "EVTCHNOP failure: domain %d, error %d, line %d\n", \
                 current->domain->domain_id, (_errno), __LINE__);   \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )


static int virq_is_global(int virq)
{
    int rc;

    ASSERT((virq >= 0) && (virq < NR_VIRQS));

    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
        rc = 0;
        break;
    case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
        rc = arch_virq_is_global(virq);
        break;
    default:
        rc = 1;
        break;
    }

    return rc;
}


static int get_free_port(struct domain *d)
{
    struct evtchn *chn;
    int            port;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    if ( port == MAX_EVTCHNS(d) )
        return -ENOSPC;

    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    bucket_from_port(d, port) = chn;

    return port;
}


static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int            port;
    domid_t        dom = alloc->dom;
    long           rc;

    if ( (rc = acm_pre_eventchannel_unbound(dom, alloc->remote_dom)) != 0 )
        return rc;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    rcu_unlock_domain(d);

    return rc;
}


static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            lport, rport = bind->remote_port;
    domid_t        rdom = bind->remote_dom;
    long           rc;

    if ( (rc = acm_pre_eventchannel_interdomain(rdom)) != 0 )
        return rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /*
     * Avoid deadlock by always acquiring the two locks in a consistent
     * global order: the domain with the smaller address is locked first.
     */
    if ( ld < rd )
    {
        spin_lock(&ld->evtchn_lock);
        spin_lock(&rd->evtchn_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->evtchn_lock);
        spin_lock(&ld->evtchn_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT(-EINVAL);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT(-EINVAL);

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state                     = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->evtchn_lock);
    if ( ld != rd )
        spin_unlock(&rd->evtchn_lock);

    rcu_unlock_domain(rd);

    return rc;
}
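
/*
 * Guest-side usage sketch (not part of this file; illustrative only). An
 * interdomain channel is set up by one domain allocating an unbound port
 * and its peer binding to it, each via the guest's
 * HYPERVISOR_event_channel_op() hypercall wrapper; error handling omitted:
 *
 *     struct evtchn_alloc_unbound alloc = {
 *         .dom = DOMID_SELF, .remote_dom = peer_domid };
 *     HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);
 *     // ...communicate alloc.port to the peer out of band (e.g. xenstore)...
 *
 *     // In the peer domain:
 *     struct evtchn_bind_interdomain bind = {
 *         .remote_dom = allocator_domid, .remote_port = alloc.port };
 *     HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind);
 *
 * bind.local_port is then connected; because of the conservative
 * evtchn_set_pending() above it may fire immediately, so the peer must be
 * prepared for a spurious first event. (peer_domid and allocator_domid are
 * hypothetical names for this sketch.)
 */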


static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu   *v;
    struct domain *d = current->domain;
    int            port, virq = bind->virq, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq         = virq;

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}


static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_IPI;
    chn->notify_vcpu_id = vcpu;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}


static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= ARRAY_SIZE(d->pirq_to_evtchn)) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->evtchn_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}


static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->evtchn_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(chn1->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
                v->virq_to_evtchn[chn1->u.virq] = 0;
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
            {
                /*
                 * Failed to obtain a reference. No matter: d2 must be dying
                 * and so will close this event channel for us.
                 */
                d2 = NULL;
                goto out;
            }

            if ( d1 < d2 )
            {
                spin_lock(&d2->evtchn_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->evtchn_lock);
                spin_lock(&d2->evtchn_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            BUG_ON(d1 != current->domain);
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->evtchn_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->evtchn_lock);

    return rc;
}


static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}


long evtchn_send(unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    struct vcpu   *rvcpu;
    int            rport, ret = 0;

    spin_lock(&ld->evtchn_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);

    /* Guest cannot send via a Xen-attached event channel. */
    if ( unlikely(lchn->consumer_is_xen) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        rvcpu = rd->vcpu[rchn->notify_vcpu_id];
        if ( rchn->consumer_is_xen )
        {
            /* Xen consumers need notification only if they are blocked. */
            if ( test_and_clear_bit(_VCPUF_blocked_in_xen,
                                    &rvcpu->vcpu_flags) )
                vcpu_wake(rvcpu);
        }
        else
        {
            evtchn_set_pending(rvcpu, rport);
        }
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* Silently drop the notification. */
        break;
    default:
        ret = -EINVAL;
    }

    spin_unlock(&ld->evtchn_lock);

    return ret;
}


void evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    shared_info_t *s = d->shared_info;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, __shared_info_addr(d, s, evtchn_pending)) )
        return;

    if ( !test_bit        (port, __shared_info_addr(d, s, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_GUEST_LONG(d),
                           vcpu_info_addr(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    /* Check if some VCPU might be polling for this event. */
    if ( unlikely(test_bit(_DOMF_polling, &d->domain_flags)) &&
         likely(test_and_clear_bit(_DOMF_polling, &d->domain_flags)) )
    {
        for_each_vcpu ( d, v )
            if ( test_and_clear_bit(_VCPUF_polling, &v->vcpu_flags) )
                vcpu_unblock(v);
    }
}
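
/*
 * For reference, the consumer side: a guest's event dispatch loop scans the
 * two-level structure that evtchn_set_pending() fills in. A minimal sketch
 * (illustrative, not part of this file; handle_port() is a hypothetical
 * per-port handler, and field names follow the public shared_info layout):
 *
 *     struct vcpu_info *vi = &shared_info->vcpu_info[cpu];
 *     while ( xchg(&vi->evtchn_upcall_pending, 0) != 0 )
 *     {
 *         unsigned long sel = xchg(&vi->evtchn_pending_sel, 0);
 *         while ( sel != 0 )
 *         {
 *             unsigned int  l1   = __ffs(sel);
 *             unsigned long bits = shared_info->evtchn_pending[l1] &
 *                                  ~shared_info->evtchn_mask[l1];
 *             sel &= ~(1UL << l1);
 *             while ( bits != 0 )
 *             {
 *                 unsigned int port = (l1 * BITS_PER_LONG) + __ffs(bits);
 *                 bits &= bits - 1;
 *                 clear_bit(port, &shared_info->evtchn_pending[0]);
 *                 handle_port(port);
 *             }
 *         }
 *     }
 *
 * Clearing the selector word before scanning, and each pending bit before
 * its handler runs, mirrors the strict ordering above so no edge is lost.
 */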


void send_guest_vcpu_virq(struct vcpu *v, int virq)
{
    int port;

    ASSERT(!virq_is_global(virq));

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    evtchn_set_pending(v, port);
}


void send_guest_global_virq(struct domain *d, int virq)
{
    int port;
    struct vcpu *v;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    v = d->vcpu[0];
    if ( unlikely(v == NULL) )
        return;

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}


void send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    ASSERT(port != 0);

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}


static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t        dom = status->dom;
    int            port = status->port;
    struct evtchn *chn;
    long           rc = 0;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);
    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom  =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->evtchn_lock);
    rcu_unlock_domain(d);
    return rc;
}


long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long           rc = 0;

    if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    /* Guest cannot re-bind a Xen-attached event channel. */
    if ( unlikely(chn->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
    case ECS_PIRQ:
        chn->notify_vcpu_id = vcpu_id;
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->evtchn_lock);
    return rc;
}


static long evtchn_unmask(evtchn_unmask_t *unmask)
{
    struct domain *d = current->domain;
    shared_info_t *s = d->shared_info;
    int            port = unmask->port;
    struct vcpu   *v;

    spin_lock(&d->evtchn_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->evtchn_lock);
        return -EINVAL;
    }

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending().
     */
    if ( test_and_clear_bit(port, __shared_info_addr(d, s, evtchn_mask)) &&
         test_bit          (port, __shared_info_addr(d, s, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_GUEST_LONG(d),
                            vcpu_info_addr(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    spin_unlock(&d->evtchn_lock);

    return 0;
}


static long evtchn_reset(evtchn_reset_t *r)
{
    domid_t dom = r->dom;
    struct domain *d;
    int i;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    for ( i = 0; port_is_valid(d, i); i++ )
        (void)__evtchn_close(d, i);

    rcu_unlock_domain(d);

    return 0;
}


long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(&close);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(&unmask);
        break;
    }

    case EVTCHNOP_reset: {
        struct evtchn_reset reset;
        if ( copy_from_guest(&reset, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_reset(&reset);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
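
/*
 * From the guest side each case above is reached through the
 * event_channel_op hypercall. A minimal sketch of notifying a bound port
 * (illustrative; assumes the guest's HYPERVISOR_event_channel_op() wrapper):
 *
 *     struct evtchn_send send = { .port = lport };
 *     int rc = HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
 *
 * The return value mirrors do_event_channel_op(); note that a send on an
 * ECS_UNBOUND port is silently dropped rather than reported as an error.
 */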


int alloc_unbound_xen_event_channel(
    struct vcpu *local_vcpu, domid_t remote_domid)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;
    int            port;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        goto out;
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    chn->consumer_is_xen = 1;
    chn->notify_vcpu_id = local_vcpu->vcpu_id;
    chn->u.unbound.remote_domid = remote_domid;

 out:
    spin_unlock(&d->evtchn_lock);

    return port;
}


void free_xen_event_channel(
    struct vcpu *local_vcpu, int port)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;

    spin_lock(&d->evtchn_lock);
    chn = evtchn_from_port(d, port);
    BUG_ON(!chn->consumer_is_xen);
    chn->consumer_is_xen = 0;
    spin_unlock(&d->evtchn_lock);

    (void)__evtchn_close(d, port);
}


void notify_via_xen_event_channel(int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            rport;

    spin_lock(&ld->evtchn_lock);

    ASSERT(port_is_valid(ld, lport));
    lchn = evtchn_from_port(ld, lport);
    ASSERT(lchn->consumer_is_xen);

    if ( likely(lchn->state == ECS_INTERDOMAIN) )
    {
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
    }

    spin_unlock(&ld->evtchn_lock);
}


int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->evtchn_lock);
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;
    return 0;
}


void evtchn_destroy(struct domain *d)
{
    int i;

    for ( i = 0; port_is_valid(d, i); i++ )
    {
        evtchn_from_port(d, i)->consumer_is_xen = 0;
        (void)__evtchn_close(d, i);
    }

    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
        xfree(d->evtchn[i]);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */