ia64/xen-unstable: xen/common/event_channel.c @ 15647:cc48264ed647

Merge
author:   Tim Deegan <Tim.Deegan@xensource.com>
date:     Tue Jul 24 14:53:06 2007 +0100
parents:  9fa9346e1c70
children: 96f64f4c42f0

/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/compat.h>
#include <xen/guest_access.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <acm/acm_hooks.h>

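/*
 * Event channels are stored in dynamically allocated buckets of
 * EVTCHNS_PER_BUCKET entries each: port p lives in bucket
 * p / EVTCHNS_PER_BUCKET at index p & (EVTCHNS_PER_BUCKET - 1).
 */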
#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
#define port_is_valid(d,p)                             \
    (((p) >= 0) && ((p) < MAX_EVTCHNS(d)) &&           \
     (bucket_from_port(d,p) != NULL))
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])

#define ERROR_EXIT(_errno)                                           \
    do {                                                             \
        gdprintk(XENLOG_WARNING,                                     \
                 "EVTCHNOP failure: domain %d, error %d, line %d\n", \
                 current->domain->domain_id, (_errno), __LINE__);    \
        rc = (_errno);                                               \
        goto out;                                                    \
    } while ( 0 )

static int virq_is_global(int virq)
{
    int rc;

    ASSERT((virq >= 0) && (virq < NR_VIRQS));

    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
        rc = 0;
        break;
    case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
        rc = arch_virq_is_global(virq);
        break;
    default:
        rc = 1;
        break;
    }

    return rc;
}

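/*
 * Return the lowest free port in domain d, extending the bucket array
 * with a freshly zeroed bucket if every existing port is in use.
 */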
static int get_free_port(struct domain *d)
{
    struct evtchn *chn;
    int            port;

    if ( d->is_dying )
        return -EINVAL;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    if ( port == MAX_EVTCHNS(d) )
        return -ENOSPC;

    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    bucket_from_port(d, port) = chn;

    return port;
}

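/*
 * Allocate a new port in <dom> and leave it in ECS_UNBOUND, recording
 * <remote_dom> as the only domain permitted to bind the other end.
 */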
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int            port;
    domid_t        dom = alloc->dom;
    long           rc;

    if ( (rc = acm_pre_eventchannel_unbound(dom, alloc->remote_dom)) != 0 )
        return rc;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    rcu_unlock_domain(d);

    return rc;
}

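/*
 * Connect a free local port to an ECS_UNBOUND port in the remote domain,
 * leaving both ends in ECS_INTERDOMAIN. Both domains' event-channel locks
 * are held while the two ends are linked.
 */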
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            lport, rport = bind->remote_port;
    domid_t        rdom = bind->remote_dom;
    long           rc;

    if ( (rc = acm_pre_eventchannel_interdomain(rdom)) != 0 )
        return rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring lock of domain with smaller address. */
    if ( ld < rd )
    {
        spin_lock(&ld->evtchn_lock);
        spin_lock(&rd->evtchn_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->evtchn_lock);
        spin_lock(&ld->evtchn_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT(-EINVAL);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT(-EINVAL);

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state                     = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->evtchn_lock);
    if ( ld != rd )
        spin_unlock(&rd->evtchn_lock);

    rcu_unlock_domain(rd);

    return rc;
}

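/*
 * Bind a free port to a VIRQ on the calling domain. Global VIRQs may only
 * be bound via VCPU0 (they can be re-targeted later with
 * EVTCHNOP_bind_vcpu); per-VCPU VIRQs stay on the nominated VCPU.
 */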
static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu   *v;
    struct domain *d = current->domain;
    int            port, virq = bind->virq, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq         = virq;

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}

static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_IPI;
    chn->notify_vcpu_id = vcpu;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}

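/*
 * Bind a free port to a physical IRQ. If pirq_guest_bind() fails, the
 * pirq_to_evtchn entry is rolled back so the port remains ECS_FREE.
 */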
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= ARRAY_SIZE(d->pirq_to_evtchn)) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->evtchn_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}

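/*
 * Close <port1> in <d1>, returning it to ECS_FREE bound to VCPU0. For an
 * interdomain channel the remote end is demoted to ECS_UNBOUND, which may
 * require dropping d1's lock and retrying ("goto again") so the two
 * domains' locks are taken in address order without deadlocking.
 */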
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->evtchn_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(chn1->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
                v->virq_to_evtchn[chn1->u.virq] = 0;
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
                BUG();

            if ( d1 < d2 )
            {
                spin_lock(&d2->evtchn_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->evtchn_lock);
                spin_lock(&d2->evtchn_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_FREE. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->evtchn_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->evtchn_lock);

    return rc;
}

static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}

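/*
 * Notify over local port <lport>. Interdomain sends set the remote end
 * pending (or wake a blocked Xen consumer); IPI sends set the local port
 * pending; sends on an unbound port are silently dropped.
 */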
long evtchn_send(unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    struct vcpu   *rvcpu;
    int            rport, ret = 0;

    spin_lock(&ld->evtchn_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);

    /* Guest cannot send via a Xen-attached event channel. */
    if ( unlikely(lchn->consumer_is_xen) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        rvcpu = rd->vcpu[rchn->notify_vcpu_id];
        if ( rchn->consumer_is_xen )
        {
            /* Xen consumers need notification only if they are blocked. */
            if ( test_and_clear_bit(_VPF_blocked_in_xen,
                                    &rvcpu->pause_flags) )
                vcpu_wake(rvcpu);
        }
        else
        {
            evtchn_set_pending(rvcpu, rport);
        }
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* silently drop the notification */
        break;
    default:
        ret = -EINVAL;
    }

    spin_unlock(&ld->evtchn_lock);

    return ret;
}

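/*
 * Mark <port> pending for delivery to <v>: set the port's bit in the
 * shared evtchn_pending array and, if the port is unmasked, the per-VCPU
 * selector bit covering that word. The VCPU is kicked via
 * vcpu_mark_events_pending() only when the selector bit is newly set.
 * Finally, wake any VCPUs polling for events on this domain.
 */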
void evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    shared_info_t *s = d->shared_info;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, __shared_info_addr(d, s, evtchn_pending)) )
        return;

    if ( !test_bit        (port, __shared_info_addr(d, s, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_GUEST_LONG(d),
                           vcpu_info_addr(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    /* Check if some VCPU might be polling for this event. */
    if ( unlikely(d->is_polling) )
    {
        d->is_polling = 0;
        smp_mb(); /* check vcpu poll-flags /after/ clearing domain poll-flag */
        for_each_vcpu ( d, v )
        {
            if ( !v->is_polling )
                continue;
            v->is_polling = 0;
            vcpu_unblock(v);
        }
    }
}

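/*
 * VIRQ delivery: per-VCPU VIRQs go straight to the nominated VCPU;
 * global VIRQs are looked up via VCPU0's binding table but delivered to
 * whichever VCPU the channel currently notifies.
 */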
void send_guest_vcpu_virq(struct vcpu *v, int virq)
{
    int port;

    ASSERT(!virq_is_global(virq));

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    evtchn_set_pending(v, port);
}

void send_guest_global_virq(struct domain *d, int virq)
{
    int port;
    struct vcpu *v;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    if ( unlikely(d == NULL) )
        return;

    v = d->vcpu[0];
    if ( unlikely(v == NULL) )
        return;

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}

void send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    ASSERT(port != 0);

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}

static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t        dom = status->dom;
    int            port = status->port;
    struct evtchn *chn;
    long           rc = 0;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);
    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom  =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->evtchn_lock);
    rcu_unlock_domain(d);
    return rc;
}

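/*
 * Re-target an existing channel's notifications to another VCPU. Only
 * unbound, interdomain, PIRQ, and global-VIRQ channels may be moved;
 * per-VCPU VIRQs, IPIs, and Xen-attached channels may not.
 */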
long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long           rc = 0;

    if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    /* Guest cannot re-bind a Xen-attached event channel. */
    if ( unlikely(chn->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
    case ECS_PIRQ:
        chn->notify_vcpu_id = vcpu_id;
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->evtchn_lock);
    return rc;
}

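/*
 * Clear <port>'s mask bit and, if the port was pending while masked,
 * propagate it up the selector and upcall levels exactly as
 * evtchn_set_pending() would have done.
 */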
static long evtchn_unmask(evtchn_unmask_t *unmask)
{
    struct domain *d = current->domain;
    shared_info_t *s = d->shared_info;
    int            port = unmask->port;
    struct vcpu   *v;

    spin_lock(&d->evtchn_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->evtchn_lock);
        return -EINVAL;
    }

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending().
     */
    if ( test_and_clear_bit(port, __shared_info_addr(d, s, evtchn_mask)) &&
         test_bit          (port, __shared_info_addr(d, s, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_GUEST_LONG(d),
                            vcpu_info_addr(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    spin_unlock(&d->evtchn_lock);

    return 0;
}

static long evtchn_reset(evtchn_reset_t *r)
{
    domid_t dom = r->dom;
    struct domain *d;
    int i;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    for ( i = 0; port_is_valid(d, i); i++ )
        (void)__evtchn_close(d, i);

    rcu_unlock_domain(d);

    return 0;
}

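/*
 * Hypercall entry point: copy the argument structure in from guest
 * memory, dispatch on <cmd>, and copy results back for the operations
 * that return data.
 */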
long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(&close);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(&unmask);
        break;
    }

    case EVTCHNOP_reset: {
        struct evtchn_reset reset;
        if ( copy_from_guest(&reset, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_reset(&reset);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

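/*
 * Allocate an unbound port whose consumer is Xen itself, notifying
 * <local_vcpu>. Guests cannot close, re-bind, or send on such a channel;
 * only <remote_domid> may bind the other end. Returns the new port, or a
 * negative errno from get_free_port().
 */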
int alloc_unbound_xen_event_channel(
    struct vcpu *local_vcpu, domid_t remote_domid)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;
    int            port;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        goto out;
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    chn->consumer_is_xen = 1;
    chn->notify_vcpu_id = local_vcpu->vcpu_id;
    chn->u.unbound.remote_domid = remote_domid;

 out:
    spin_unlock(&d->evtchn_lock);

    return port;
}

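/*
 * Tear down a Xen-attached channel: clear consumer_is_xen first, since
 * __evtchn_close() refuses to close a channel still owned by Xen.
 */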
void free_xen_event_channel(
    struct vcpu *local_vcpu, int port)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;

    spin_lock(&d->evtchn_lock);
    chn = evtchn_from_port(d, port);
    BUG_ON(!chn->consumer_is_xen);
    chn->consumer_is_xen = 0;
    spin_unlock(&d->evtchn_lock);

    (void)__evtchn_close(d, port);
}

void notify_via_xen_event_channel(int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            rport;

    spin_lock(&ld->evtchn_lock);

    ASSERT(port_is_valid(ld, lport));
    lchn = evtchn_from_port(ld, lport);
    ASSERT(lchn->consumer_is_xen);

    if ( likely(lchn->state == ECS_INTERDOMAIN) )
    {
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
    }

    spin_unlock(&ld->evtchn_lock);
}

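/*
 * Reserve port 0 at domain creation: it is never handed out, so a port
 * number of zero can safely mean "no binding" in tables such as
 * virq_to_evtchn and pirq_to_evtchn.
 */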
int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->evtchn_lock);
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;
    return 0;
}

void evtchn_destroy(struct domain *d)
{
    int i;

    /* After this barrier no new event-channel allocations can occur. */
    BUG_ON(!d->is_dying);
    spin_barrier(&d->evtchn_lock);

    /* Close all existing event channels. */
    for ( i = 0; port_is_valid(d, i); i++ )
    {
        evtchn_from_port(d, i)->consumer_is_xen = 0;
        (void)__evtchn_close(d, i);
    }

    /* Free all event-channel buckets. */
    spin_lock(&d->evtchn_lock);
    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
        xfree(d->evtchn[i]);
    spin_unlock(&d->evtchn_lock);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */