direct-io.hg

view xen/common/event_channel.c @ 10949:ffa5b2975dff

[XEN] Add Xen-attached event channels, which will be used
by HVM for the ioreq_packet port.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Aug 04 20:34:44 2006 +0100 (2006-08-04)
parents 462d6e4cb29a
children 64100a77fd17
line source
1 /******************************************************************************
2 * event_channel.c
3 *
4 * Event notifications from VIRQs, PIRQs, and other domains.
5 *
6 * Copyright (c) 2003-2006, K A Fraser.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
18 #include <xen/config.h>
19 #include <xen/init.h>
20 #include <xen/lib.h>
21 #include <xen/errno.h>
22 #include <xen/sched.h>
23 #include <xen/event.h>
24 #include <xen/irq.h>
25 #include <xen/iocap.h>
26 #include <xen/guest_access.h>
27 #include <asm/current.h>
29 #include <public/xen.h>
30 #include <public/event_channel.h>
31 #include <acm/acm_hooks.h>
/* Bucket (array of EVTCHNS_PER_BUCKET struct evtchn) containing port (p). */
33 #define bucket_from_port(d,p) \
34 ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
/* A port is valid iff it is in range and its bucket has been allocated. */
35 #define port_is_valid(d,p) \
36 (((p) >= 0) && ((p) < MAX_EVTCHNS) && \
37 (bucket_from_port(d,p) != NULL))
/* The struct evtchn for port (p); caller must ensure port_is_valid(). */
38 #define evtchn_from_port(d,p) \
39 (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])
/*
 * Log the failure, set the local 'rc', and jump to the caller's 'out'
 * label.  Relies on 'rc' and 'out' existing in the enclosing function.
 */
41 #define ERROR_EXIT(_errno) \
42 do { \
43 DPRINTK("EVTCHNOP failure: domain %d, error %d, line %d\n", \
44 current->domain->domain_id, (_errno), __LINE__); \
45 rc = (_errno); \
46 goto out; \
47 } while ( 0 )
50 static int virq_is_global(int virq)
51 {
52 int rc;
54 ASSERT((virq >= 0) && (virq < NR_VIRQS));
56 switch ( virq )
57 {
58 case VIRQ_TIMER:
59 case VIRQ_DEBUG:
60 case VIRQ_XENOPROF:
61 rc = 0;
62 break;
63 case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
64 rc = arch_virq_is_global(virq);
65 break;
66 default:
67 rc = 1;
68 break;
69 }
71 return rc;
72 }
75 static int get_free_port(struct domain *d)
76 {
77 struct evtchn *chn;
78 int port;
80 for ( port = 0; port_is_valid(d, port); port++ )
81 if ( evtchn_from_port(d, port)->state == ECS_FREE )
82 return port;
84 if ( port == MAX_EVTCHNS )
85 return -ENOSPC;
87 chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
88 if ( unlikely(chn == NULL) )
89 return -ENOMEM;
90 memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
91 bucket_from_port(d, port) = chn;
93 return port;
94 }
97 static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
98 {
99 struct evtchn *chn;
100 struct domain *d;
101 int port;
102 domid_t dom = alloc->dom;
103 long rc;
105 if ( (rc = acm_pre_eventchannel_unbound(dom, alloc->remote_dom)) != 0 )
106 return rc;
108 if ( dom == DOMID_SELF )
109 dom = current->domain->domain_id;
110 else if ( !IS_PRIV(current->domain) )
111 return -EPERM;
113 if ( (d = find_domain_by_id(dom)) == NULL )
114 return -ESRCH;
116 spin_lock(&d->evtchn_lock);
118 if ( (port = get_free_port(d)) < 0 )
119 ERROR_EXIT(port);
120 chn = evtchn_from_port(d, port);
122 chn->state = ECS_UNBOUND;
123 if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
124 chn->u.unbound.remote_domid = current->domain->domain_id;
126 alloc->port = port;
128 out:
129 spin_unlock(&d->evtchn_lock);
131 put_domain(d);
133 return rc;
134 }
/*
 * EVTCHNOP_bind_interdomain: connect a newly-allocated local port to an
 * existing ECS_UNBOUND remote port that was advertised to this domain.
 * On success both ends become ECS_INTERDOMAIN, the local port number is
 * returned in bind->local_port, and a notification is conservatively set
 * on the local port (events sent to the remote unbound port before the
 * bind completed would otherwise be lost).
 */
137 static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
138 {
139 struct evtchn *lchn, *rchn;
140 struct domain *ld = current->domain, *rd;
141 int lport, rport = bind->remote_port;
142 domid_t rdom = bind->remote_dom;
143 long rc;
145 if ( (rc = acm_pre_eventchannel_interdomain(rdom)) != 0 )
146 return rc;
148 if ( rdom == DOMID_SELF )
149 rdom = current->domain->domain_id;
151 if ( (rd = find_domain_by_id(rdom)) == NULL )
152 return -ESRCH;
/*
 * Avoid ABBA deadlock by always acquiring the lock of the domain with
 * the lower pointer address first (NB: ordering is by address, not by
 * domain id).  Take only one lock when binding to ourselves.
 */
154 /* Avoid deadlock by first acquiring lock of domain with smaller id. */
155 if ( ld < rd )
156 {
157 spin_lock(&ld->evtchn_lock);
158 spin_lock(&rd->evtchn_lock);
159 }
160 else
161 {
162 if ( ld != rd )
163 spin_lock(&rd->evtchn_lock);
164 spin_lock(&ld->evtchn_lock);
165 }
167 if ( (lport = get_free_port(ld)) < 0 )
168 ERROR_EXIT(lport);
169 lchn = evtchn_from_port(ld, lport);
/* The remote port must exist, be unbound, and be advertised to us. */
171 if ( !port_is_valid(rd, rport) )
172 ERROR_EXIT(-EINVAL);
173 rchn = evtchn_from_port(rd, rport);
174 if ( (rchn->state != ECS_UNBOUND) ||
175 (rchn->u.unbound.remote_domid != ld->domain_id) )
176 ERROR_EXIT(-EINVAL);
/* Cross-link the two endpoints. */
178 lchn->u.interdomain.remote_dom = rd;
179 lchn->u.interdomain.remote_port = (u16)rport;
180 lchn->state = ECS_INTERDOMAIN;
182 rchn->u.interdomain.remote_dom = ld;
183 rchn->u.interdomain.remote_port = (u16)lport;
184 rchn->state = ECS_INTERDOMAIN;
186 /*
187 * We may have lost notifications on the remote unbound port. Fix that up
188 * here by conservatively always setting a notification on the local port.
189 */
190 evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
192 bind->local_port = lport;
194 out:
195 spin_unlock(&ld->evtchn_lock);
196 if ( ld != rd )
197 spin_unlock(&rd->evtchn_lock);
199 put_domain(rd);
201 return rc;
202 }
205 static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
206 {
207 struct evtchn *chn;
208 struct vcpu *v;
209 struct domain *d = current->domain;
210 int port, virq = bind->virq, vcpu = bind->vcpu;
211 long rc = 0;
213 if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
214 return -EINVAL;
216 if ( virq_is_global(virq) && (vcpu != 0) )
217 return -EINVAL;
219 if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
220 ((v = d->vcpu[vcpu]) == NULL) )
221 return -ENOENT;
223 spin_lock(&d->evtchn_lock);
225 if ( v->virq_to_evtchn[virq] != 0 )
226 ERROR_EXIT(-EEXIST);
228 if ( (port = get_free_port(d)) < 0 )
229 ERROR_EXIT(port);
231 chn = evtchn_from_port(d, port);
232 chn->state = ECS_VIRQ;
233 chn->notify_vcpu_id = vcpu;
234 chn->u.virq = virq;
236 v->virq_to_evtchn[virq] = bind->port = port;
238 out:
239 spin_unlock(&d->evtchn_lock);
241 return rc;
242 }
245 static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
246 {
247 struct evtchn *chn;
248 struct domain *d = current->domain;
249 int port, vcpu = bind->vcpu;
250 long rc = 0;
252 if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
253 (d->vcpu[vcpu] == NULL) )
254 return -ENOENT;
256 spin_lock(&d->evtchn_lock);
258 if ( (port = get_free_port(d)) < 0 )
259 ERROR_EXIT(port);
261 chn = evtchn_from_port(d, port);
262 chn->state = ECS_IPI;
263 chn->notify_vcpu_id = vcpu;
265 bind->port = port;
267 out:
268 spin_unlock(&d->evtchn_lock);
270 return rc;
271 }
274 static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
275 {
276 struct evtchn *chn;
277 struct domain *d = current->domain;
278 int port, pirq = bind->pirq;
279 long rc;
281 if ( (pirq < 0) || (pirq >= ARRAY_SIZE(d->pirq_to_evtchn)) )
282 return -EINVAL;
284 if ( !irq_access_permitted(d, pirq) )
285 return -EPERM;
287 spin_lock(&d->evtchn_lock);
289 if ( d->pirq_to_evtchn[pirq] != 0 )
290 ERROR_EXIT(-EEXIST);
292 if ( (port = get_free_port(d)) < 0 )
293 ERROR_EXIT(port);
295 chn = evtchn_from_port(d, port);
297 d->pirq_to_evtchn[pirq] = port;
298 rc = pirq_guest_bind(d->vcpu[0], pirq,
299 !!(bind->flags & BIND_PIRQ__WILL_SHARE));
300 if ( rc != 0 )
301 {
302 d->pirq_to_evtchn[pirq] = 0;
303 goto out;
304 }
306 chn->state = ECS_PIRQ;
307 chn->u.pirq = pirq;
309 bind->port = port;
311 out:
312 spin_unlock(&d->evtchn_lock);
314 return rc;
315 }
/*
 * Close port @port1 in domain @d1: undo whatever binding it has (PIRQ,
 * VIRQ, interdomain, ...) and return the channel to ECS_FREE, re-bound
 * to VCPU0.  For an interdomain channel the remote end is downgraded to
 * ECS_UNBOUND (re-advertised to d1) rather than closed.
 *
 * Locking: both evtchn locks may be needed; they are acquired in pointer
 * address order via the 'again' retry below.  A reference is taken on
 * the remote domain before d1's lock is dropped so it cannot vanish.
 */
318 static long __evtchn_close(struct domain *d1, int port1)
319 {
320 struct domain *d2 = NULL;
321 struct vcpu *v;
322 struct evtchn *chn1, *chn2;
323 int port2;
324 long rc = 0;
/* Re-entered (with d2 referenced and locked) if lock order forced a retry. */
326 again:
327 spin_lock(&d1->evtchn_lock);
329 if ( !port_is_valid(d1, port1) )
330 {
331 rc = -EINVAL;
332 goto out;
333 }
335 chn1 = evtchn_from_port(d1, port1);
337 /* Guest cannot close a Xen-attached event channel. */
338 if ( unlikely(chn1->consumer_is_xen) )
339 {
340 rc = -EINVAL;
341 goto out;
342 }
344 switch ( chn1->state )
345 {
346 case ECS_FREE:
347 case ECS_RESERVED:
348 rc = -EINVAL;
349 goto out;
351 case ECS_UNBOUND:
352 break;
354 case ECS_PIRQ:
/* Clear the pirq->port mapping only if the guest unbind succeeded. */
355 if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
356 d1->pirq_to_evtchn[chn1->u.pirq] = 0;
357 break;
359 case ECS_VIRQ:
/* Remove this port from whichever vcpu's virq table references it. */
360 for_each_vcpu ( d1, v )
361 if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
362 v->virq_to_evtchn[chn1->u.virq] = 0;
363 break;
365 case ECS_IPI:
366 break;
368 case ECS_INTERDOMAIN:
369 if ( d2 == NULL )
370 {
371 d2 = chn1->u.interdomain.remote_dom;
373 /* If we unlock d1 then we could lose d2. Must get a reference. */
374 if ( unlikely(!get_domain(d2)) )
375 {
376 /*
377 * Failed to obtain a reference. No matter: d2 must be dying
378 * and so will close this event channel for us.
379 */
380 d2 = NULL;
381 goto out;
382 }
/* Acquire locks in pointer-address order; retry if d2 orders first. */
384 if ( d1 < d2 )
385 {
386 spin_lock(&d2->evtchn_lock);
387 }
388 else if ( d1 != d2 )
389 {
390 spin_unlock(&d1->evtchn_lock);
391 spin_lock(&d2->evtchn_lock);
392 goto again;
393 }
394 }
395 else if ( d2 != chn1->u.interdomain.remote_dom )
396 {
397 /*
398 * We can only get here if the port was closed and re-bound after
399 * unlocking d1 but before locking d2 above. We could retry but
400 * it is easier to return the same error as if we had seen the
401 * port in ECS_CLOSED. It must have passed through that state for
402 * us to end up here, so it's a valid error to return.
403 */
404 BUG_ON(d1 != current->domain);
405 rc = -EINVAL;
406 goto out;
407 }
409 port2 = chn1->u.interdomain.remote_port;
410 BUG_ON(!port_is_valid(d2, port2));
412 chn2 = evtchn_from_port(d2, port2);
413 BUG_ON(chn2->state != ECS_INTERDOMAIN);
414 BUG_ON(chn2->u.interdomain.remote_dom != d1);
/* Remote end reverts to unbound, re-advertised to d1. */
416 chn2->state = ECS_UNBOUND;
417 chn2->u.unbound.remote_domid = d1->domain_id;
418 break;
420 default:
421 BUG();
422 }
424 /* Reset binding to vcpu0 when the channel is freed. */
425 chn1->state = ECS_FREE;
426 chn1->notify_vcpu_id = 0;
428 out:
429 if ( d2 != NULL )
430 {
431 if ( d1 != d2 )
432 spin_unlock(&d2->evtchn_lock);
433 put_domain(d2);
434 }
436 spin_unlock(&d1->evtchn_lock);
438 return rc;
439 }
442 static long evtchn_close(evtchn_close_t *close)
443 {
444 return __evtchn_close(current->domain, close->port);
445 }
448 long evtchn_send(unsigned int lport)
449 {
450 struct evtchn *lchn, *rchn;
451 struct domain *ld = current->domain, *rd;
452 struct vcpu *rvcpu;
453 int rport, ret = 0;
455 spin_lock(&ld->evtchn_lock);
457 if ( unlikely(!port_is_valid(ld, lport)) )
458 {
459 spin_unlock(&ld->evtchn_lock);
460 return -EINVAL;
461 }
463 lchn = evtchn_from_port(ld, lport);
465 /* Guest cannot send via a Xen-attached event channel. */
466 if ( unlikely(lchn->consumer_is_xen) )
467 {
468 spin_unlock(&ld->evtchn_lock);
469 return -EINVAL;
470 }
472 switch ( lchn->state )
473 {
474 case ECS_INTERDOMAIN:
475 rd = lchn->u.interdomain.remote_dom;
476 rport = lchn->u.interdomain.remote_port;
477 rchn = evtchn_from_port(rd, rport);
478 rvcpu = rd->vcpu[rchn->notify_vcpu_id];
479 if ( rchn->consumer_is_xen )
480 {
481 /* Xen consumers need notification only if they are blocked. */
482 if ( test_and_clear_bit(_VCPUF_blocked_in_xen,
483 &rvcpu->vcpu_flags) )
484 vcpu_wake(rvcpu);
485 }
486 else
487 {
488 evtchn_set_pending(rvcpu, rport);
489 }
490 break;
491 case ECS_IPI:
492 evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
493 break;
494 case ECS_UNBOUND:
495 /* silently drop the notification */
496 break;
497 default:
498 ret = -EINVAL;
499 }
501 spin_unlock(&ld->evtchn_lock);
503 return ret;
504 }
/*
 * Mark @port pending for delivery to @v: set the port's bit in the
 * shared-info pending bitmap, and if the port is unmasked also set the
 * vcpu's pending selector and kick the guest.  Finally wake any VCPUs
 * of the domain that are polling for events.
 */
507 void evtchn_set_pending(struct vcpu *v, int port)
508 {
509 struct domain *d = v->domain;
510 shared_info_t *s = d->shared_info;
512 /*
513 * The following bit operations must happen in strict order.
514 * NB. On x86, the atomic bit operations also act as memory barriers.
515 * There is therefore sufficiently strict ordering for this architecture --
516 * others may require explicit memory barriers.
517 */
/* Already pending: nothing further to do. */
519 if ( test_and_set_bit(port, s->evtchn_pending) )
520 return;
/* Only notify the guest if the port is unmasked and the selector was clear. */
522 if ( !test_bit (port, s->evtchn_mask) &&
523 !test_and_set_bit(port / BITS_PER_LONG,
524 &v->vcpu_info->evtchn_pending_sel) )
525 {
526 vcpu_mark_events_pending(v);
527 }
529 /* Check if some VCPU might be polling for this event. */
530 if ( unlikely(test_bit(_DOMF_polling, &d->domain_flags)) &&
531 likely(test_and_clear_bit(_DOMF_polling, &d->domain_flags)) )
532 {
/* NB: 'v' is reused here as the iteration variable. */
533 for_each_vcpu ( d, v )
534 if ( test_and_clear_bit(_VCPUF_polling, &v->vcpu_flags) )
535 vcpu_unblock(v);
536 }
537 }
540 void send_guest_vcpu_virq(struct vcpu *v, int virq)
541 {
542 int port;
544 ASSERT(!virq_is_global(virq));
546 port = v->virq_to_evtchn[virq];
547 if ( unlikely(port == 0) )
548 return;
550 evtchn_set_pending(v, port);
551 }
553 void send_guest_global_virq(struct domain *d, int virq)
554 {
555 int port;
556 struct vcpu *v;
557 struct evtchn *chn;
559 ASSERT(virq_is_global(virq));
561 v = d->vcpu[0];
562 if ( unlikely(v == NULL) )
563 return;
565 port = v->virq_to_evtchn[virq];
566 if ( unlikely(port == 0) )
567 return;
569 chn = evtchn_from_port(d, port);
570 evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
571 }
574 void send_guest_pirq(struct domain *d, int pirq)
575 {
576 int port = d->pirq_to_evtchn[pirq];
577 struct evtchn *chn;
579 ASSERT(port != 0);
581 chn = evtchn_from_port(d, port);
582 evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
583 }
586 static long evtchn_status(evtchn_status_t *status)
587 {
588 struct domain *d;
589 domid_t dom = status->dom;
590 int port = status->port;
591 struct evtchn *chn;
592 long rc = 0;
594 if ( dom == DOMID_SELF )
595 dom = current->domain->domain_id;
596 else if ( !IS_PRIV(current->domain) )
597 return -EPERM;
599 if ( (d = find_domain_by_id(dom)) == NULL )
600 return -ESRCH;
602 spin_lock(&d->evtchn_lock);
604 if ( !port_is_valid(d, port) )
605 {
606 rc = -EINVAL;
607 goto out;
608 }
610 chn = evtchn_from_port(d, port);
611 switch ( chn->state )
612 {
613 case ECS_FREE:
614 case ECS_RESERVED:
615 status->status = EVTCHNSTAT_closed;
616 break;
617 case ECS_UNBOUND:
618 status->status = EVTCHNSTAT_unbound;
619 status->u.unbound.dom = chn->u.unbound.remote_domid;
620 break;
621 case ECS_INTERDOMAIN:
622 status->status = EVTCHNSTAT_interdomain;
623 status->u.interdomain.dom =
624 chn->u.interdomain.remote_dom->domain_id;
625 status->u.interdomain.port = chn->u.interdomain.remote_port;
626 break;
627 case ECS_PIRQ:
628 status->status = EVTCHNSTAT_pirq;
629 status->u.pirq = chn->u.pirq;
630 break;
631 case ECS_VIRQ:
632 status->status = EVTCHNSTAT_virq;
633 status->u.virq = chn->u.virq;
634 break;
635 case ECS_IPI:
636 status->status = EVTCHNSTAT_ipi;
637 break;
638 default:
639 BUG();
640 }
642 status->vcpu = chn->notify_vcpu_id;
644 out:
645 spin_unlock(&d->evtchn_lock);
646 put_domain(d);
647 return rc;
648 }
651 long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
652 {
653 struct domain *d = current->domain;
654 struct evtchn *chn;
655 long rc = 0;
657 if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
658 return -ENOENT;
660 spin_lock(&d->evtchn_lock);
662 if ( !port_is_valid(d, port) )
663 {
664 rc = -EINVAL;
665 goto out;
666 }
668 chn = evtchn_from_port(d, port);
670 /* Guest cannot re-bind a Xen-attached event channel. */
671 if ( unlikely(chn->consumer_is_xen) )
672 {
673 rc = -EINVAL;
674 goto out;
675 }
677 switch ( chn->state )
678 {
679 case ECS_VIRQ:
680 if ( virq_is_global(chn->u.virq) )
681 chn->notify_vcpu_id = vcpu_id;
682 else
683 rc = -EINVAL;
684 break;
685 case ECS_UNBOUND:
686 case ECS_INTERDOMAIN:
687 case ECS_PIRQ:
688 chn->notify_vcpu_id = vcpu_id;
689 break;
690 default:
691 rc = -EINVAL;
692 break;
693 }
695 out:
696 spin_unlock(&d->evtchn_lock);
697 return rc;
698 }
/*
 * EVTCHNOP_unmask: clear the mask bit for @unmask->port in the calling
 * domain and, if an event was pending while masked, deliver it now to
 * the VCPU the channel is bound to.
 */
701 static long evtchn_unmask(evtchn_unmask_t *unmask)
702 {
703 struct domain *d = current->domain;
704 shared_info_t *s = d->shared_info;
705 int port = unmask->port;
706 struct vcpu *v;
708 spin_lock(&d->evtchn_lock);
710 if ( unlikely(!port_is_valid(d, port)) )
711 {
712 spin_unlock(&d->evtchn_lock);
713 return -EINVAL;
714 }
/* Deliver to the VCPU the channel is currently bound to. */
716 v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
718 /*
719 * These operations must happen in strict order. Based on
720 * include/xen/event.h:evtchn_set_pending().
721 */
/*
 * Notify the guest only if: the port was actually masked, an event is
 * pending on it, and the pending selector bit was not already set.
 */
722 if ( test_and_clear_bit(port, s->evtchn_mask) &&
723 test_bit (port, s->evtchn_pending) &&
724 !test_and_set_bit (port / BITS_PER_LONG,
725 &v->vcpu_info->evtchn_pending_sel) )
726 {
727 vcpu_mark_events_pending(v);
728 }
730 spin_unlock(&d->evtchn_lock);
732 return 0;
733 }
736 long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
737 {
738 long rc;
740 switch ( cmd )
741 {
742 case EVTCHNOP_alloc_unbound: {
743 struct evtchn_alloc_unbound alloc_unbound;
744 if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
745 return -EFAULT;
746 rc = evtchn_alloc_unbound(&alloc_unbound);
747 if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
748 rc = -EFAULT; /* Cleaning up here would be a mess! */
749 break;
750 }
752 case EVTCHNOP_bind_interdomain: {
753 struct evtchn_bind_interdomain bind_interdomain;
754 if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
755 return -EFAULT;
756 rc = evtchn_bind_interdomain(&bind_interdomain);
757 if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
758 rc = -EFAULT; /* Cleaning up here would be a mess! */
759 break;
760 }
762 case EVTCHNOP_bind_virq: {
763 struct evtchn_bind_virq bind_virq;
764 if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
765 return -EFAULT;
766 rc = evtchn_bind_virq(&bind_virq);
767 if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
768 rc = -EFAULT; /* Cleaning up here would be a mess! */
769 break;
770 }
772 case EVTCHNOP_bind_ipi: {
773 struct evtchn_bind_ipi bind_ipi;
774 if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
775 return -EFAULT;
776 rc = evtchn_bind_ipi(&bind_ipi);
777 if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
778 rc = -EFAULT; /* Cleaning up here would be a mess! */
779 break;
780 }
782 case EVTCHNOP_bind_pirq: {
783 struct evtchn_bind_pirq bind_pirq;
784 if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
785 return -EFAULT;
786 rc = evtchn_bind_pirq(&bind_pirq);
787 if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
788 rc = -EFAULT; /* Cleaning up here would be a mess! */
789 break;
790 }
792 case EVTCHNOP_close: {
793 struct evtchn_close close;
794 if ( copy_from_guest(&close, arg, 1) != 0 )
795 return -EFAULT;
796 rc = evtchn_close(&close);
797 break;
798 }
800 case EVTCHNOP_send: {
801 struct evtchn_send send;
802 if ( copy_from_guest(&send, arg, 1) != 0 )
803 return -EFAULT;
804 rc = evtchn_send(send.port);
805 break;
806 }
808 case EVTCHNOP_status: {
809 struct evtchn_status status;
810 if ( copy_from_guest(&status, arg, 1) != 0 )
811 return -EFAULT;
812 rc = evtchn_status(&status);
813 if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
814 rc = -EFAULT;
815 break;
816 }
818 case EVTCHNOP_bind_vcpu: {
819 struct evtchn_bind_vcpu bind_vcpu;
820 if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
821 return -EFAULT;
822 rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
823 break;
824 }
826 case EVTCHNOP_unmask: {
827 struct evtchn_unmask unmask;
828 if ( copy_from_guest(&unmask, arg, 1) != 0 )
829 return -EFAULT;
830 rc = evtchn_unmask(&unmask);
831 break;
832 }
834 default:
835 rc = -ENOSYS;
836 break;
837 }
839 return rc;
840 }
843 int alloc_unbound_xen_event_channel(
844 struct vcpu *local_vcpu, domid_t remote_domid)
845 {
846 struct evtchn *chn;
847 struct domain *d = local_vcpu->domain;
848 int port;
850 spin_lock(&d->evtchn_lock);
852 if ( (port = get_free_port(d)) < 0 )
853 goto out;
854 chn = evtchn_from_port(d, port);
856 chn->state = ECS_UNBOUND;
857 chn->consumer_is_xen = 1;
858 chn->notify_vcpu_id = local_vcpu->vcpu_id;
859 chn->u.unbound.remote_domid = remote_domid;
861 out:
862 spin_unlock(&d->evtchn_lock);
864 return port;
865 }
868 void free_xen_event_channel(
869 struct vcpu *local_vcpu, int port)
870 {
871 struct evtchn *chn;
872 struct domain *d = local_vcpu->domain;
874 spin_lock(&d->evtchn_lock);
875 chn = evtchn_from_port(d, port);
876 BUG_ON(!chn->consumer_is_xen);
877 chn->consumer_is_xen = 0;
878 spin_unlock(&d->evtchn_lock);
880 (void)__evtchn_close(d, port);
881 }
884 void notify_via_xen_event_channel(int lport)
885 {
886 struct evtchn *lchn, *rchn;
887 struct domain *ld = current->domain, *rd;
888 int rport;
890 spin_lock(&ld->evtchn_lock);
892 ASSERT(port_is_valid(ld, lport));
893 lchn = evtchn_from_port(ld, lport);
894 ASSERT(lchn->consumer_is_xen);
896 if ( likely(lchn->state == ECS_INTERDOMAIN) )
897 {
898 rd = lchn->u.interdomain.remote_dom;
899 rport = lchn->u.interdomain.remote_port;
900 rchn = evtchn_from_port(rd, rport);
901 evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
902 }
904 spin_unlock(&ld->evtchn_lock);
905 }
908 int evtchn_init(struct domain *d)
909 {
910 spin_lock_init(&d->evtchn_lock);
911 if ( get_free_port(d) != 0 )
912 return -EINVAL;
913 evtchn_from_port(d, 0)->state = ECS_RESERVED;
914 return 0;
915 }
918 void evtchn_destroy(struct domain *d)
919 {
920 int i;
922 for ( i = 0; port_is_valid(d, i); i++ )
923 {
924 evtchn_from_port(d, i)->consumer_is_xen = 0;
925 (void)__evtchn_close(d, i);
926 }
928 for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
929 xfree(d->evtchn[i]);
930 }
932 /*
933 * Local variables:
934 * mode: C
935 * c-set-style: "BSD"
936 * c-basic-offset: 4
937 * tab-width: 4
938 * indent-tabs-mode: nil
939 * End:
940 */