direct-io.hg

view xen/common/event_channel.c @ 14287:38513d22d234

xen: Provide notification of console updates via VIRQ.

The readconsolering capabilities provide the opportunity to
provide console output to other clients (remote systems,
logging systems, etc). This patch adds the ability to generate
a notification of a change in the console buffer.

Signed-off-by: Ben Thomas <ben@virtualiron.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Mar 08 15:21:10 2007 +0000 (2007-03-08)
parents 9d36026b1b43
children 4b13fc910acf
line source
1 /******************************************************************************
2 * event_channel.c
3 *
4 * Event notifications from VIRQs, PIRQs, and other domains.
5 *
6 * Copyright (c) 2003-2006, K A Fraser.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
18 #include <xen/config.h>
19 #include <xen/init.h>
20 #include <xen/lib.h>
21 #include <xen/errno.h>
22 #include <xen/sched.h>
23 #include <xen/event.h>
24 #include <xen/irq.h>
25 #include <xen/iocap.h>
26 #include <xen/compat.h>
27 #include <xen/guest_access.h>
28 #include <asm/current.h>
30 #include <public/xen.h>
31 #include <public/event_channel.h>
32 #include <acm/acm_hooks.h>
/* Map a port number to the bucket (array of evtchn structs) containing it. */
34 #define bucket_from_port(d,p) \
35 ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
/* A port is valid iff it is in range AND its bucket has been allocated. */
36 #define port_is_valid(d,p) \
37 (((p) >= 0) && ((p) < MAX_EVTCHNS(d)) && \
38 (bucket_from_port(d,p) != NULL))
/* Map a valid port number to its struct evtchn within the bucket. */
39 #define evtchn_from_port(d,p) \
40 (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])
/*
 * Log a hypercall failure, then set 'rc' and jump to the enclosing
 * function's 'out' label.  Requires both 'rc' and 'out:' in the caller.
 */
42 #define ERROR_EXIT(_errno) \
43 do { \
44 gdprintk(XENLOG_WARNING, \
45 "EVTCHNOP failure: domain %d, error %d, line %d\n", \
46 current->domain->domain_id, (_errno), __LINE__); \
47 rc = (_errno); \
48 goto out; \
49 } while ( 0 )
52 static int virq_is_global(int virq)
53 {
54 int rc;
56 ASSERT((virq >= 0) && (virq < NR_VIRQS));
58 switch ( virq )
59 {
60 case VIRQ_TIMER:
61 case VIRQ_DEBUG:
62 case VIRQ_XENOPROF:
63 rc = 0;
64 break;
65 case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
66 rc = arch_virq_is_global(virq);
67 break;
68 default:
69 rc = 1;
70 break;
71 }
73 return rc;
74 }
77 static int get_free_port(struct domain *d)
78 {
79 struct evtchn *chn;
80 int port;
82 for ( port = 0; port_is_valid(d, port); port++ )
83 if ( evtchn_from_port(d, port)->state == ECS_FREE )
84 return port;
86 if ( port == MAX_EVTCHNS(d) )
87 return -ENOSPC;
89 chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
90 if ( unlikely(chn == NULL) )
91 return -ENOMEM;
92 memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
93 bucket_from_port(d, port) = chn;
95 return port;
96 }
/*
 * EVTCHNOP_alloc_unbound: allocate a new port in domain 'alloc->dom' which
 * 'alloc->remote_dom' may later connect to via EVTCHNOP_bind_interdomain.
 * Returns 0 and stores the new port in alloc->port, or a -ve errno value.
 */
99 static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
100 {
101 struct evtchn *chn;
102 struct domain *d;
103 int port;
104 domid_t dom = alloc->dom;
105 long rc;
/* Security-module hook may veto the operation. */
107 if ( (rc = acm_pre_eventchannel_unbound(dom, alloc->remote_dom)) != 0 )
108 return rc;
/* Only a privileged domain may allocate a port in another domain. */
110 if ( dom == DOMID_SELF )
111 dom = current->domain->domain_id;
112 else if ( !IS_PRIV(current->domain) )
113 return -EPERM;
115 if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
116 return -ESRCH;
118 spin_lock(&d->evtchn_lock);
120 if ( (port = get_free_port(d)) < 0 )
121 ERROR_EXIT(port);
122 chn = evtchn_from_port(d, port);
124 chn->state = ECS_UNBOUND;
/* DOMID_SELF in remote_dom means "the calling domain"; resolve it now. */
125 if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
126 chn->u.unbound.remote_domid = current->domain->domain_id;
128 alloc->port = port;
/* rc is 0 here on the success path (set by the ACM hook above). */
130 out:
131 spin_unlock(&d->evtchn_lock);
133 rcu_unlock_domain(d);
135 return rc;
136 }
/*
 * EVTCHNOP_bind_interdomain: connect a new local port to a remote port that
 * the remote domain previously allocated (ECS_UNBOUND) naming this domain.
 * On success both ends become ECS_INTERDOMAIN and bind->local_port is set.
 */
139 static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
140 {
141 struct evtchn *lchn, *rchn;
142 struct domain *ld = current->domain, *rd;
143 int lport, rport = bind->remote_port;
144 domid_t rdom = bind->remote_dom;
145 long rc;
/* Security-module hook may veto the operation. */
147 if ( (rc = acm_pre_eventchannel_interdomain(rdom)) != 0 )
148 return rc;
150 if ( rdom == DOMID_SELF )
151 rdom = current->domain->domain_id;
153 if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
154 return -ESRCH;
156 /* Avoid deadlock by first acquiring lock of domain with smaller id. */
/*
 * NOTE(review): the ordering actually used below is by struct-domain
 * address (ld < rd), not by domain id as the comment above suggests;
 * it is consistent across callers, which is what prevents deadlock.
 */
157 if ( ld < rd )
158 {
159 spin_lock(&ld->evtchn_lock);
160 spin_lock(&rd->evtchn_lock);
161 }
162 else
163 {
164 if ( ld != rd )
165 spin_lock(&rd->evtchn_lock);
166 spin_lock(&ld->evtchn_lock);
167 }
169 if ( (lport = get_free_port(ld)) < 0 )
170 ERROR_EXIT(lport);
171 lchn = evtchn_from_port(ld, lport);
173 if ( !port_is_valid(rd, rport) )
174 ERROR_EXIT(-EINVAL);
175 rchn = evtchn_from_port(rd, rport);
/* The remote port must be unbound and must name this domain as its peer. */
176 if ( (rchn->state != ECS_UNBOUND) ||
177 (rchn->u.unbound.remote_domid != ld->domain_id) )
178 ERROR_EXIT(-EINVAL);
/* Link the two halves of the channel. */
180 lchn->u.interdomain.remote_dom = rd;
181 lchn->u.interdomain.remote_port = (u16)rport;
182 lchn->state = ECS_INTERDOMAIN;
184 rchn->u.interdomain.remote_dom = ld;
185 rchn->u.interdomain.remote_port = (u16)lport;
186 rchn->state = ECS_INTERDOMAIN;
188 /*
189 * We may have lost notifications on the remote unbound port. Fix that up
190 * here by conservatively always setting a notification on the local port.
191 */
192 evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
194 bind->local_port = lport;
/* rc is 0 here on the success path (set by the ACM hook above). */
196 out:
197 spin_unlock(&ld->evtchn_lock);
198 if ( ld != rd )
199 spin_unlock(&rd->evtchn_lock);
201 rcu_unlock_domain(rd);
203 return rc;
204 }
/*
 * EVTCHNOP_bind_virq: bind a virtual IRQ to a new event-channel port on the
 * given VCPU of the calling domain.  Global VIRQs may initially be bound
 * only to VCPU0 (they can be re-targeted later via EVTCHNOP_bind_vcpu).
 */
207 static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
208 {
209 struct evtchn *chn;
210 struct vcpu *v;
211 struct domain *d = current->domain;
212 int port, virq = bind->virq, vcpu = bind->vcpu;
213 long rc = 0;
/* 'v' appears only inside ARRAY_SIZE (sizeof) here, so it is never read. */
215 if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
216 return -EINVAL;
218 if ( virq_is_global(virq) && (vcpu != 0) )
219 return -EINVAL;
221 if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
222 ((v = d->vcpu[vcpu]) == NULL) )
223 return -ENOENT;
225 spin_lock(&d->evtchn_lock);
/* Each VIRQ may be bound at most once per VCPU. */
227 if ( v->virq_to_evtchn[virq] != 0 )
228 ERROR_EXIT(-EEXIST);
230 if ( (port = get_free_port(d)) < 0 )
231 ERROR_EXIT(port);
233 chn = evtchn_from_port(d, port);
234 chn->state = ECS_VIRQ;
235 chn->notify_vcpu_id = vcpu;
236 chn->u.virq = virq;
/* Record the mapping and report the port back to the guest. */
238 v->virq_to_evtchn[virq] = bind->port = port;
240 out:
241 spin_unlock(&d->evtchn_lock);
243 return rc;
244 }
247 static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
248 {
249 struct evtchn *chn;
250 struct domain *d = current->domain;
251 int port, vcpu = bind->vcpu;
252 long rc = 0;
254 if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
255 (d->vcpu[vcpu] == NULL) )
256 return -ENOENT;
258 spin_lock(&d->evtchn_lock);
260 if ( (port = get_free_port(d)) < 0 )
261 ERROR_EXIT(port);
263 chn = evtchn_from_port(d, port);
264 chn->state = ECS_IPI;
265 chn->notify_vcpu_id = vcpu;
267 bind->port = port;
269 out:
270 spin_unlock(&d->evtchn_lock);
272 return rc;
273 }
/*
 * EVTCHNOP_bind_pirq: bind a physical IRQ to a new event-channel port.
 * The domain must have been granted access to the IRQ.  Notifications are
 * initially delivered to VCPU0.
 */
276 static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
277 {
278 struct evtchn *chn;
279 struct domain *d = current->domain;
280 int port, pirq = bind->pirq;
281 long rc;
283 if ( (pirq < 0) || (pirq >= ARRAY_SIZE(d->pirq_to_evtchn)) )
284 return -EINVAL;
286 if ( !irq_access_permitted(d, pirq) )
287 return -EPERM;
289 spin_lock(&d->evtchn_lock);
/* Each PIRQ may be bound to at most one port. */
291 if ( d->pirq_to_evtchn[pirq] != 0 )
292 ERROR_EXIT(-EEXIST);
294 if ( (port = get_free_port(d)) < 0 )
295 ERROR_EXIT(port);
297 chn = evtchn_from_port(d, port);
/* Publish the pirq->port mapping before enabling guest IRQ delivery. */
299 d->pirq_to_evtchn[pirq] = port;
300 rc = pirq_guest_bind(d->vcpu[0], pirq,
301 !!(bind->flags & BIND_PIRQ__WILL_SHARE));
/* Roll back the published mapping if the guest-IRQ binding failed. */
302 if ( rc != 0 )
303 {
304 d->pirq_to_evtchn[pirq] = 0;
305 goto out;
306 }
308 chn->state = ECS_PIRQ;
309 chn->u.pirq = pirq;
311 bind->port = port;
313 out:
314 spin_unlock(&d->evtchn_lock);
316 return rc;
317 }
/*
 * Close event channel <port1> of domain <d1>.  Tears down any interdomain
 * linkage (the remote end reverts to ECS_UNBOUND), clears PIRQ/VIRQ
 * mappings, and returns the channel to ECS_FREE bound to VCPU0.  Returns 0
 * on success, or -EINVAL for invalid/free/reserved/Xen-attached ports.
 */
320 static long __evtchn_close(struct domain *d1, int port1)
321 {
322 struct domain *d2 = NULL;
323 struct vcpu *v;
324 struct evtchn *chn1, *chn2;
325 int port2;
326 long rc = 0;
/* Re-entered after dropping d1's lock to take d2's lock in safe order. */
328 again:
329 spin_lock(&d1->evtchn_lock);
331 if ( !port_is_valid(d1, port1) )
332 {
333 rc = -EINVAL;
334 goto out;
335 }
337 chn1 = evtchn_from_port(d1, port1);
339 /* Guest cannot close a Xen-attached event channel. */
340 if ( unlikely(chn1->consumer_is_xen) )
341 {
342 rc = -EINVAL;
343 goto out;
344 }
346 switch ( chn1->state )
347 {
348 case ECS_FREE:
349 case ECS_RESERVED:
350 rc = -EINVAL;
351 goto out;
353 case ECS_UNBOUND:
354 break;
/* PIRQ: unbind the guest IRQ and drop the pirq->port mapping. */
356 case ECS_PIRQ:
357 if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
358 d1->pirq_to_evtchn[chn1->u.pirq] = 0;
359 break;
/* VIRQ: clear the virq->port mapping on whichever VCPU holds it. */
361 case ECS_VIRQ:
362 for_each_vcpu ( d1, v )
363 if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
364 v->virq_to_evtchn[chn1->u.virq] = 0;
365 break;
367 case ECS_IPI:
368 break;
370 case ECS_INTERDOMAIN:
371 if ( d2 == NULL )
372 {
373 d2 = chn1->u.interdomain.remote_dom;
375 /* If we unlock d1 then we could lose d2. Must get a reference. */
376 if ( unlikely(!get_domain(d2)) )
377 {
378 /*
379 * Failed to obtain a reference. No matter: d2 must be dying
380 * and so will close this event channel for us.
381 */
382 d2 = NULL;
383 goto out;
384 }
/* Lock ordering by struct address; retry from 'again' if we must
 * drop d1's lock to acquire d2's first. */
386 if ( d1 < d2 )
387 {
388 spin_lock(&d2->evtchn_lock);
389 }
390 else if ( d1 != d2 )
391 {
392 spin_unlock(&d1->evtchn_lock);
393 spin_lock(&d2->evtchn_lock);
394 goto again;
395 }
396 }
397 else if ( d2 != chn1->u.interdomain.remote_dom )
398 {
399 /*
400 * We can only get here if the port was closed and re-bound after
401 * unlocking d1 but before locking d2 above. We could retry but
402 * it is easier to return the same error as if we had seen the
403 * port in ECS_CLOSED. It must have passed through that state for
404 * us to end up here, so it's a valid error to return.
405 */
406 BUG_ON(d1 != current->domain);
407 rc = -EINVAL;
408 goto out;
409 }
/* Both locks held: revert the remote end to ECS_UNBOUND naming d1. */
411 port2 = chn1->u.interdomain.remote_port;
412 BUG_ON(!port_is_valid(d2, port2));
414 chn2 = evtchn_from_port(d2, port2);
415 BUG_ON(chn2->state != ECS_INTERDOMAIN);
416 BUG_ON(chn2->u.interdomain.remote_dom != d1);
418 chn2->state = ECS_UNBOUND;
419 chn2->u.unbound.remote_domid = d1->domain_id;
420 break;
422 default:
423 BUG();
424 }
426 /* Reset binding to vcpu0 when the channel is freed. */
427 chn1->state = ECS_FREE;
428 chn1->notify_vcpu_id = 0;
430 out:
431 if ( d2 != NULL )
432 {
433 if ( d1 != d2 )
434 spin_unlock(&d2->evtchn_lock);
435 put_domain(d2);
436 }
438 spin_unlock(&d1->evtchn_lock);
440 return rc;
441 }
444 static long evtchn_close(evtchn_close_t *close)
445 {
446 return __evtchn_close(current->domain, close->port);
447 }
/*
 * EVTCHNOP_send: raise a notification on local port <lport>.  Interdomain
 * channels notify the remote end (waking a blocked Xen consumer, or setting
 * the guest's pending bit); IPI channels notify the bound local VCPU;
 * sends on unbound ports are silently dropped.
 */
450 long evtchn_send(unsigned int lport)
451 {
452 struct evtchn *lchn, *rchn;
453 struct domain *ld = current->domain, *rd;
454 struct vcpu *rvcpu;
455 int rport, ret = 0;
457 spin_lock(&ld->evtchn_lock);
459 if ( unlikely(!port_is_valid(ld, lport)) )
460 {
461 spin_unlock(&ld->evtchn_lock);
462 return -EINVAL;
463 }
465 lchn = evtchn_from_port(ld, lport);
467 /* Guest cannot send via a Xen-attached event channel. */
468 if ( unlikely(lchn->consumer_is_xen) )
469 {
470 spin_unlock(&ld->evtchn_lock);
471 return -EINVAL;
472 }
474 switch ( lchn->state )
475 {
476 case ECS_INTERDOMAIN:
477 rd = lchn->u.interdomain.remote_dom;
478 rport = lchn->u.interdomain.remote_port;
479 rchn = evtchn_from_port(rd, rport);
480 rvcpu = rd->vcpu[rchn->notify_vcpu_id];
481 if ( rchn->consumer_is_xen )
482 {
483 /* Xen consumers need notification only if they are blocked. */
484 if ( test_and_clear_bit(_VCPUF_blocked_in_xen,
485 &rvcpu->vcpu_flags) )
486 vcpu_wake(rvcpu);
487 }
488 else
489 {
490 evtchn_set_pending(rvcpu, rport);
491 }
492 break;
493 case ECS_IPI:
494 evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
495 break;
496 case ECS_UNBOUND:
497 /* silently drop the notification */
498 break;
499 default:
500 ret = -EINVAL;
501 }
503 spin_unlock(&ld->evtchn_lock);
505 return ret;
506 }
/*
 * Mark <port> pending for delivery to VCPU <v>: set the shared-info pending
 * bit and, if the port is unmasked and its selector bit was not already set,
 * flag the VCPU so the guest notices.  Finally wake any VCPUs that are
 * polling for event delivery on this domain.
 */
509 void evtchn_set_pending(struct vcpu *v, int port)
510 {
511 struct domain *d = v->domain;
512 shared_info_t *s = d->shared_info;
514 /*
515 * The following bit operations must happen in strict order.
516 * NB. On x86, the atomic bit operations also act as memory barriers.
517 * There is therefore sufficiently strict ordering for this architecture --
518 * others may require explicit memory barriers.
519 */
/* Already pending: nothing further to do. */
521 if ( test_and_set_bit(port, __shared_info_addr(d, s, evtchn_pending)) )
522 return;
/* Unmasked and newly selected: tell the VCPU it has events. */
524 if ( !test_bit (port, __shared_info_addr(d, s, evtchn_mask)) &&
525 !test_and_set_bit(port / BITS_PER_GUEST_LONG(d),
526 vcpu_info_addr(v, evtchn_pending_sel)) )
527 {
528 vcpu_mark_events_pending(v);
529 }
531 /* Check if some VCPU might be polling for this event. */
532 if ( unlikely(test_bit(_DOMF_polling, &d->domain_flags)) &&
533 likely(test_and_clear_bit(_DOMF_polling, &d->domain_flags)) )
534 {
/* NB. 'v' is reused here to walk every VCPU of the domain. */
535 for_each_vcpu ( d, v )
536 if ( test_and_clear_bit(_VCPUF_polling, &v->vcpu_flags) )
537 vcpu_unblock(v);
538 }
539 }
542 void send_guest_vcpu_virq(struct vcpu *v, int virq)
543 {
544 int port;
546 ASSERT(!virq_is_global(virq));
548 port = v->virq_to_evtchn[virq];
549 if ( unlikely(port == 0) )
550 return;
552 evtchn_set_pending(v, port);
553 }
555 void send_guest_global_virq(struct domain *d, int virq)
556 {
557 int port;
558 struct vcpu *v;
559 struct evtchn *chn;
561 ASSERT(virq_is_global(virq));
563 if ( unlikely(d == NULL) )
564 return;
566 v = d->vcpu[0];
567 if ( unlikely(v == NULL) )
568 return;
570 port = v->virq_to_evtchn[virq];
571 if ( unlikely(port == 0) )
572 return;
574 chn = evtchn_from_port(d, port);
575 evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
576 }
579 void send_guest_pirq(struct domain *d, int pirq)
580 {
581 int port = d->pirq_to_evtchn[pirq];
582 struct evtchn *chn;
584 ASSERT(port != 0);
586 chn = evtchn_from_port(d, port);
587 evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
588 }
591 static long evtchn_status(evtchn_status_t *status)
592 {
593 struct domain *d;
594 domid_t dom = status->dom;
595 int port = status->port;
596 struct evtchn *chn;
597 long rc = 0;
599 if ( dom == DOMID_SELF )
600 dom = current->domain->domain_id;
601 else if ( !IS_PRIV(current->domain) )
602 return -EPERM;
604 if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
605 return -ESRCH;
607 spin_lock(&d->evtchn_lock);
609 if ( !port_is_valid(d, port) )
610 {
611 rc = -EINVAL;
612 goto out;
613 }
615 chn = evtchn_from_port(d, port);
616 switch ( chn->state )
617 {
618 case ECS_FREE:
619 case ECS_RESERVED:
620 status->status = EVTCHNSTAT_closed;
621 break;
622 case ECS_UNBOUND:
623 status->status = EVTCHNSTAT_unbound;
624 status->u.unbound.dom = chn->u.unbound.remote_domid;
625 break;
626 case ECS_INTERDOMAIN:
627 status->status = EVTCHNSTAT_interdomain;
628 status->u.interdomain.dom =
629 chn->u.interdomain.remote_dom->domain_id;
630 status->u.interdomain.port = chn->u.interdomain.remote_port;
631 break;
632 case ECS_PIRQ:
633 status->status = EVTCHNSTAT_pirq;
634 status->u.pirq = chn->u.pirq;
635 break;
636 case ECS_VIRQ:
637 status->status = EVTCHNSTAT_virq;
638 status->u.virq = chn->u.virq;
639 break;
640 case ECS_IPI:
641 status->status = EVTCHNSTAT_ipi;
642 break;
643 default:
644 BUG();
645 }
647 status->vcpu = chn->notify_vcpu_id;
649 out:
650 spin_unlock(&d->evtchn_lock);
651 rcu_unlock_domain(d);
652 return rc;
653 }
656 long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
657 {
658 struct domain *d = current->domain;
659 struct evtchn *chn;
660 long rc = 0;
662 if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
663 return -ENOENT;
665 spin_lock(&d->evtchn_lock);
667 if ( !port_is_valid(d, port) )
668 {
669 rc = -EINVAL;
670 goto out;
671 }
673 chn = evtchn_from_port(d, port);
675 /* Guest cannot re-bind a Xen-attached event channel. */
676 if ( unlikely(chn->consumer_is_xen) )
677 {
678 rc = -EINVAL;
679 goto out;
680 }
682 switch ( chn->state )
683 {
684 case ECS_VIRQ:
685 if ( virq_is_global(chn->u.virq) )
686 chn->notify_vcpu_id = vcpu_id;
687 else
688 rc = -EINVAL;
689 break;
690 case ECS_UNBOUND:
691 case ECS_INTERDOMAIN:
692 case ECS_PIRQ:
693 chn->notify_vcpu_id = vcpu_id;
694 break;
695 default:
696 rc = -EINVAL;
697 break;
698 }
700 out:
701 spin_unlock(&d->evtchn_lock);
702 return rc;
703 }
/*
 * EVTCHNOP_unmask: clear the mask bit for a port of the calling domain and,
 * if the port was pending while masked, deliver the (previously suppressed)
 * notification to the bound VCPU.
 */
706 static long evtchn_unmask(evtchn_unmask_t *unmask)
707 {
708 struct domain *d = current->domain;
709 shared_info_t *s = d->shared_info;
710 int port = unmask->port;
711 struct vcpu *v;
713 spin_lock(&d->evtchn_lock);
715 if ( unlikely(!port_is_valid(d, port)) )
716 {
717 spin_unlock(&d->evtchn_lock);
718 return -EINVAL;
719 }
721 v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
723 /*
724 * These operations must happen in strict order. Based on
725 * include/xen/event.h:evtchn_set_pending().
726 */
/* Unmask; if a pending event was suppressed, select and flag the VCPU. */
727 if ( test_and_clear_bit(port, __shared_info_addr(d, s, evtchn_mask)) &&
728 test_bit (port, __shared_info_addr(d, s, evtchn_pending)) &&
729 !test_and_set_bit (port / BITS_PER_GUEST_LONG(d),
730 vcpu_info_addr(v, evtchn_pending_sel)) )
731 {
732 vcpu_mark_events_pending(v);
733 }
735 spin_unlock(&d->evtchn_lock);
737 return 0;
738 }
741 static long evtchn_reset(evtchn_reset_t *r)
742 {
743 domid_t dom = r->dom;
744 struct domain *d;
745 int i;
747 if ( dom == DOMID_SELF )
748 dom = current->domain->domain_id;
749 else if ( !IS_PRIV(current->domain) )
750 return -EPERM;
752 if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
753 return -ESRCH;
755 for ( i = 0; port_is_valid(d, i); i++ )
756 (void)__evtchn_close(d, i);
758 rcu_unlock_domain(d);
760 return 0;
761 }
764 long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
765 {
766 long rc;
768 switch ( cmd )
769 {
770 case EVTCHNOP_alloc_unbound: {
771 struct evtchn_alloc_unbound alloc_unbound;
772 if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
773 return -EFAULT;
774 rc = evtchn_alloc_unbound(&alloc_unbound);
775 if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
776 rc = -EFAULT; /* Cleaning up here would be a mess! */
777 break;
778 }
780 case EVTCHNOP_bind_interdomain: {
781 struct evtchn_bind_interdomain bind_interdomain;
782 if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
783 return -EFAULT;
784 rc = evtchn_bind_interdomain(&bind_interdomain);
785 if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
786 rc = -EFAULT; /* Cleaning up here would be a mess! */
787 break;
788 }
790 case EVTCHNOP_bind_virq: {
791 struct evtchn_bind_virq bind_virq;
792 if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
793 return -EFAULT;
794 rc = evtchn_bind_virq(&bind_virq);
795 if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
796 rc = -EFAULT; /* Cleaning up here would be a mess! */
797 break;
798 }
800 case EVTCHNOP_bind_ipi: {
801 struct evtchn_bind_ipi bind_ipi;
802 if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
803 return -EFAULT;
804 rc = evtchn_bind_ipi(&bind_ipi);
805 if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
806 rc = -EFAULT; /* Cleaning up here would be a mess! */
807 break;
808 }
810 case EVTCHNOP_bind_pirq: {
811 struct evtchn_bind_pirq bind_pirq;
812 if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
813 return -EFAULT;
814 rc = evtchn_bind_pirq(&bind_pirq);
815 if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
816 rc = -EFAULT; /* Cleaning up here would be a mess! */
817 break;
818 }
820 case EVTCHNOP_close: {
821 struct evtchn_close close;
822 if ( copy_from_guest(&close, arg, 1) != 0 )
823 return -EFAULT;
824 rc = evtchn_close(&close);
825 break;
826 }
828 case EVTCHNOP_send: {
829 struct evtchn_send send;
830 if ( copy_from_guest(&send, arg, 1) != 0 )
831 return -EFAULT;
832 rc = evtchn_send(send.port);
833 break;
834 }
836 case EVTCHNOP_status: {
837 struct evtchn_status status;
838 if ( copy_from_guest(&status, arg, 1) != 0 )
839 return -EFAULT;
840 rc = evtchn_status(&status);
841 if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
842 rc = -EFAULT;
843 break;
844 }
846 case EVTCHNOP_bind_vcpu: {
847 struct evtchn_bind_vcpu bind_vcpu;
848 if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
849 return -EFAULT;
850 rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
851 break;
852 }
854 case EVTCHNOP_unmask: {
855 struct evtchn_unmask unmask;
856 if ( copy_from_guest(&unmask, arg, 1) != 0 )
857 return -EFAULT;
858 rc = evtchn_unmask(&unmask);
859 break;
860 }
862 case EVTCHNOP_reset: {
863 struct evtchn_reset reset;
864 if ( copy_from_guest(&reset, arg, 1) != 0 )
865 return -EFAULT;
866 rc = evtchn_reset(&reset);
867 break;
868 }
870 default:
871 rc = -ENOSYS;
872 break;
873 }
875 return rc;
876 }
879 int alloc_unbound_xen_event_channel(
880 struct vcpu *local_vcpu, domid_t remote_domid)
881 {
882 struct evtchn *chn;
883 struct domain *d = local_vcpu->domain;
884 int port;
886 spin_lock(&d->evtchn_lock);
888 if ( (port = get_free_port(d)) < 0 )
889 goto out;
890 chn = evtchn_from_port(d, port);
892 chn->state = ECS_UNBOUND;
893 chn->consumer_is_xen = 1;
894 chn->notify_vcpu_id = local_vcpu->vcpu_id;
895 chn->u.unbound.remote_domid = remote_domid;
897 out:
898 spin_unlock(&d->evtchn_lock);
900 return port;
901 }
904 void free_xen_event_channel(
905 struct vcpu *local_vcpu, int port)
906 {
907 struct evtchn *chn;
908 struct domain *d = local_vcpu->domain;
910 spin_lock(&d->evtchn_lock);
911 chn = evtchn_from_port(d, port);
912 BUG_ON(!chn->consumer_is_xen);
913 chn->consumer_is_xen = 0;
914 spin_unlock(&d->evtchn_lock);
916 (void)__evtchn_close(d, port);
917 }
920 void notify_via_xen_event_channel(int lport)
921 {
922 struct evtchn *lchn, *rchn;
923 struct domain *ld = current->domain, *rd;
924 int rport;
926 spin_lock(&ld->evtchn_lock);
928 ASSERT(port_is_valid(ld, lport));
929 lchn = evtchn_from_port(ld, lport);
930 ASSERT(lchn->consumer_is_xen);
932 if ( likely(lchn->state == ECS_INTERDOMAIN) )
933 {
934 rd = lchn->u.interdomain.remote_dom;
935 rport = lchn->u.interdomain.remote_port;
936 rchn = evtchn_from_port(rd, rport);
937 evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
938 }
940 spin_unlock(&ld->evtchn_lock);
941 }
944 int evtchn_init(struct domain *d)
945 {
946 spin_lock_init(&d->evtchn_lock);
947 if ( get_free_port(d) != 0 )
948 return -EINVAL;
949 evtchn_from_port(d, 0)->state = ECS_RESERVED;
950 return 0;
951 }
954 void evtchn_destroy(struct domain *d)
955 {
956 int i;
958 for ( i = 0; port_is_valid(d, i); i++ )
959 {
960 evtchn_from_port(d, i)->consumer_is_xen = 0;
961 (void)__evtchn_close(d, i);
962 }
964 for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
965 xfree(d->evtchn[i]);
966 }
968 /*
969 * Local variables:
970 * mode: C
971 * c-set-style: "BSD"
972 * c-basic-offset: 4
973 * tab-width: 4
974 * indent-tabs-mode: nil
975 * End:
976 */