ia64/xen-unstable

xen/common/event_channel.c @ 17062:0769835cf50f

x86 shadow: Reduce scope of shadow lock.

emulate_map_dest() does not require holding the shadow lock: the only
shadow-related operation it can involve is removing a shadow, which is
infrequent and can take the lock internally. Everything else it does is
either a guest table walk or per-vcpu monitor table manipulation.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 14 10:33:12 2008 +0000 (2008-02-14)
parents cff4c8a1aa28
children af33f2054f47
/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/compat.h>
#include <xen/guest_access.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <xsm/xsm.h>

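/*
 * Event channels are kept in per-domain buckets of EVTCHNS_PER_BUCKET
 * entries, allocated on demand by get_free_port(). The macros below map
 * a port number to its bucket and to its struct evtchn within the bucket,
 * and check that a port lies within a bucket that has been allocated.
 */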
#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
#define port_is_valid(d,p)                             \
    (((p) >= 0) && ((p) < MAX_EVTCHNS(d)) &&           \
     (bucket_from_port(d,p) != NULL))
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])

#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                 "EVTCHNOP failure: error %d\n",                    \
                 (_errno));                                         \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )
#define ERROR_EXIT_DOM(_errno, _dom)                                \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                 "EVTCHNOP failure: domain %d, error %d\n",         \
                 (_dom)->domain_id, (_errno));                      \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )

static int virq_is_global(int virq)
{
    int rc;

    ASSERT((virq >= 0) && (virq < NR_VIRQS));

    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
        rc = 0;
        break;
    case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
        rc = arch_virq_is_global(virq);
        break;
    default:
        rc = 1;
        break;
    }

    return rc;
}

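/*
 * Return the lowest-numbered free port in domain d, allocating a fresh
 * zeroed bucket of EVTCHNS_PER_BUCKET channels (including their XSM
 * security labels) if every allocated bucket is full. Returns -ENOSPC
 * once the port space is exhausted and -EINVAL for a dying domain.
 */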
static int get_free_port(struct domain *d)
{
    struct evtchn *chn;
    int port;
    int i, j;

    if ( d->is_dying )
        return -EINVAL;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    if ( port == MAX_EVTCHNS(d) )
        return -ENOSPC;

    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    bucket_from_port(d, port) = chn;

    for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
    {
        if ( xsm_alloc_security_evtchn(&chn[i]) )
        {
            for ( j = 0; j < i; j++ )
            {
                xsm_free_security_evtchn(&chn[j]);
            }
            xfree(chn);
            return -ENOMEM;
        }
    }

    return port;
}

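/*
 * EVTCHNOP_alloc_unbound: allocate a port in domain <alloc->dom> that may
 * later be bound by the domain named in <alloc->remote_dom>. DOMID_SELF
 * in either field resolves to the calling domain.
 */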
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int port;
    domid_t dom = alloc->dom;
    long rc;

    if ( dom == DOMID_SELF )
        d = current->domain;
    else {
        if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
            return -ESRCH;
        if ( !IS_PRIV_FOR(current->domain, d) ) {
            rc = -EPERM;
            goto out2;
        }
    }

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT_DOM(port, d);
    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_unbound(d, chn, alloc->remote_dom);
    if ( rc )
        goto out;

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->evtchn_lock);
 out2:
    rcu_unlock_domain(d);

    return rc;
}

static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int lport, rport = bind->remote_port;
    domid_t rdom = bind->remote_dom;
    long rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring the lock of the domain with the
     * smaller address (the pointer comparison gives a stable lock order). */
    if ( ld < rd )
    {
        spin_lock(&ld->evtchn_lock);
        spin_lock(&rd->evtchn_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->evtchn_lock);
        spin_lock(&ld->evtchn_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT_DOM(-EINVAL, rd);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id &&
          !IS_PRIV_FOR(ld, rd)) )
        ERROR_EXIT_DOM(-EINVAL, rd);

    rc = xsm_evtchn_interdomain(ld, lchn, rd, rchn);
    if ( rc )
        goto out;

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state                     = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->evtchn_lock);
    if ( ld != rd )
        spin_unlock(&rd->evtchn_lock);

    rcu_unlock_domain(rd);

    return rc;
}

static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu *v;
    struct domain *d = current->domain;
    int port, virq = bind->virq, vcpu = bind->vcpu;
    long rc = 0;

    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq         = virq;

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}

static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int port, vcpu = bind->vcpu;
    long rc = 0;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_IPI;
    chn->notify_vcpu_id = vcpu;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}

static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int port, pirq = bind->pirq;
    long rc;

    if ( (pirq < 0) || (pirq >= ARRAY_SIZE(d->pirq_to_evtchn)) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->evtchn_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}

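/*
 * Close <port1> in domain <d1>. For an interdomain channel the remote end
 * must be unhooked too, which means taking both evtchn_locks; to respect
 * the lock ordering we may have to drop d1's lock and retry (the 'again'
 * label), holding a domain reference so d2 cannot vanish in the window
 * where neither lock is held.
 */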
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu *v;
    struct evtchn *chn1, *chn2;
    int port2;
    long rc = 0;

 again:
    spin_lock(&d1->evtchn_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(chn1->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
                v->virq_to_evtchn[chn1->u.virq] = 0;
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
                BUG();

            if ( d1 < d2 )
            {
                spin_lock(&d2->evtchn_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->evtchn_lock);
                spin_lock(&d2->evtchn_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

    xsm_evtchn_close_post(chn1);

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->evtchn_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->evtchn_lock);

    return rc;
}

static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}

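/*
 * EVTCHNOP_send: for an interdomain channel, mark the remote end pending
 * (or wake a blocked in-Xen consumer); for an IPI channel, mark the local
 * port pending on the notify vcpu. Sends on unbound ports are silently
 * dropped.
 */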
long evtchn_send(unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    struct vcpu *rvcpu;
    int rport, ret = 0;

    spin_lock(&ld->evtchn_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);

    /* Guest cannot send via a Xen-attached event channel. */
    if ( unlikely(lchn->consumer_is_xen) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    ret = xsm_evtchn_send(ld, lchn);
    if ( ret )
        goto out;

    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        rvcpu = rd->vcpu[rchn->notify_vcpu_id];
        if ( rchn->consumer_is_xen )
        {
            /* Xen consumers need notification only if they are blocked. */
            if ( test_and_clear_bit(_VPF_blocked_in_xen,
                                    &rvcpu->pause_flags) )
                vcpu_wake(rvcpu);
        }
        else
        {
            evtchn_set_pending(rvcpu, rport);
        }
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* silently drop the notification */
        break;
    default:
        ret = -EINVAL;
    }

 out:
    spin_unlock(&ld->evtchn_lock);

    return ret;
}

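/*
 * Mark <port> pending for <v>: set the port's bit in the shared-info
 * pending bitmap and, unless the port is masked, propagate the event via
 * the per-vcpu selector word to the vcpu itself, finally unblocking any
 * vcpus polling for events on this domain.
 */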
void evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    shared_info_t *s = d->shared_info;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, __shared_info_addr(d, s, evtchn_pending)) )
        return;

    if ( !test_bit        (port, __shared_info_addr(d, s, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_GUEST_LONG(d),
                           vcpu_info_addr(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    /* Check if some VCPU might be polling for this event. */
    if ( unlikely(d->is_polling) )
    {
        d->is_polling = 0;
        smp_mb(); /* check vcpu poll-flags /after/ clearing domain poll-flag */
        for_each_vcpu ( d, v )
        {
            if ( !v->is_polling )
                continue;
            v->is_polling = 0;
            vcpu_unblock(v);
        }
    }
}

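/*
 * VIRQ/PIRQ delivery helpers: look up the event-channel port bound to the
 * interrupt and mark it pending. These are internal notification paths,
 * hence the ASSERTs rather than error returns.
 */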
void send_guest_vcpu_virq(struct vcpu *v, int virq)
{
    int port;

    ASSERT(!virq_is_global(virq));

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    evtchn_set_pending(v, port);
}

void send_guest_global_virq(struct domain *d, int virq)
{
    int port;
    struct vcpu *v;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    if ( unlikely(d == NULL) )
        return;

    v = d->vcpu[0];
    if ( unlikely(v == NULL) )
        return;

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}

void send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    ASSERT(port != 0);

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}

static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t dom = status->dom;
    int port = status->port;
    struct evtchn *chn;
    long rc = 0;

    if ( dom == DOMID_SELF )
        d = current->domain;
    else {
        if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
            return -ESRCH;
        if ( !IS_PRIV_FOR(current->domain, d) ) {
            rc = -EPERM;
            goto out2;
        }
    }

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_status(d, chn);
    if ( rc )
        goto out;

    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->evtchn_lock);
 out2:
    rcu_unlock_domain(d);
    return rc;
}

long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long rc = 0;

    if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    /* Guest cannot re-bind a Xen-attached event channel. */
    if ( unlikely(chn->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
    case ECS_PIRQ:
        chn->notify_vcpu_id = vcpu_id;
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->evtchn_lock);
    return rc;
}

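/*
 * EVTCHNOP_unmask: clear the port's mask bit and, if an event arrived
 * while the port was masked, propagate it to the notify vcpu now,
 * mirroring the bit protocol of evtchn_set_pending().
 */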
static long evtchn_unmask(evtchn_unmask_t *unmask)
{
    struct domain *d = current->domain;
    shared_info_t *s = d->shared_info;
    int port = unmask->port;
    struct vcpu *v;

    spin_lock(&d->evtchn_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->evtchn_lock);
        return -EINVAL;
    }

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending().
     */
    if ( test_and_clear_bit(port, __shared_info_addr(d, s, evtchn_mask)) &&
         test_bit          (port, __shared_info_addr(d, s, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_GUEST_LONG(d),
                            vcpu_info_addr(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    spin_unlock(&d->evtchn_lock);

    return 0;
}

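/*
 * EVTCHNOP_reset: close every valid port of the target domain. Closing
 * the reserved port 0 fails with -EINVAL inside __evtchn_close(), so it
 * stays reserved (as do any Xen-attached channels).
 */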
static long evtchn_reset(evtchn_reset_t *r)
{
    domid_t dom = r->dom;
    struct domain *d;
    int i;
    int rc;

    if ( dom == DOMID_SELF )
        d = current->domain;
    else {
        if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
            return -ESRCH;
        if ( !IS_PRIV_FOR(current->domain, d) ) {
            rc = -EPERM;
            goto out;
        }
    }

    rc = xsm_evtchn_reset(current->domain, d);
    if ( rc )
        goto out;

    for ( i = 0; port_is_valid(d, i); i++ )
        (void)__evtchn_close(d, i);

    rc = 0;
 out:
    rcu_unlock_domain(d);

    return rc;
}

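/*
 * Hypercall entry point: demultiplex EVTCHNOP_* commands, copying the
 * argument structure in from guest memory and, for commands that produce
 * output, back out again.
 */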
long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(&close);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(&unmask);
        break;
    }

    case EVTCHNOP_reset: {
        struct evtchn_reset reset;
        if ( copy_from_guest(&reset, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_reset(&reset);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

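/*
 * Xen-attached event channels: the consumer is Xen itself rather than the
 * guest, so consumer_is_xen is set and the guest is prevented from sending
 * on, closing or re-binding the channel (see the corresponding checks in
 * evtchn_send(), __evtchn_close() and evtchn_bind_vcpu()).
 */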
int alloc_unbound_xen_event_channel(
    struct vcpu *local_vcpu, domid_t remote_domid)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;
    int port;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        goto out;
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    chn->consumer_is_xen = 1;
    chn->notify_vcpu_id = local_vcpu->vcpu_id;
    chn->u.unbound.remote_domid = remote_domid;

 out:
    spin_unlock(&d->evtchn_lock);

    return port;
}

void free_xen_event_channel(
    struct vcpu *local_vcpu, int port)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;

    spin_lock(&d->evtchn_lock);
    chn = evtchn_from_port(d, port);
    BUG_ON(!chn->consumer_is_xen);
    chn->consumer_is_xen = 0;
    spin_unlock(&d->evtchn_lock);

    (void)__evtchn_close(d, port);
}

void notify_via_xen_event_channel(int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int rport;

    spin_lock(&ld->evtchn_lock);

    ASSERT(port_is_valid(ld, lport));
    lchn = evtchn_from_port(ld, lport);
    ASSERT(lchn->consumer_is_xen);

    if ( likely(lchn->state == ECS_INTERDOMAIN) )
    {
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
    }

    spin_unlock(&ld->evtchn_lock);
}

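/*
 * Domain construction and teardown. Port 0 is allocated at domain-creation
 * time and left in ECS_RESERVED, so a port value of zero can safely mean
 * "no event channel" (cf. the virq_to_evtchn/pirq_to_evtchn checks above).
 */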
int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->evtchn_lock);
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;
    return 0;
}

void evtchn_destroy(struct domain *d)
{
    int i;

    /* After this barrier no new event-channel allocations can occur. */
    BUG_ON(!d->is_dying);
    spin_barrier(&d->evtchn_lock);

    /* Close all existing event channels. */
    for ( i = 0; port_is_valid(d, i); i++ )
    {
        evtchn_from_port(d, i)->consumer_is_xen = 0;
        (void)__evtchn_close(d, i);
    }

    /* Free all event-channel buckets. */
    spin_lock(&d->evtchn_lock);
    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
    {
        xsm_free_security_evtchn(d->evtchn[i]);
        xfree(d->evtchn[i]);
    }
    spin_unlock(&d->evtchn_lock);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */