xen/common/event_channel.c @ 19788:2f9e1348aa98 (ia64/xen-unstable)

x86_64: allow more vCPU-s per guest

Since the shared info layout is fixed, guests are required to use
VCPUOP_register_vcpu_info prior to booting any vCPU beyond the
traditional limit of 32.

MAX_VIRT_CPUS, being an implementation detail of the hypervisor, is no
longer exposed in the public headers.
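
For illustration, a minimal guest-side sketch of the required registration
(not part of this changeset): HYPERVISOR_vcpu_op() and struct
vcpu_register_vcpu_info are from the public interface (public/vcpu.h), while
virt_to_mfn(), NR_CPUS, and the per-vCPU area are hypothetical guest-kernel
details standing in for whatever the kernel actually provides.

    /* Per-vCPU info areas in guest memory, replacing the fixed shared-info
     * slots; each area must not cross a page boundary. */
    static struct vcpu_info vcpu_info_area[NR_CPUS]
        __attribute__((__aligned__(64)));

    /* Must be called before bringing up any vCPU with id >= 32. */
    static int register_vcpu_info(unsigned int cpu)
    {
        unsigned long va = (unsigned long)&vcpu_info_area[cpu];
        struct vcpu_register_vcpu_info info = {
            .mfn    = virt_to_mfn(va),       /* machine frame backing va */
            .offset = va & (PAGE_SIZE - 1),  /* offset within that frame */
        };

        return HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
    }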

The tools changes are clearly incomplete (and done only so things would
build again), and the current state of the tools (using scalar variables
all over the place to represent vCPU bitmaps) very likely doesn't permit
booting DomU-s with more than the traditional number of vCPU-s. Testing
of the extended functionality was done with Dom0 (96 vCPU-s, as well as
128 vCPU-s out of which the kernel elected - by way of a simple kernel
side patch - to use only some, resulting in a sparse bitmap).

ia64 changes only to make things build, and build-tested only (and the
tools part only as far as the build would go without encountering
unrelated problems in the blktap code).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 18 10:14:16 2009 +0100
parents 527b628b8e83
/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/compat.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <xsm/xsm.h>

#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
#define port_is_valid(d,p)    \
    (((p) >= 0) && ((p) < MAX_EVTCHNS(d)) && \
     (bucket_from_port(d,p) != NULL))
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])

#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                 "EVTCHNOP failure: error %d\n",                    \
                 (_errno));                                         \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )
#define ERROR_EXIT_DOM(_errno, _dom)                                \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                 "EVTCHNOP failure: domain %d, error %d\n",         \
                 (_dom)->domain_id, (_errno));                      \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )

static int evtchn_set_pending(struct vcpu *v, int port);

static int virq_is_global(int virq)
{
    int rc;

    ASSERT((virq >= 0) && (virq < NR_VIRQS));

    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
        rc = 0;
        break;
    case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
        rc = arch_virq_is_global(virq);
        break;
    default:
        rc = 1;
        break;
    }

    return rc;
}


static int get_free_port(struct domain *d)
{
    struct evtchn *chn;
    int            port;
    int            i, j;

    if ( d->is_dying )
        return -EINVAL;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    if ( port == MAX_EVTCHNS(d) )
        return -ENOSPC;

    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    bucket_from_port(d, port) = chn;

    for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
    {
        if ( xsm_alloc_security_evtchn(&chn[i]) )
        {
            for ( j = 0; j < i; j++ )
                xsm_free_security_evtchn(&chn[j]);
            xfree(chn);
            return -ENOMEM;
        }
    }

    return port;
}


static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int            port;
    domid_t        dom = alloc->dom;
    long           rc;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT_DOM(port, d);
    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_unbound(d, chn, alloc->remote_dom);
    if ( rc )
        goto out;

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);

    return rc;
}


static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            lport, rport = bind->remote_port;
    domid_t        rdom = bind->remote_dom;
    long           rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
    if ( ld < rd )
    {
        spin_lock(&ld->event_lock);
        spin_lock(&rd->event_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->event_lock);
        spin_lock(&ld->event_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT_DOM(-EINVAL, rd);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT_DOM(-EINVAL, rd);

    rc = xsm_evtchn_interdomain(ld, lchn, rd, rchn);
    if ( rc )
        goto out;

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state                     = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->event_lock);
    if ( ld != rd )
        spin_unlock(&rd->event_lock);

    rcu_unlock_domain(rd);

    return rc;
}


static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu   *v;
    struct domain *d = current->domain;
    int            port, virq = bind->virq, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
         ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    if ( unlikely(!v->vcpu_info) )
        return -EAGAIN;

    spin_lock(&d->event_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq         = virq;

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}


static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
         (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    if ( unlikely(!d->vcpu[vcpu]->vcpu_info) )
        return -EAGAIN;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_IPI;
    chn->notify_vcpu_id = vcpu;

    bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}


static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->event_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}


static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->event_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(chn1->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        pirq_guest_unbind(d1, chn1->u.pirq);
        d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
        {
            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
                continue;
            v->virq_to_evtchn[chn1->u.virq] = 0;
            spin_barrier_irq(&v->virq_lock);
        }
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
                BUG();

            if ( d1 < d2 )
            {
                spin_lock(&d2->event_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->event_lock);
                spin_lock(&d2->event_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Clear pending event to avoid unexpected behavior on re-bind. */
    clear_bit(port1, &shared_info(d1, evtchn_pending));

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

    xsm_evtchn_close_post(chn1);

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->event_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->event_lock);

    return rc;
}


static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}

int evtchn_send(struct domain *d, unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = d, *rd;
    struct vcpu   *rvcpu;
    int            rport, ret = 0;

    spin_lock(&ld->event_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->event_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);

    /* Guest cannot send via a Xen-attached event channel. */
    if ( unlikely(lchn->consumer_is_xen) )
    {
        spin_unlock(&ld->event_lock);
        return -EINVAL;
    }

    ret = xsm_evtchn_send(ld, lchn);
    if ( ret )
        goto out;

    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        rvcpu = rd->vcpu[rchn->notify_vcpu_id];
        if ( rchn->consumer_is_xen )
        {
            /* Xen consumers need notification only if they are blocked. */
            if ( test_and_clear_bit(_VPF_blocked_in_xen,
                                    &rvcpu->pause_flags) )
                vcpu_wake(rvcpu);
        }
        else
        {
            evtchn_set_pending(rvcpu, rport);
        }
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* silently drop the notification */
        break;
    default:
        ret = -EINVAL;
    }

 out:
    spin_unlock(&ld->event_lock);

    return ret;
}

static int evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    int vcpuid;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
        return 1;

    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
                           &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    /* Check if some VCPU might be polling for this event. */
    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
        return 0;

    /* Wake any interested (or potentially interested) pollers. */
    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
          vcpuid < d->max_vcpus;
          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
    {
        v = d->vcpu[vcpuid];
        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
             test_and_clear_bit(vcpuid, d->poll_mask) )
        {
            v->poll_evtchn = 0;
            vcpu_unblock(v);
        }
    }

    return 0;
}

int guest_enabled_event(struct vcpu *v, int virq)
{
    return ((v != NULL) && (v->virq_to_evtchn[virq] != 0));
}

void send_guest_vcpu_virq(struct vcpu *v, int virq)
{
    unsigned long flags;
    int port;

    ASSERT(!virq_is_global(virq));

    spin_lock_irqsave(&v->virq_lock, flags);

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        goto out;

    evtchn_set_pending(v, port);

 out:
    spin_unlock_irqrestore(&v->virq_lock, flags);
}

void send_guest_global_virq(struct domain *d, int virq)
{
    unsigned long flags;
    int port;
    struct vcpu *v;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    if ( unlikely(d == NULL) || unlikely(d->vcpu == NULL) )
        return;

    v = d->vcpu[0];
    if ( unlikely(v == NULL) )
        return;

    spin_lock_irqsave(&v->virq_lock, flags);

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        goto out;

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);

 out:
    spin_unlock_irqrestore(&v->virq_lock, flags);
}

int send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    /*
     * It should not be possible to race with __evtchn_close():
     * The caller of this function must synchronise with pirq_guest_unbind().
     */
    ASSERT(port != 0);

    chn = evtchn_from_port(d, port);
    return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}


static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t        dom = status->dom;
    int            port = status->port;
    struct evtchn *chn;
    long           rc = 0;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    rc = xsm_evtchn_status(d, chn);
    if ( rc )
        goto out;

    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);

    return rc;
}


long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long           rc = 0;

    if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    if ( unlikely(!d->vcpu[vcpu_id]->vcpu_info) )
        return -EAGAIN;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    /* Guest cannot re-bind a Xen-attached event channel. */
    if ( unlikely(chn->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
    case ECS_PIRQ:
        chn->notify_vcpu_id = vcpu_id;
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->event_lock);

    return rc;
}


int evtchn_unmask(unsigned int port)
{
    struct domain *d = current->domain;
    struct vcpu   *v;

    spin_lock(&d->event_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->event_lock);
        return -EINVAL;
    }

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending().
     */
    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
         test_bit          (port, &shared_info(d, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
                            &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    spin_unlock(&d->event_lock);

    return 0;
}


static long evtchn_reset(evtchn_reset_t *r)
{
    domid_t dom = r->dom;
    struct domain *d;
    int i, rc;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    rc = xsm_evtchn_reset(current->domain, d);
    if ( rc )
        goto out;

    for ( i = 0; port_is_valid(d, i); i++ )
        (void)__evtchn_close(d, i);

    rc = 0;

 out:
    rcu_unlock_domain(d);

    return rc;
}


long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(&close);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(current->domain, send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(unmask.port);
        break;
    }

    case EVTCHNOP_reset: {
        struct evtchn_reset reset;
        if ( copy_from_guest(&reset, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_reset(&reset);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}


int alloc_unbound_xen_event_channel(
    struct vcpu *local_vcpu, domid_t remote_domid)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;
    int            port;

    if ( unlikely(!local_vcpu->vcpu_info) )
        return -EAGAIN;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 )
        goto out;
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    chn->consumer_is_xen = 1;
    chn->notify_vcpu_id = local_vcpu->vcpu_id;
    chn->u.unbound.remote_domid = remote_domid;

 out:
    spin_unlock(&d->event_lock);

    return port;
}


void free_xen_event_channel(
    struct vcpu *local_vcpu, int port)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;

    spin_lock(&d->event_lock);

    if ( unlikely(d->is_dying) )
    {
        spin_unlock(&d->event_lock);
        return;
    }

    BUG_ON(!port_is_valid(d, port));
    chn = evtchn_from_port(d, port);
    BUG_ON(!chn->consumer_is_xen);
    chn->consumer_is_xen = 0;

    spin_unlock(&d->event_lock);

    (void)__evtchn_close(d, port);
}


void notify_via_xen_event_channel(int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            rport;

    spin_lock(&ld->event_lock);

    ASSERT(port_is_valid(ld, lport));
    lchn = evtchn_from_port(ld, lport);
    ASSERT(lchn->consumer_is_xen);

    if ( likely(lchn->state == ECS_INTERDOMAIN) )
    {
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
    }

    spin_unlock(&ld->event_lock);
}


int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->event_lock);
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;

#if MAX_VIRT_CPUS > BITS_PER_LONG
    d->poll_mask = xmalloc_array(unsigned long, BITS_TO_LONGS(MAX_VIRT_CPUS));
    if ( !d->poll_mask )
        return -ENOMEM;
    bitmap_zero(d->poll_mask, MAX_VIRT_CPUS);
#endif

    return 0;
}


void evtchn_destroy(struct domain *d)
{
    int i;

    /* After this barrier no new event-channel allocations can occur. */
    BUG_ON(!d->is_dying);
    spin_barrier(&d->event_lock);

    /* Close all existing event channels. */
    for ( i = 0; port_is_valid(d, i); i++ )
    {
        evtchn_from_port(d, i)->consumer_is_xen = 0;
        (void)__evtchn_close(d, i);
    }

    /* Free all event-channel buckets. */
    spin_lock(&d->event_lock);
    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
    {
        xsm_free_security_evtchn(d->evtchn[i]);
        xfree(d->evtchn[i]);
        d->evtchn[i] = NULL;
    }
    spin_unlock(&d->event_lock);

#if MAX_VIRT_CPUS > BITS_PER_LONG
    xfree(d->poll_mask);
    d->poll_mask = NULL;
#endif
}

static void domain_dump_evtchn_info(struct domain *d)
{
    unsigned int port;

    printk("Domain %d polling vCPUs: %08lx\n", d->domain_id, d->poll_mask[0]);

    if ( !spin_trylock(&d->event_lock) )
        return;

    printk("Event channel information for domain %d:\n",
           d->domain_id);

    for ( port = 1; port < MAX_EVTCHNS(d); ++port )
    {
        const struct evtchn *chn;

        if ( !port_is_valid(d, port) )
            continue;
        chn = evtchn_from_port(d, port);
        if ( chn->state == ECS_FREE )
            continue;

        printk(" %4u[%d/%d]: s=%d n=%d",
               port,
               test_bit(port, &shared_info(d, evtchn_pending)),
               test_bit(port, &shared_info(d, evtchn_mask)),
               chn->state, chn->notify_vcpu_id);
        switch ( chn->state )
        {
        case ECS_UNBOUND:
            printk(" d=%d", chn->u.unbound.remote_domid);
            break;
        case ECS_INTERDOMAIN:
            printk(" d=%d p=%d",
                   chn->u.interdomain.remote_dom->domain_id,
                   chn->u.interdomain.remote_port);
            break;
        case ECS_PIRQ:
            printk(" p=%d", chn->u.pirq);
            break;
        case ECS_VIRQ:
            printk(" v=%d", chn->u.virq);
            break;
        }
        printk(" x=%d\n", chn->consumer_is_xen);
    }

    spin_unlock(&d->event_lock);
}

static void dump_evtchn_info(unsigned char key)
{
    struct domain *d;

    printk("'%c' pressed -> dumping event-channel info\n", key);

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
        domain_dump_evtchn_info(d);

    rcu_read_unlock(&domlist_read_lock);
}

static int __init dump_evtchn_info_key_init(void)
{
    register_keyhandler('e', dump_evtchn_info, "dump evtchn info");
    return 0;
}
__initcall(dump_evtchn_info_key_init);

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */