ia64/xen-unstable: xen/common/event_channel.c @ 10570:8dc4af3f192c

[IA64] Implement and use DOM0_DOMAIN_SETUP.

DOM0_GETMEMLIST now reads PTEs and uses gpfns.
Domain builder reworked: calls DOMAIN_SETUP and sets up the start_info page.
SAL data now live in domain memory.
is_vti field added to domain.arch.
Many cleanups (indentation, static qualifiers, warnings).

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
Author:   awilliam@xenbuild.aw
Date:     Wed Jul 05 09:28:32 2006 -0600
Parents:  63967ff8d459
Children: 462d6e4cb29a

/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/guest_access.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <acm/acm_hooks.h>

#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
#define port_is_valid(d,p)    \
    (((p) >= 0) && ((p) < MAX_EVTCHNS) && \
     (bucket_from_port(d,p) != NULL))
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])
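
/*
 * Port-to-bucket mapping, as a sketch: d->evtchn[] holds pointers to
 * fixed-size buckets of struct evtchn, allocated on demand by
 * get_free_port() below, so a domain only pays for the ports it uses.
 * For example, assuming EVTCHNS_PER_BUCKET were 128, port 300 would
 * resolve to bucket 300/128 = 2, slot 300 & 127 = 44, i.e.
 * &d->evtchn[2][44].  (The worked numbers are illustrative; the actual
 * constant is defined elsewhere and may differ.)
 */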

#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        DPRINTK("EVTCHNOP failure: domain %d, error %d, line %d\n", \
                current->domain->domain_id, (_errno), __LINE__);    \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )

static int virq_is_global(int virq)
{
    int rc;

    ASSERT((virq >= 0) && (virq < NR_VIRQS));

    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
        rc = 0;
        break;
    case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
        rc = arch_virq_is_global(virq);
        break;
    default:
        rc = 1;
        break;
    }

    return rc;
}

static int get_free_port(struct domain *d)
{
    struct evtchn *chn;
    int port;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    if ( port == MAX_EVTCHNS )
        return -ENOSPC;

    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    bucket_from_port(d, port) = chn;

    return port;
}
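
/*
 * Note on get_free_port(): buckets are allocated in sequence, so when every
 * existing port is in use the scan above runs off the end of the last
 * allocated bucket and 'port' lands on the first port of the next, not yet
 * allocated, bucket.  Installing a freshly zeroed bucket then makes that
 * port usable (assuming ECS_FREE is the zero state, which the memset
 * relies on).
 */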

static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int port;
    domid_t dom = alloc->dom;
    long rc;

    if ( (rc = acm_pre_eventchannel_unbound(dom, alloc->remote_dom)) != 0 )
        return rc;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    put_domain(d);

    return rc;
}

static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int lport, rport = bind->remote_port;
    domid_t rdom = bind->remote_dom;
    long rc;

    if ( (rc = acm_pre_eventchannel_interdomain(rdom)) != 0 )
        return rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = find_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring the lock of the domain with the
     * smaller pointer address. */
    if ( ld < rd )
    {
        spin_lock(&ld->evtchn_lock);
        spin_lock(&rd->evtchn_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->evtchn_lock);
        spin_lock(&ld->evtchn_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT(-EINVAL);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT(-EINVAL);

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state                     = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->evtchn_lock);
    if ( ld != rd )
        spin_unlock(&rd->evtchn_lock);

    put_domain(rd);

    return rc;
}
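
/*
 * Typical interdomain handshake, sketched from the guest side (the
 * out-of-band transport for the port number, e.g. xenstore, is
 * illustrative only and not part of this file):
 *
 *   In domain A:
 *       struct evtchn_alloc_unbound op = { .dom = DOMID_SELF,
 *                                          .remote_dom = B };
 *       HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
 *       ... advertise op.port to B out of band ...
 *
 *   In domain B:
 *       struct evtchn_bind_interdomain bop = { .remote_dom = A,
 *                                              .remote_port = op.port };
 *       HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bop);
 *       ... bop.local_port is now connected; either side may
 *           notify the other via EVTCHNOP_send ...
 */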

static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu *v;
    struct domain *d = current->domain;
    int port, virq = bind->virq, vcpu = bind->vcpu;
    long rc = 0;

    /* v is not yet initialised here; ARRAY_SIZE() only inspects its type. */
    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq         = virq;

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}

static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int port, vcpu = bind->vcpu;
    long rc = 0;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_IPI;
    chn->notify_vcpu_id = vcpu;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}

static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int port, pirq = bind->pirq;
    long rc;

    if ( (pirq < 0) || (pirq >= ARRAY_SIZE(d->pirq_to_evtchn)) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->evtchn_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}

static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu *v;
    struct evtchn *chn1, *chn2;
    int port2;
    long rc = 0;

 again:
    spin_lock(&d1->evtchn_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);
    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
                v->virq_to_evtchn[chn1->u.virq] = 0;
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
            {
                /*
                 * Failed to obtain a reference. No matter: d2 must be dying
                 * and so will close this event channel for us.
                 */
                d2 = NULL;
                goto out;
            }

            if ( d1 < d2 )
            {
                spin_lock(&d2->evtchn_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->evtchn_lock);
                spin_lock(&d2->evtchn_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_FREE. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            BUG_ON(d1 != current->domain);
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->evtchn_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->evtchn_lock);

    return rc;
}
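
/*
 * Locking in __evtchn_close(), summarised: closing an ECS_INTERDOMAIN
 * channel needs both domains' evtchn_locks.  d1 is already held, so if d2
 * sorts after d1 in the global lock order (pointer comparison, as in
 * evtchn_bind_interdomain()) d2 can simply be locked.  Otherwise d1 must
 * be dropped, d2 locked, and the whole lookup retried ("goto again"),
 * because the channel may have been closed and re-bound while d1 was
 * unlocked; the d2 != remote_dom branch catches exactly that race.
 */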

static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}

long evtchn_send(unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int rport, ret = 0;

    spin_lock(&ld->evtchn_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);
    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* Silently drop the notification. */
        break;
    default:
        ret = -EINVAL;
    }

    spin_unlock(&ld->evtchn_lock);

    return ret;
}

void evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    shared_info_t *s = d->shared_info;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, s->evtchn_pending) )
        return;

    if ( !test_bit(port, s->evtchn_mask) &&
         !test_and_set_bit(port / BITS_PER_LONG,
                           &v->vcpu_info->evtchn_pending_sel) )
    {
        vcpu_mark_events_pending(v);
    }

    /* Check if some VCPU might be polling for this event. */
    if ( unlikely(test_bit(_DOMF_polling, &d->domain_flags)) &&
         likely(test_and_clear_bit(_DOMF_polling, &d->domain_flags)) )
    {
        for_each_vcpu ( d, v )
            if ( test_and_clear_bit(_VCPUF_polling, &v->vcpu_flags) )
                vcpu_unblock(v);
    }
}
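
/*
 * Delivery hierarchy used by evtchn_set_pending(), as a sketch:
 *
 *   shared_info->evtchn_pending[]    one bit per port, set first
 *   shared_info->evtchn_mask[]       one bit per port; masked ports stop here
 *   vcpu_info->evtchn_pending_sel    one bit per word of evtchn_pending[]
 *   vcpu_mark_events_pending(v)      final per-vcpu "events pending" flag
 *
 * A guest typically consumes events in roughly the reverse order: clear a
 * selector bit, scan the corresponding evtchn_pending[] word, and clear each
 * per-port bit before handling it.  (The exact guest-side protocol is up to
 * the guest and is not defined in this file.)
 */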

void send_guest_vcpu_virq(struct vcpu *v, int virq)
{
    int port;

    ASSERT(!virq_is_global(virq));

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    evtchn_set_pending(v, port);
}

void send_guest_global_virq(struct domain *d, int virq)
{
    int port;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    port = d->vcpu[0]->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}

void send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    ASSERT(port != 0);

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}

static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t dom = status->dom;
    int port = status->port;
    struct evtchn *chn;
    long rc = 0;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);
    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->evtchn_lock);
    put_domain(d);
    return rc;
}

long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long rc = 0;

    if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);
    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
    case ECS_PIRQ:
        chn->notify_vcpu_id = vcpu_id;
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->evtchn_lock);
    return rc;
}

static long evtchn_unmask(evtchn_unmask_t *unmask)
{
    struct domain *d = current->domain;
    shared_info_t *s = d->shared_info;
    int port = unmask->port;
    struct vcpu *v;

    spin_lock(&d->evtchn_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->evtchn_lock);
        return -EINVAL;
    }

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * evtchn_set_pending() above.
     */
    if ( test_and_clear_bit(port, s->evtchn_mask) &&
         test_bit(port, s->evtchn_pending) &&
         !test_and_set_bit(port / BITS_PER_LONG,
                           &v->vcpu_info->evtchn_pending_sel) )
    {
        vcpu_mark_events_pending(v);
    }

    spin_unlock(&d->evtchn_lock);

    return 0;
}

long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(&close);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(&unmask);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
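
/*
 * Guest-side calling convention, for reference (illustrative sketch; the
 * HYPERVISOR_event_channel_op wrapper lives in the guest, not in this
 * file).  Each EVTCHNOP_* command takes a pointer to its matching struct,
 * and operations that produce output (alloc_unbound, bind_*, status) have
 * the struct copied back to the guest on success, as above:
 *
 *     struct evtchn_send send = { .port = port };
 *     if ( HYPERVISOR_event_channel_op(EVTCHNOP_send, &send) != 0 )
 *         ... handle error ...
 */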

int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->evtchn_lock);
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;
    return 0;
}

void evtchn_destroy(struct domain *d)
{
    int i;

    for ( i = 0; port_is_valid(d, i); i++ )
        (void)__evtchn_close(d, i);

    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
        xfree(d->evtchn[i]);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */