xen/common/event_channel.c @ 10099:3d3e5a3008f6 (ia64/xen-unstable)
Check the `global' property of the arch-specific VIRQs.
Signed-off-by: Kevin Tian <Kevin.tian@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu May 18 16:19:18 2006 +0100 (2006-05-18)
parents 42a8e3101c6c
children ddc25d4ebf60

/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/guest_access.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <acm/acm_hooks.h>

#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
#define port_is_valid(d,p)    \
    (((p) >= 0) && ((p) < MAX_EVTCHNS) && \
     (bucket_from_port(d,p) != NULL))
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])

#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        DPRINTK("EVTCHNOP failure: domain %d, error %d, line %d\n", \
                current->domain->domain_id, (_errno), __LINE__);    \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )
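
/*
 * Returns nonzero iff @virq is a "global" VIRQ, i.e. one delivered to the
 * domain as a whole rather than to a particular VCPU. Global VIRQs must
 * initially be bound to VCPU0 (and may be re-targeted later via
 * EVTCHNOP_bind_vcpu); per-VCPU VIRQs (timer, debug, xenoprof) may be bound
 * to any VCPU but can never be re-targeted. The VIRQ_ARCH_* range defers
 * the decision to arch_virq_is_global().
 */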
static int virq_is_global(int virq)
{
    int rc;

    ASSERT((virq >= 0) && (virq < NR_VIRQS));

    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
        rc = 0;
        break;
    case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
        rc = arch_virq_is_global(virq);
        break;
    default:
        rc = 1;
        break;
    }

    return rc;
}
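
/*
 * Returns the first free port in @d, growing the evtchn array by one
 * zeroed bucket if all currently allocated ports are in use. The caller
 * is expected to hold d->evtchn_lock (except during domain creation).
 */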
static int get_free_port(struct domain *d)
{
    struct evtchn *chn;
    int            port;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    if ( port == MAX_EVTCHNS )
        return -ENOSPC;

    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    bucket_from_port(d, port) = chn;

    return port;
}
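
/*
 * EVTCHNOP_alloc_unbound: allocate a port in <dom> for later binding by
 * <remote_dom>. Specifying a domain other than DOMID_SELF requires the
 * caller to be privileged.
 */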
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int            port;
    domid_t        dom = alloc->dom;
    long           rc;

    if ( (rc = acm_pre_eventchannel_unbound(dom, alloc->remote_dom)) != 0 )
        return rc;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    put_domain(d);

    return rc;
}
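
/*
 * EVTCHNOP_bind_interdomain: allocate a local port and connect it to the
 * remote ECS_UNBOUND port <remote_port>, which must name this domain as
 * its intended peer.
 */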
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            lport, rport = bind->remote_port;
    domid_t        rdom = bind->remote_dom;
    long           rc;

    if ( (rc = acm_pre_eventchannel_interdomain(rdom)) != 0 )
        return rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = find_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /*
     * Avoid deadlock by always acquiring the two locks in a fixed order:
     * the domain at the lower address is locked first.
     */
    if ( ld < rd )
    {
        spin_lock(&ld->evtchn_lock);
        spin_lock(&rd->evtchn_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->evtchn_lock);
        spin_lock(&ld->evtchn_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT(-EINVAL);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT(-EINVAL);

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state                     = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->evtchn_lock);
    if ( ld != rd )
        spin_unlock(&rd->evtchn_lock);

    put_domain(rd);

    return rc;
}
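
/*
 * EVTCHNOP_bind_virq: bind a new local port to virtual IRQ <virq> on
 * VCPU <vcpu>. Each VCPU may bind a given VIRQ only once, and global
 * VIRQs must be bound on VCPU0 (see virq_is_global() above).
 */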
static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu   *v;
    struct domain *d = current->domain;
    int            port, virq = bind->virq, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq         = virq;

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}
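
/* EVTCHNOP_bind_ipi: allocate a port on which the caller can IPI <vcpu>. */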
static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_IPI;
    chn->notify_vcpu_id = vcpu;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}
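
/*
 * EVTCHNOP_bind_pirq: bind a new local port to physical IRQ <pirq>.
 * The domain must have been granted access to the IRQ; notifications are
 * delivered to VCPU0 until re-targeted with EVTCHNOP_bind_vcpu.
 */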
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= ARRAY_SIZE(d->pirq_to_evtchn)) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->evtchn_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}
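
/*
 * Close <port1> in <d1>, undoing its binding (PIRQ, VIRQ, IPI or
 * interdomain). For an interdomain channel the remote end is demoted to
 * ECS_UNBOUND rather than closed; taking the second domain's lock may
 * require dropping and retaking d1's lock, hence the 'again' loop.
 */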
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->evtchn_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);
    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
                v->virq_to_evtchn[chn1->u.virq] = 0;
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
            {
                /*
                 * Failed to obtain a reference. No matter: d2 must be dying
                 * and so will close this event channel for us.
                 */
                d2 = NULL;
                goto out;
            }

            if ( d1 < d2 )
            {
                spin_lock(&d2->evtchn_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->evtchn_lock);
                spin_lock(&d2->evtchn_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            BUG_ON(d1 != current->domain);
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->evtchn_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->evtchn_lock);

    return rc;
}

static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}
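
/*
 * EVTCHNOP_send: signal local port <lport>. Interdomain sends mark the
 * peer's port pending, IPI sends mark the local port pending, and sends
 * on a still-unbound port are silently dropped.
 */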
long evtchn_send(unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            rport, ret = 0;

    spin_lock(&ld->evtchn_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);
    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* Silently drop the notification. */
        break;
    default:
        ret = -EINVAL;
    }

    spin_unlock(&ld->evtchn_lock);

    return ret;
}
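
/*
 * Mark <port> pending for <v>, and deliver an upcall unless the port is
 * masked or an upcall is already on its way.
 */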
void evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    shared_info_t *s = d->shared_info;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, s->evtchn_pending) )
        return;

    if ( !test_bit        (port, s->evtchn_mask) &&
         !test_and_set_bit(port / BITS_PER_LONG,
                           &v->vcpu_info->evtchn_pending_sel) &&
         !test_and_set_bit(0, &v->vcpu_info->evtchn_upcall_pending) )
    {
        evtchn_notify(v);
    }
    else if ( unlikely(test_bit(_VCPUF_blocked, &v->vcpu_flags) &&
                       v->vcpu_info->evtchn_upcall_mask) )
    {
        /*
         * Blocked and masked will usually mean that the VCPU executed
         * SCHEDOP_poll. Kick the VCPU in case this port is in its poll list.
         */
        vcpu_unblock(v);
    }
}
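
/* Deliver a per-VCPU VIRQ directly to its owning VCPU, if bound. */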
void send_guest_vcpu_virq(struct vcpu *v, int virq)
{
    int port;

    ASSERT(!virq_is_global(virq));

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    evtchn_set_pending(v, port);
}
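
/*
 * Deliver a global VIRQ. The binding is recorded in VCPU0's
 * virq_to_evtchn[] table, though the notification may be targeted at any
 * VCPU via the channel's notify_vcpu_id.
 */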
void send_guest_global_virq(struct domain *d, int virq)
{
    int port;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    port = d->vcpu[0]->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}
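
/* Deliver the notification bound to physical IRQ <pirq>. */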
void send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    ASSERT(port != 0);

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}
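
/* EVTCHNOP_status: report the current binding of <dom>'s port <port>. */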
static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t        dom = status->dom;
    int            port = status->port;
    struct evtchn *chn;
    long           rc = 0;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);
    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom  =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->evtchn_lock);
    put_domain(d);
    return rc;
}
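
/*
 * EVTCHNOP_bind_vcpu: re-target notifications for <port> at <vcpu_id>.
 * Only unbound, interdomain, PIRQ and global-VIRQ ports may be re-bound;
 * per-VCPU VIRQ and IPI ports stay with the VCPU they were created for.
 */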
long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long           rc = 0;

    if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);
    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
    case ECS_PIRQ:
        chn->notify_vcpu_id = vcpu_id;
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->evtchn_lock);
    return rc;
}
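
/*
 * EVTCHNOP_unmask: clear <port>'s mask bit and, if a notification arrived
 * while the port was masked, replay the pending-selector/upcall sequence
 * so that the event is not lost.
 */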
static long evtchn_unmask(evtchn_unmask_t *unmask)
{
    struct domain *d = current->domain;
    shared_info_t *s = d->shared_info;
    int            port = unmask->port;
    struct vcpu   *v;

    spin_lock(&d->evtchn_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->evtchn_lock);
        return -EINVAL;
    }

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending().
     */
    if ( test_and_clear_bit(port, s->evtchn_mask) &&
         test_bit          (port, s->evtchn_pending) &&
         !test_and_set_bit (port / BITS_PER_LONG,
                            &v->vcpu_info->evtchn_pending_sel) &&
         !test_and_set_bit (0, &v->vcpu_info->evtchn_upcall_pending) )
    {
        evtchn_notify(v);
    }

    spin_unlock(&d->evtchn_lock);

    return 0;
}
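
/*
 * Hypercall entry point: dispatch on <cmd>, copying the argument structure
 * in from guest memory and, for ops that return data, back out again.
 *
 * Illustrative guest-side call sequence (a sketch only, not part of this
 * file; HYPERVISOR_event_channel_op names the guest's hypercall wrapper):
 *
 *     struct evtchn_alloc_unbound op = {
 *         .dom = DOMID_SELF, .remote_dom = 0 };
 *     if ( HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op) == 0 )
 *         ... op.port now names the newly allocated local port ...
 */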
long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(&close);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(&unmask);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

void evtchn_notify_reserved_port(struct domain *d, int port)
{
    struct evtchn *chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}
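
/* Domain creation: initialise the lock and reserve port 0 for Xen's use. */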
int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->evtchn_lock);
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;
    return 0;
}
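
/* Domain teardown: close every valid port, then free the evtchn buckets. */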
void evtchn_destroy(struct domain *d)
{
    int i;

    for ( i = 0; port_is_valid(d, i); i++ )
        (void)__evtchn_close(d, i);

    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
        xfree(d->evtchn[i]);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */