ia64/xen-unstable

view xen/common/event_channel.c @ 12390:e28beea6d228

[IA64] Fix time services of EFI emulation

This patch serializes the execution of the following EFI runtime services:
- GetTime
- SetTime
- GetWakeupTime
- SetWakeupTime

Linux/ia64 uses similar spinlocks in the EFI RTC driver.

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Fri Nov 10 12:03:19 2006 -0700 (2006-11-10)
parents 36679b74e24a
children 4c8f157a3a47
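
(The diff itself is not shown on this page. As a rough sketch of the serialization the description refers to, each of the four time services would be wrapped in a common spinlock, in the same spirit as the Linux/ia64 EFI RTC driver mentioned above. All identifiers below are illustrative, not taken from the patch.)

static DEFINE_SPINLOCK(efi_time_lock);  /* hypothetical lock name */

static efi_status_t serialized_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
    efi_status_t  status;
    unsigned long flags;

    /* Serialize against concurrent SetTime/GetWakeupTime/SetWakeupTime. */
    spin_lock_irqsave(&efi_time_lock, flags);
    status = (*efi.get_time)(tm, tc);   /* call into the firmware service */
    spin_unlock_irqrestore(&efi_time_lock, flags);

    return status;
}
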
/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/guest_access.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <acm/acm_hooks.h>

#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
#define port_is_valid(d,p)    \
    (((p) >= 0) && ((p) < MAX_EVTCHNS) && \
     (bucket_from_port(d,p) != NULL))
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])

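/*
 * Event channels live in buckets of EVTCHNS_PER_BUCKET entries each;
 * d->evtchn[] is an array of bucket pointers, allocated on demand. A port is
 * valid only if it is in range and its bucket exists. evtchn_from_port()
 * indexes within the bucket (EVTCHNS_PER_BUCKET is assumed to be a power of
 * two, hence the mask).
 */
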
#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        gdprintk(XENLOG_WARNING,                                    \
                 "EVTCHNOP failure: domain %d, error %d, line %d\n", \
                 current->domain->domain_id, (_errno), __LINE__);   \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )

static int virq_is_global(int virq)
{
    int rc;

    ASSERT((virq >= 0) && (virq < NR_VIRQS));

    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
        rc = 0;
        break;
    case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
        rc = arch_virq_is_global(virq);
        break;
    default:
        rc = 1;
        break;
    }

    return rc;
}

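/*
 * Return the lowest-numbered free port in @d, allocating a fresh bucket of
 * channel structures on demand if the search runs past the last allocated
 * bucket. The caller must hold d->evtchn_lock.
 */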
static int get_free_port(struct domain *d)
{
    struct evtchn *chn;
    int            port;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    if ( port == MAX_EVTCHNS )
        return -ENOSPC;

    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    bucket_from_port(d, port) = chn;

    return port;
}

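/*
 * EVTCHNOP_alloc_unbound: allocate a port in <dom> that a peer
 * (<remote_dom>) can later connect to via EVTCHNOP_bind_interdomain.
 * Only a privileged domain may allocate on behalf of another domain.
 */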
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int            port;
    domid_t        dom = alloc->dom;
    long           rc;

    if ( (rc = acm_pre_eventchannel_unbound(dom, alloc->remote_dom)) != 0 )
        return rc;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    put_domain(d);

    return rc;
}

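/*
 * EVTCHNOP_bind_interdomain: connect a new local port to an ECS_UNBOUND
 * remote port that was reserved for us, moving both ends to
 * ECS_INTERDOMAIN. Both domains' event locks are required, hence the
 * lock-ordering dance below.
 */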
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            lport, rport = bind->remote_port;
    domid_t        rdom = bind->remote_dom;
    long           rc;

    if ( (rc = acm_pre_eventchannel_interdomain(rdom)) != 0 )
        return rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = find_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by acquiring the two locks in a fixed global order
     * (ascending domain pointer), taking only one lock when ld == rd. */
    if ( ld < rd )
    {
        spin_lock(&ld->evtchn_lock);
        spin_lock(&rd->evtchn_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->evtchn_lock);
        spin_lock(&ld->evtchn_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT(-EINVAL);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT(-EINVAL);

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state                     = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->evtchn_lock);
    if ( ld != rd )
        spin_unlock(&rd->evtchn_lock);

    put_domain(rd);

    return rc;
}

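/*
 * EVTCHNOP_bind_virq: bind a local port to a virtual IRQ. Per-VCPU VIRQs
 * may target any online VCPU; global VIRQs must initially be bound to
 * VCPU0 (they can be re-targeted later via EVTCHNOP_bind_vcpu). At most
 * one binding per (vcpu, virq) pair is allowed.
 */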
static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu   *v;
    struct domain *d = current->domain;
    int            port, virq = bind->virq, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq         = virq;

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}

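/* EVTCHNOP_bind_ipi: allocate a port on which the guest can send IPI
 * notifications to one of its own VCPUs. */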
static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_IPI;
    chn->notify_vcpu_id = vcpu;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}

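/*
 * EVTCHNOP_bind_pirq: bind a local port to a physical IRQ. The domain must
 * have been granted access to the IRQ, and pirq_guest_bind() can fail
 * (e.g. on a sharing conflict), in which case the pirq-to-port mapping is
 * rolled back before returning the error.
 */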
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= ARRAY_SIZE(d->pirq_to_evtchn)) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->evtchn_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}

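/*
 * Close a port in @d1, undoing whatever binding it currently has. For
 * interdomain channels the remote end is reset to ECS_UNBOUND, which
 * requires the remote domain's lock as well: the code below may have to
 * drop d1's lock to respect the fixed lock order and retry from 'again',
 * since the port can change state while unlocked.
 */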
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->evtchn_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(chn1->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
                v->virq_to_evtchn[chn1->u.virq] = 0;
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
            {
                /*
                 * Failed to obtain a reference. No matter: d2 must be dying
                 * and so will close this event channel for us.
                 */
                d2 = NULL;
                goto out;
            }

            if ( d1 < d2 )
            {
                spin_lock(&d2->evtchn_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->evtchn_lock);
                spin_lock(&d2->evtchn_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            BUG_ON(d1 != current->domain);
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->evtchn_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->evtchn_lock);

    return rc;
}

static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}

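/*
 * EVTCHNOP_send: notify whatever the local port is bound to. Interdomain
 * sends mark the remote port pending (or wake a blocked in-Xen consumer);
 * IPI sends mark the local port pending; sends on an unbound port are
 * silently dropped.
 */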
long evtchn_send(unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    struct vcpu   *rvcpu;
    int            rport, ret = 0;

    spin_lock(&ld->evtchn_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);

    /* Guest cannot send via a Xen-attached event channel. */
    if ( unlikely(lchn->consumer_is_xen) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        rvcpu = rd->vcpu[rchn->notify_vcpu_id];
        if ( rchn->consumer_is_xen )
        {
            /* Xen consumers need notification only if they are blocked. */
            if ( test_and_clear_bit(_VCPUF_blocked_in_xen,
                                    &rvcpu->vcpu_flags) )
                vcpu_wake(rvcpu);
        }
        else
        {
            evtchn_set_pending(rvcpu, rport);
        }
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* silently drop the notification */
        break;
    default:
        ret = -EINVAL;
    }

    spin_unlock(&ld->evtchn_lock);

    return ret;
}

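/*
 * Mark @port pending for @v: set the port's bit in the shared
 * evtchn_pending bitmap and, if the port is not masked, set the per-VCPU
 * selector bit and kick the VCPU. Each test_and_set acts as an edge
 * filter, so a notification is delivered at most once until the guest
 * clears the corresponding bits.
 */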
void evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    shared_info_t *s = d->shared_info;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, s->evtchn_pending) )
        return;

    if ( !test_bit        (port, s->evtchn_mask) &&
         !test_and_set_bit(port / BITS_PER_LONG,
                           &v->vcpu_info->evtchn_pending_sel) )
    {
        vcpu_mark_events_pending(v);
    }

    /* Check if some VCPU might be polling for this event. */
    if ( unlikely(test_bit(_DOMF_polling, &d->domain_flags)) &&
         likely(test_and_clear_bit(_DOMF_polling, &d->domain_flags)) )
    {
        for_each_vcpu ( d, v )
            if ( test_and_clear_bit(_VCPUF_polling, &v->vcpu_flags) )
                vcpu_unblock(v);
    }
}

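/* The send_guest_* helpers below deliver VIRQ and PIRQ notifications by
 * looking up the bound port and marking it pending on the notify VCPU. */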
void send_guest_vcpu_virq(struct vcpu *v, int virq)
{
    int port;

    ASSERT(!virq_is_global(virq));

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    evtchn_set_pending(v, port);
}

void send_guest_global_virq(struct domain *d, int virq)
{
    int port;
    struct vcpu *v;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    v = d->vcpu[0];
    if ( unlikely(v == NULL) )
        return;

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}

void send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    ASSERT(port != 0);

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}

static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t        dom = status->dom;
    int            port = status->port;
    struct evtchn *chn;
    long           rc = 0;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);
    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom  =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->evtchn_lock);
    put_domain(d);
    return rc;
}

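/*
 * EVTCHNOP_bind_vcpu: re-target an existing port's notifications to
 * another VCPU. Only unbound, interdomain, PIRQ, and global-VIRQ ports may
 * be re-bound; per-VCPU VIRQs and IPIs stay on their original VCPU.
 */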
long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long           rc = 0;

    if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    /* Guest cannot re-bind a Xen-attached event channel. */
    if ( unlikely(chn->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
    case ECS_PIRQ:
        chn->notify_vcpu_id = vcpu_id;
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->evtchn_lock);
    return rc;
}

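/*
 * EVTCHNOP_unmask: clear the port's mask bit and, if a notification
 * arrived while the port was masked, replay it now by setting the
 * selector bit and kicking the notify VCPU.
 */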
static long evtchn_unmask(evtchn_unmask_t *unmask)
{
    struct domain *d = current->domain;
    shared_info_t *s = d->shared_info;
    int            port = unmask->port;
    struct vcpu   *v;

    spin_lock(&d->evtchn_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->evtchn_lock);
        return -EINVAL;
    }

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending().
     */
    if ( test_and_clear_bit(port, s->evtchn_mask) &&
         test_bit          (port, s->evtchn_pending) &&
         !test_and_set_bit (port / BITS_PER_LONG,
                            &v->vcpu_info->evtchn_pending_sel) )
    {
        vcpu_mark_events_pending(v);
    }

    spin_unlock(&d->evtchn_lock);

    return 0;
}

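/*
 * Hypercall entry point: copy the argument structure in from the guest,
 * dispatch on @cmd, and copy results back for the operations that return
 * data (failure to copy back yields -EFAULT, with the operation already
 * committed).
 */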
long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(&close);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(&unmask);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

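/*
 * The helpers below manage "Xen-attached" event channels, whose local end
 * is consumed by Xen itself rather than by the guest (consumer_is_xen).
 * Guests cannot close, re-bind, or send on such channels.
 */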
int alloc_unbound_xen_event_channel(
    struct vcpu *local_vcpu, domid_t remote_domid)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;
    int            port;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        goto out;
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    chn->consumer_is_xen = 1;
    chn->notify_vcpu_id = local_vcpu->vcpu_id;
    chn->u.unbound.remote_domid = remote_domid;

 out:
    spin_unlock(&d->evtchn_lock);

    return port;
}

void free_xen_event_channel(
    struct vcpu *local_vcpu, int port)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;

    spin_lock(&d->evtchn_lock);
    chn = evtchn_from_port(d, port);
    BUG_ON(!chn->consumer_is_xen);
    chn->consumer_is_xen = 0;
    spin_unlock(&d->evtchn_lock);

    (void)__evtchn_close(d, port);
}

void notify_via_xen_event_channel(int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            rport;

    spin_lock(&ld->evtchn_lock);

    ASSERT(port_is_valid(ld, lport));
    lchn = evtchn_from_port(ld, lport);
    ASSERT(lchn->consumer_is_xen);

    if ( likely(lchn->state == ECS_INTERDOMAIN) )
    {
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
    }

    spin_unlock(&ld->evtchn_lock);
}

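/* Domain lifecycle: reserve port 0 at creation; on destruction close every
 * port (clearing consumer_is_xen so Xen-attached ports close too) and free
 * the buckets. */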
int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->evtchn_lock);
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;
    return 0;
}

void evtchn_destroy(struct domain *d)
{
    int i;

    for ( i = 0; port_is_valid(d, i); i++ )
    {
        evtchn_from_port(d, i)->consumer_is_xen = 0;
        (void)__evtchn_close(d, i);
    }

    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
        xfree(d->evtchn[i]);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */