ia64/xen-unstable

view xen/common/event_channel.c @ 9183:4293d6760cef

Upgrade all hypercalls to use the new guest_handle interface (on the Xen side).

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Mar 07 18:40:42 2006 +0100 (2006-03-07)
parents b5bb9920bf48
children c445d4a0dd76
line source
1 /******************************************************************************
2 * event_channel.c
3 *
4 * Event notifications from VIRQs, PIRQs, and other domains.
5 *
6 * Copyright (c) 2003-2005, K A Fraser.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
18 #include <xen/config.h>
19 #include <xen/init.h>
20 #include <xen/lib.h>
21 #include <xen/errno.h>
22 #include <xen/sched.h>
23 #include <xen/event.h>
24 #include <xen/irq.h>
25 #include <xen/iocap.h>
26 #include <xen/guest_access.h>
27 #include <asm/current.h>
29 #include <public/xen.h>
30 #include <public/event_channel.h>
31 #include <acm/acm_hooks.h>
/* Bucket (array of struct evtchn) holding port 'p' of domain 'd'. */
#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
/* A port is valid iff it is in range and its bucket has been allocated. */
#define port_is_valid(d,p) \
    (((p) >= 0) && ((p) < MAX_EVTCHNS) && \
     (bucket_from_port(d,p) != NULL))
/*
 * Pointer to the struct evtchn for port 'p' of domain 'd'.
 * NOTE(review): the '&' mask only computes p % EVTCHNS_PER_BUCKET if
 * EVTCHNS_PER_BUCKET is a power of two -- confirm in the header.
 */
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])
/*
 * Log an EVTCHNOP failure, set the enclosing function's 'rc' and jump to
 * its 'out:' label.  Only usable inside functions that declare 'long rc'
 * and provide an 'out:' cleanup label.
 */
#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        DPRINTK("EVTCHNOP failure: domain %d, error %d, line %d\n", \
                current->domain->domain_id, (_errno), __LINE__);    \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )
49 static int get_free_port(struct domain *d)
50 {
51 struct evtchn *chn;
52 int port;
54 for ( port = 0; port_is_valid(d, port); port++ )
55 if ( evtchn_from_port(d, port)->state == ECS_FREE )
56 return port;
58 if ( port == MAX_EVTCHNS )
59 return -ENOSPC;
61 chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
62 if ( unlikely(chn == NULL) )
63 return -ENOMEM;
64 memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
65 bucket_from_port(d, port) = chn;
67 return port;
68 }
71 static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
72 {
73 struct evtchn *chn;
74 struct domain *d;
75 int port;
76 domid_t dom = alloc->dom;
77 long rc = 0;
79 if ( dom == DOMID_SELF )
80 dom = current->domain->domain_id;
81 else if ( !IS_PRIV(current->domain) )
82 return -EPERM;
84 if ( (d = find_domain_by_id(dom)) == NULL )
85 return -ESRCH;
87 spin_lock(&d->evtchn_lock);
89 if ( (port = get_free_port(d)) < 0 )
90 ERROR_EXIT(port);
91 chn = evtchn_from_port(d, port);
93 chn->state = ECS_UNBOUND;
94 if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
95 chn->u.unbound.remote_domid = current->domain->domain_id;
97 alloc->port = port;
99 out:
100 spin_unlock(&d->evtchn_lock);
102 put_domain(d);
104 return rc;
105 }
/*
 * EVTCHNOP_bind_interdomain: connect a new local port in the calling
 * domain to an ECS_UNBOUND port in the remote domain, completing the
 * interdomain channel in both directions.  The remote port must have
 * been allocated (via alloc_unbound) naming the caller as its peer.
 */
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int lport, rport = bind->remote_port;
    domid_t rdom = bind->remote_dom;
    long rc = 0;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = find_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
    /*
     * NOTE(review): the ordering actually used is by pointer value, not by
     * domain id as the comment above says.  Any total order prevents the
     * deadlock; __evtchn_close() uses the same pointer-based order.
     */
    if ( ld < rd )
    {
        spin_lock(&ld->evtchn_lock);
        spin_lock(&rd->evtchn_lock);
    }
    else
    {
        /* ld == rd means binding to ourselves: take the one lock once. */
        if ( ld != rd )
            spin_lock(&rd->evtchn_lock);
        spin_lock(&ld->evtchn_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    /* Remote port must exist, be unbound, and name us as its peer. */
    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT(-EINVAL);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT(-EINVAL);

    /* Link the two ends; rport/lport fit in the u16 port fields. */
    lchn->u.interdomain.remote_dom = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->evtchn_lock);
    if ( ld != rd )
        spin_unlock(&rd->evtchn_lock);

    put_domain(rd);

    return rc;
}
173 static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
174 {
175 struct evtchn *chn;
176 struct vcpu *v;
177 struct domain *d = current->domain;
178 int port, virq = bind->virq, vcpu = bind->vcpu;
179 long rc = 0;
181 if ( virq >= ARRAY_SIZE(v->virq_to_evtchn) )
182 return -EINVAL;
184 if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || ((v = d->vcpu[vcpu]) == NULL) )
185 return -ENOENT;
187 spin_lock(&d->evtchn_lock);
189 if ( v->virq_to_evtchn[virq] != 0 )
190 ERROR_EXIT(-EEXIST);
192 if ( (port = get_free_port(d)) < 0 )
193 ERROR_EXIT(port);
195 chn = evtchn_from_port(d, port);
196 chn->state = ECS_VIRQ;
197 chn->notify_vcpu_id = vcpu;
198 chn->u.virq = virq;
200 v->virq_to_evtchn[virq] = bind->port = port;
202 out:
203 spin_unlock(&d->evtchn_lock);
205 return rc;
206 }
209 static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
210 {
211 struct evtchn *chn;
212 struct domain *d = current->domain;
213 int port, vcpu = bind->vcpu;
214 long rc = 0;
216 if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu] == NULL) )
217 return -ENOENT;
219 spin_lock(&d->evtchn_lock);
221 if ( (port = get_free_port(d)) < 0 )
222 ERROR_EXIT(port);
224 chn = evtchn_from_port(d, port);
225 chn->state = ECS_IPI;
226 chn->notify_vcpu_id = vcpu;
228 bind->port = port;
230 out:
231 spin_unlock(&d->evtchn_lock);
233 return rc;
234 }
/*
 * EVTCHNOP_bind_pirq: bind physical IRQ @bind->pirq to a new local port.
 * The calling domain must have been granted access to the IRQ.  The
 * port number is written back into @bind->port.
 */
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int port, pirq = bind->pirq;
    long rc;

    if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->evtchn_lock);

    /* A PIRQ may be bound to at most one event channel. */
    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    /*
     * Publish the pirq->port mapping before binding the guest IRQ, and
     * roll it back if the bind fails.  Order matters here.
     */
    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}
/*
 * Close port @port1 of domain @d1, returning it to ECS_FREE (bound to
 * vcpu0).  For interdomain channels the remote end is demoted back to
 * ECS_UNBOUND rather than closed.  Takes d1's (and, for interdomain
 * channels, the peer's) evtchn_lock internally; may drop and retake
 * d1's lock to respect the two-lock ordering, retrying via 'again:'.
 */
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu *v;
    struct evtchn *chn1, *chn2;
    int port2;
    long rc = 0;

 again:
    spin_lock(&d1->evtchn_lock);

    /* Re-checked on every retry: the port may have changed while unlocked. */
    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);
    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        /* Drop the pirq mapping only if the guest unbind succeeded. */
        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        /* Clear the virq binding on whichever VCPU(s) reference this port. */
        for_each_vcpu ( d1, v )
            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
                v->virq_to_evtchn[chn1->u.virq] = 0;
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
            {
                /*
                 * Failed to obtain a reference. No matter: d2 must be dying
                 * and so will close this event channel for us.
                 */
                d2 = NULL;
                goto out;
            }

            /* Same pointer-based lock order as evtchn_bind_interdomain(). */
            if ( d1 < d2 )
            {
                spin_lock(&d2->evtchn_lock);
            }
            else if ( d1 != d2 )
            {
                /* Wrong order: drop d1, take d2, then retry from scratch. */
                spin_unlock(&d1->evtchn_lock);
                spin_lock(&d2->evtchn_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            BUG_ON(d1 != current->domain);
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        /* Demote the remote end to unbound, still naming d1 as its peer. */
        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state = ECS_FREE;
    chn1->notify_vcpu_id = 0;

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->evtchn_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->evtchn_lock);

    return rc;
}
/* EVTCHNOP_close: close a port belonging to the calling domain. */
static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}
403 long evtchn_send(unsigned int lport)
404 {
405 struct evtchn *lchn, *rchn;
406 struct domain *ld = current->domain, *rd;
407 int rport, ret = 0;
409 spin_lock(&ld->evtchn_lock);
411 if ( unlikely(!port_is_valid(ld, lport)) )
412 {
413 spin_unlock(&ld->evtchn_lock);
414 return -EINVAL;
415 }
417 lchn = evtchn_from_port(ld, lport);
418 switch ( lchn->state )
419 {
420 case ECS_INTERDOMAIN:
421 rd = lchn->u.interdomain.remote_dom;
422 rport = lchn->u.interdomain.remote_port;
423 rchn = evtchn_from_port(rd, rport);
424 evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
425 break;
426 case ECS_IPI:
427 evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
428 break;
429 case ECS_UNBOUND:
430 /* silently drop the notification */
431 break;
432 default:
433 ret = -EINVAL;
434 }
436 spin_unlock(&ld->evtchn_lock);
438 return ret;
439 }
/*
 * Deliver a physical-IRQ notification to the guest: look up the event
 * channel bound to @pirq and mark it pending on its notify VCPU.
 * NOTE(review): runs without d->evtchn_lock and assumes the pirq is
 * currently bound (d->pirq_to_evtchn[pirq] != 0); if it is not, 'port'
 * is 0 (the reserved port) -- confirm callers guarantee a live binding.
 */
void send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}
448 static long evtchn_status(evtchn_status_t *status)
449 {
450 struct domain *d;
451 domid_t dom = status->dom;
452 int port = status->port;
453 struct evtchn *chn;
454 long rc = 0;
456 if ( dom == DOMID_SELF )
457 dom = current->domain->domain_id;
458 else if ( !IS_PRIV(current->domain) )
459 return -EPERM;
461 if ( (d = find_domain_by_id(dom)) == NULL )
462 return -ESRCH;
464 spin_lock(&d->evtchn_lock);
466 if ( !port_is_valid(d, port) )
467 {
468 rc = -EINVAL;
469 goto out;
470 }
472 chn = evtchn_from_port(d, port);
473 switch ( chn->state )
474 {
475 case ECS_FREE:
476 case ECS_RESERVED:
477 status->status = EVTCHNSTAT_closed;
478 break;
479 case ECS_UNBOUND:
480 status->status = EVTCHNSTAT_unbound;
481 status->u.unbound.dom = chn->u.unbound.remote_domid;
482 break;
483 case ECS_INTERDOMAIN:
484 status->status = EVTCHNSTAT_interdomain;
485 status->u.interdomain.dom =
486 chn->u.interdomain.remote_dom->domain_id;
487 status->u.interdomain.port = chn->u.interdomain.remote_port;
488 break;
489 case ECS_PIRQ:
490 status->status = EVTCHNSTAT_pirq;
491 status->u.pirq = chn->u.pirq;
492 break;
493 case ECS_VIRQ:
494 status->status = EVTCHNSTAT_virq;
495 status->u.virq = chn->u.virq;
496 break;
497 case ECS_IPI:
498 status->status = EVTCHNSTAT_ipi;
499 break;
500 default:
501 BUG();
502 }
504 status->vcpu = chn->notify_vcpu_id;
506 out:
507 spin_unlock(&d->evtchn_lock);
508 put_domain(d);
509 return rc;
510 }
512 long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
513 {
514 struct domain *d = current->domain;
515 struct evtchn *chn;
516 long rc = 0;
518 if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
519 return -ENOENT;
521 spin_lock(&d->evtchn_lock);
523 if ( !port_is_valid(d, port) )
524 {
525 rc = -EINVAL;
526 goto out;
527 }
529 chn = evtchn_from_port(d, port);
530 switch ( chn->state )
531 {
532 case ECS_UNBOUND:
533 case ECS_INTERDOMAIN:
534 case ECS_PIRQ:
535 chn->notify_vcpu_id = vcpu_id;
536 break;
537 default:
538 rc = -EINVAL;
539 break;
540 }
542 out:
543 spin_unlock(&d->evtchn_lock);
544 return rc;
545 }
/*
 * EVTCHNOP_unmask: clear the mask bit for @port in the calling domain's
 * shared info and, if an event was pending while the port was masked,
 * re-run the delivery chain so the notify VCPU receives an upcall.
 */
static long evtchn_unmask(evtchn_unmask_t *unmask)
{
    struct domain *d = current->domain;
    shared_info_t *s = d->shared_info;
    int port = unmask->port;
    struct vcpu *v;

    spin_lock(&d->evtchn_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->evtchn_lock);
        return -EINVAL;
    }

    /* The VCPU that would receive the upcall for this port. */
    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending().
     */
    if ( test_and_clear_bit(port, &s->evtchn_mask[0]) &&
         test_bit (port, &s->evtchn_pending[0]) &&
         !test_and_set_bit (port / BITS_PER_LONG,
                            &v->vcpu_info->evtchn_pending_sel) &&
         !test_and_set_bit (0, &v->vcpu_info->evtchn_upcall_pending) )
    {
        /* Only notify if we newly set the upcall-pending flag. */
        evtchn_notify(v);
    }

    spin_unlock(&d->evtchn_lock);

    return 0;
}
582 long do_event_channel_op(GUEST_HANDLE(evtchn_op_t) uop)
583 {
584 long rc;
585 struct evtchn_op op;
587 if ( copy_from_guest(&op, uop, 1) != 0 )
588 return -EFAULT;
590 if (acm_pre_event_channel(&op))
591 return -EACCES;
593 switch ( op.cmd )
594 {
595 case EVTCHNOP_alloc_unbound:
596 rc = evtchn_alloc_unbound(&op.u.alloc_unbound);
597 if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
598 rc = -EFAULT; /* Cleaning up here would be a mess! */
599 break;
601 case EVTCHNOP_bind_interdomain:
602 rc = evtchn_bind_interdomain(&op.u.bind_interdomain);
603 if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
604 rc = -EFAULT; /* Cleaning up here would be a mess! */
605 break;
607 case EVTCHNOP_bind_virq:
608 rc = evtchn_bind_virq(&op.u.bind_virq);
609 if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
610 rc = -EFAULT; /* Cleaning up here would be a mess! */
611 break;
613 case EVTCHNOP_bind_ipi:
614 rc = evtchn_bind_ipi(&op.u.bind_ipi);
615 if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
616 rc = -EFAULT; /* Cleaning up here would be a mess! */
617 break;
619 case EVTCHNOP_bind_pirq:
620 rc = evtchn_bind_pirq(&op.u.bind_pirq);
621 if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
622 rc = -EFAULT; /* Cleaning up here would be a mess! */
623 break;
625 case EVTCHNOP_close:
626 rc = evtchn_close(&op.u.close);
627 break;
629 case EVTCHNOP_send:
630 rc = evtchn_send(op.u.send.port);
631 break;
633 case EVTCHNOP_status:
634 rc = evtchn_status(&op.u.status);
635 if ( (rc == 0) && (copy_to_guest(uop, &op, 1) != 0) )
636 rc = -EFAULT;
637 break;
639 case EVTCHNOP_bind_vcpu:
640 rc = evtchn_bind_vcpu(op.u.bind_vcpu.port, op.u.bind_vcpu.vcpu);
641 break;
643 case EVTCHNOP_unmask:
644 rc = evtchn_unmask(&op.u.unmask);
645 break;
647 default:
648 rc = -ENOSYS;
649 break;
650 }
652 return rc;
653 }
/*
 * Initialise event-channel state for a new domain: set up the lock,
 * allocate the first bucket, and reserve port 0 so it is never handed
 * out to the guest.
 */
int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->evtchn_lock);
    /* On a fresh domain the first get_free_port() must yield port 0. */
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;
    return 0;
}
666 void evtchn_destroy(struct domain *d)
667 {
668 int i;
670 for ( i = 0; port_is_valid(d, i); i++ )
671 (void)__evtchn_close(d, i);
673 for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
674 xfree(d->evtchn[i]);
675 }
677 /*
678 * Local variables:
679 * mode: C
680 * c-set-style: "BSD"
681 * c-basic-offset: 4
682 * tab-width: 4
683 * indent-tabs-mode: nil
684 * End:
685 */