ia64/xen-unstable

view xen/common/event_channel.c @ 8609:85d693e6f61a

Arch-specific per-vcpu info should be initialised to zero
when allocating a new vcpu structure, not copied from
CPU0's idle VCPU. Especially now that the idle VCPU itself
is dynamically allocated.
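
A minimal sketch of the idea, assuming a struct vcpu with an embedded
'arch' field (hypothetical names; the actual hunks live in the
arch-specific vcpu allocator):

    /* When allocating a new vcpu: start from zeroed arch state rather
       than a copy of CPU0's idle vcpu. */
    memset(&v->arch, 0, sizeof(v->arch));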

This should fix assertions people have been seeing in
getdomain_info_ctxt() relating to IOPL in eflags.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Jan 14 21:26:40 2006 +0100 (2006-01-14)
parents d966b7a00959
children 98c8afe4c433

/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2005, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <acm/acm_hooks.h>

#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
#define port_is_valid(d,p)    \
    (((p) >= 0) && ((p) < MAX_EVTCHNS) && \
     (bucket_from_port(d,p) != NULL))
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])

#define ERROR_EXIT(_errno)                                          \
    do {                                                            \
        DPRINTK("EVTCHNOP failure: domain %d, error %d, line %d\n", \
                current->domain->domain_id, (_errno), __LINE__);    \
        rc = (_errno);                                              \
        goto out;                                                   \
    } while ( 0 )
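
/*
 * Return the first free event-channel port in 'd', extending the evtchn
 * bucket array with a zeroed bucket if all existing ports are in use.
 * Returns -ENOSPC if the port space is exhausted, or -ENOMEM if a new
 * bucket cannot be allocated. Callers serialise on d->evtchn_lock, except
 * during domain initialisation when no one else can see 'd'.
 */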
static int get_free_port(struct domain *d)
{
    struct evtchn *chn;
    int            port;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    if ( port == MAX_EVTCHNS )
        return -ENOSPC;

    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    bucket_from_port(d, port) = chn;

    return port;
}
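
/*
 * EVTCHNOP_alloc_unbound: allocate a port in <dom> that a nominated remote
 * domain may later connect to with EVTCHNOP_bind_interdomain. DOMID_SELF
 * is resolved here; acting on another domain requires a privileged caller.
 */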
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int            port;
    domid_t        dom = alloc->dom;
    long           rc = 0;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    put_domain(d);

    return rc;
}
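
/*
 * EVTCHNOP_bind_interdomain: connect a fresh local port to an ECS_UNBOUND
 * remote port that was reserved for this domain, linking both ends into
 * ECS_INTERDOMAIN state.
 */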
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            lport, rport = bind->remote_port;
    domid_t        rdom = bind->remote_dom;
    long           rc = 0;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = find_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
    if ( ld < rd )
    {
        spin_lock(&ld->evtchn_lock);
        spin_lock(&rd->evtchn_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->evtchn_lock);
        spin_lock(&ld->evtchn_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT(-EINVAL);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT(-EINVAL);

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state                     = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->evtchn_lock);
    if ( ld != rd )
        spin_unlock(&rd->evtchn_lock);

    put_domain(rd);

    return rc;
}
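
/*
 * EVTCHNOP_bind_virq: allocate a port on which the nominated local VCPU
 * will receive the given virtual IRQ. At most one binding per (vcpu, virq)
 * pair is permitted.
 */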
static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu   *v;
    struct domain *d = current->domain;
    int            port, virq = bind->virq, vcpu = bind->vcpu;
    long           rc = 0;

    if ( virq >= ARRAY_SIZE(v->virq_to_evtchn) )
        return -EINVAL;

    if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq         = virq;

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}
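
/*
 * EVTCHNOP_bind_ipi: allocate a port for sending IPI-style notifications
 * to the nominated local VCPU.
 */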
static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_IPI;
    chn->notify_vcpu_id = vcpu;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}
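
/*
 * EVTCHNOP_bind_pirq: allocate a port through which the domain will be
 * notified of the given physical IRQ. The guest must have been granted
 * access to the IRQ; delivery is initially routed to VCPU0.
 */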
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, pirq = bind->pirq;
    long           rc;

    if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->evtchn_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}
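
/*
 * Close <port1> in <d1>, tearing down any binding. For an interdomain
 * channel the remote end is reset to ECS_UNBOUND, which requires taking
 * both domains' locks in address order (hence the 'again' retry).
 */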
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->evtchn_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);
    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
                v->virq_to_evtchn[chn1->u.virq] = 0;
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
            {
                /*
                 * Failed to obtain a reference. No matter: d2 must be dying
                 * and so will close this event channel for us.
                 */
                d2 = NULL;
                goto out;
            }

            if ( d1 < d2 )
            {
                spin_lock(&d2->evtchn_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->evtchn_lock);
                spin_lock(&d2->evtchn_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            BUG_ON(d1 != current->domain);
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->evtchn_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->evtchn_lock);

    return rc;
}
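
/* EVTCHNOP_close: close the given port in the current domain. */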
static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}
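
/*
 * EVTCHNOP_send: raise a notification on a local port. Interdomain channels
 * signal the remote end's notify VCPU; IPI channels signal the local notify
 * VCPU; sends on unbound ports are silently dropped.
 */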
long evtchn_send(int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            rport, ret = 0;

    spin_lock(&ld->evtchn_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);
    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* silently drop the notification */
        break;
    default:
        ret = -EINVAL;
    }

    spin_unlock(&ld->evtchn_lock);

    return ret;
}
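
/*
 * Deliver physical IRQ <pirq> to <d> via its bound event channel. Called
 * from the hypervisor's IRQ handling; assumes the pirq has a valid binding.
 */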
void send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}
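
/*
 * EVTCHNOP_status: report the state of a port (closed/unbound/interdomain/
 * pirq/virq/ipi) together with its notify VCPU. Querying another domain
 * requires a privileged caller.
 */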
static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t        dom = status->dom;
    int            port = status->port;
    struct evtchn *chn;
    long           rc = 0;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);
    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom  =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->evtchn_lock);
    put_domain(d);
    return rc;
}
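
/*
 * EVTCHNOP_bind_vcpu: re-route notifications for an existing port to the
 * nominated local VCPU. Only unbound, interdomain, and pirq channels may
 * be re-routed; virq and ipi channels stay on the VCPU they were bound to.
 */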
static long evtchn_bind_vcpu(evtchn_bind_vcpu_t *bind)
{
    struct domain *d = current->domain;
    int            port = bind->port;
    int            vcpu = bind->vcpu;
    struct evtchn *chn;
    long           rc = 0;

    if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);
    switch ( chn->state )
    {
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
    case ECS_PIRQ:
        chn->notify_vcpu_id = vcpu;
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->evtchn_lock);
    return rc;
}
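
/*
 * EVTCHNOP_unmask: clear the mask bit for a local port and, if a
 * notification arrived while the port was masked, replay it by setting
 * the selector and upcall bits and kicking the notify VCPU.
 */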
static long evtchn_unmask(evtchn_unmask_t *unmask)
{
    struct domain *d = current->domain;
    shared_info_t *s = d->shared_info;
    int            port = unmask->port;
    struct vcpu   *v;

    spin_lock(&d->evtchn_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->evtchn_lock);
        return -EINVAL;
    }

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending().
     */
    if ( test_and_clear_bit(port, &s->evtchn_mask[0]) &&
         test_bit          (port, &s->evtchn_pending[0]) &&
         !test_and_set_bit (port / BITS_PER_LONG,
                            &v->vcpu_info->evtchn_pending_sel) &&
         !test_and_set_bit (0, &v->vcpu_info->evtchn_upcall_pending) )
    {
        evtchn_notify(v);
    }

    spin_unlock(&d->evtchn_lock);

    return 0;
}
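
/*
 * Top-level EVTCHNOP hypercall handler: copies the argument block in,
 * applies the ACM policy check, dispatches on op.cmd, and copies results
 * back to the guest for the ops that return data.
 */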
long do_event_channel_op(evtchn_op_t *uop)
{
    long rc;
    evtchn_op_t op;

    if ( copy_from_user(&op, uop, sizeof(op)) != 0 )
        return -EFAULT;

    if ( acm_pre_event_channel(&op) )
        return -EACCES;

    switch ( op.cmd )
    {
    case EVTCHNOP_alloc_unbound:
        rc = evtchn_alloc_unbound(&op.u.alloc_unbound);
        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_bind_interdomain:
        rc = evtchn_bind_interdomain(&op.u.bind_interdomain);
        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_bind_virq:
        rc = evtchn_bind_virq(&op.u.bind_virq);
        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_bind_ipi:
        rc = evtchn_bind_ipi(&op.u.bind_ipi);
        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_bind_pirq:
        rc = evtchn_bind_pirq(&op.u.bind_pirq);
        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_close:
        rc = evtchn_close(&op.u.close);
        break;

    case EVTCHNOP_send:
        rc = evtchn_send(op.u.send.port);
        break;

    case EVTCHNOP_status:
        rc = evtchn_status(&op.u.status);
        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
            rc = -EFAULT;
        break;

    case EVTCHNOP_bind_vcpu:
        rc = evtchn_bind_vcpu(&op.u.bind_vcpu);
        break;

    case EVTCHNOP_unmask:
        rc = evtchn_unmask(&op.u.unmask);
        break;

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
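
/*
 * Domain-creation hook: initialise the lock and reserve port 0, which is
 * never returned by get_free_port.
 */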
int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->evtchn_lock);
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;
    return 0;
}
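
/* Domain-destruction hook: close every valid port, then free the buckets. */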
void evtchn_destroy(struct domain *d)
{
    int i;

    for ( i = 0; port_is_valid(d, i); i++ )
        (void)__evtchn_close(d, i);

    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
        xfree(d->evtchn[i]);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */