direct-io.hg

view xen/common/event_channel.c @ 2714:77fbe6c094f2

bitkeeper revision 1.1159.1.261 (41792d94c9Ck-m72kpZkL5_a2aSoZA)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xeno.bk-26dom0
into freefall.cl.cam.ac.uk:/local/scratch/cl349/xeno.bk-24dom0
author cl349@freefall.cl.cam.ac.uk
date Fri Oct 22 15:56:04 2004 +0000 (2004-10-22)
parents ed0f5b1a41ba 06527e1d6757
children cf913b2c5774
line source
1 /******************************************************************************
2 * event_channel.c
3 *
4 * Event notifications from VIRQs, PIRQs, and other domains.
5 *
6 * Copyright (c) 2003-2004, K A Fraser.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
18 #include <xen/config.h>
19 #include <xen/init.h>
20 #include <xen/lib.h>
21 #include <xen/errno.h>
22 #include <xen/sched.h>
23 #include <xen/event.h>
24 #include <xen/irq.h>
26 #include <hypervisor-ifs/hypervisor-if.h>
27 #include <hypervisor-ifs/event_channel.h>
29 #define INIT_EVENT_CHANNELS 16
30 #define MAX_EVENT_CHANNELS 1024
33 static int get_free_port(struct domain *d)
34 {
35 int max, port;
36 event_channel_t *chn;
38 max = d->max_event_channel;
39 chn = d->event_channel;
41 for ( port = 0; port < max; port++ )
42 if ( chn[port].state == ECS_FREE )
43 break;
45 if ( port == max )
46 {
47 if ( max == MAX_EVENT_CHANNELS )
48 return -ENOSPC;
50 max *= 2;
52 chn = xmalloc(max * sizeof(event_channel_t));
53 if ( unlikely(chn == NULL) )
54 return -ENOMEM;
56 memset(chn, 0, max * sizeof(event_channel_t));
58 if ( d->event_channel != NULL )
59 {
60 memcpy(chn, d->event_channel, (max/2) * sizeof(event_channel_t));
61 xfree(d->event_channel);
62 }
64 d->event_channel = chn;
65 d->max_event_channel = max;
66 }
68 return port;
69 }
72 static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
73 {
74 struct domain *d = current;
75 int port;
77 spin_lock(&d->event_channel_lock);
79 if ( (port = get_free_port(d)) >= 0 )
80 {
81 d->event_channel[port].state = ECS_UNBOUND;
82 d->event_channel[port].u.unbound.remote_domid = alloc->dom;
83 }
85 spin_unlock(&d->event_channel_lock);
87 if ( port < 0 )
88 return port;
90 alloc->port = port;
91 return 0;
92 }
/*
 * EVTCHNOP_bind_interdomain: connect <dom1,port1> to <dom2,port2>.
 * Either port may be passed as zero, in which case a free port is allocated
 * in that domain.  Only a privileged domain may name a dom1 other than
 * itself.  On success bind->port1 and bind->port2 hold the bound ports.
 */
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
#define ERROR_EXIT(_errno) do { rc = (_errno); goto out; } while ( 0 )
    struct domain *d1, *d2;
    int port1 = bind->port1, port2 = bind->port2;
    domid_t dom1 = bind->dom1, dom2 = bind->dom2;
    long rc = 0;

    if ( !IS_PRIV(current) && (dom1 != DOMID_SELF) )
        return -EPERM;

    if ( (port1 < 0) || (port2 < 0) )
        return -EINVAL;

    if ( dom1 == DOMID_SELF )
        dom1 = current->id;
    if ( dom2 == DOMID_SELF )
        dom2 = current->id;

    /* Both lookups take a domain reference; drop d1's if d2's fails. */
    if ( ((d1 = find_domain_by_id(dom1)) == NULL) ||
         ((d2 = find_domain_by_id(dom2)) == NULL) )
    {
        if ( d1 != NULL )
            put_domain(d1);
        return -ESRCH;
    }

    /*
     * Avoid deadlock by always acquiring the lock of the lower-addressed
     * domain first (NB. the ordering is on pointers, not domain ids).
     */
    if ( d1 < d2 )
    {
        spin_lock(&d1->event_channel_lock);
        spin_lock(&d2->event_channel_lock);
    }
    else
    {
        if ( d1 != d2 )
            spin_lock(&d2->event_channel_lock);
        spin_lock(&d1->event_channel_lock);
    }

    /* Obtain, or ensure that we already have, a valid <port1>. */
    if ( port1 == 0 )
    {
        if ( (port1 = get_free_port(d1)) < 0 )
            ERROR_EXIT(port1);
    }
    else if ( port1 >= d1->max_event_channel )
        ERROR_EXIT(-EINVAL);

    /* Obtain, or ensure that we already have, a valid <port2>. */
    if ( port2 == 0 )
    {
        /* Make port1 non-free while we allocate port2 (in case dom1==dom2). */
        u16 tmp = d1->event_channel[port1].state;
        d1->event_channel[port1].state = ECS_INTERDOMAIN;
        port2 = get_free_port(d2);
        d1->event_channel[port1].state = tmp;
        if ( port2 < 0 )
            ERROR_EXIT(port2);
    }
    else if ( port2 >= d2->max_event_channel )
        ERROR_EXIT(-EINVAL);

    /* Validate <dom1,port1>'s current state. */
    switch ( d1->event_channel[port1].state )
    {
    case ECS_FREE:
        break;

    case ECS_UNBOUND:
        /* Port reserved by evtchn_alloc_unbound: must be reserved for dom2. */
        if ( d1->event_channel[port1].u.unbound.remote_domid != dom2 )
            ERROR_EXIT(-EINVAL);
        break;

    case ECS_INTERDOMAIN:
        /* Already bound: succeed (rc=0) only if bound to exactly this peer. */
        rc = ((d1->event_channel[port1].u.interdomain.remote_dom != d2) ||
              (d1->event_channel[port1].u.interdomain.remote_port != port2)) ?
            -EINVAL : 0;
        goto out;

    default:
        ERROR_EXIT(-EINVAL);
    }

    /* Validate <dom2,port2>'s current state. */
    switch ( d2->event_channel[port2].state )
    {
    case ECS_FREE:
        break;

    case ECS_UNBOUND:
        if ( d2->event_channel[port2].u.unbound.remote_domid != dom1 )
            ERROR_EXIT(-EINVAL);
        break;

    default:
        ERROR_EXIT(-EINVAL);
    }

    /*
     * Everything checked out okay -- bind <dom1,port1> to <dom2,port2>.
     */

    d1->event_channel[port1].u.interdomain.remote_dom = d2;
    d1->event_channel[port1].u.interdomain.remote_port = (u16)port2;
    d1->event_channel[port1].state = ECS_INTERDOMAIN;

    d2->event_channel[port2].u.interdomain.remote_dom = d1;
    d2->event_channel[port2].u.interdomain.remote_port = (u16)port1;
    d2->event_channel[port2].state = ECS_INTERDOMAIN;

 out:
    spin_unlock(&d1->event_channel_lock);
    if ( d1 != d2 )
        spin_unlock(&d2->event_channel_lock);

    put_domain(d1);
    put_domain(d2);

    /* NB. Ports are reported back even on failure paths reached via out:. */
    bind->port1 = port1;
    bind->port2 = port2;

    return rc;
#undef ERROR_EXIT
}
222 static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
223 {
224 struct domain *d = current;
225 int port, virq = bind->virq;
227 if ( virq >= ARRAY_SIZE(d->virq_to_evtchn) )
228 return -EINVAL;
230 spin_lock(&d->event_channel_lock);
232 /*
233 * Port 0 is the fallback port for VIRQs that haven't been explicitly
234 * bound yet. The exception is the 'misdirect VIRQ', which is permanently
235 * bound to port 0.
236 */
237 if ( ((port = d->virq_to_evtchn[virq]) != 0) ||
238 (virq == VIRQ_MISDIRECT) ||
239 ((port = get_free_port(d)) < 0) )
240 goto out;
242 d->event_channel[port].state = ECS_VIRQ;
243 d->event_channel[port].u.virq = virq;
245 d->virq_to_evtchn[virq] = port;
247 out:
248 spin_unlock(&d->event_channel_lock);
250 if ( port < 0 )
251 return port;
253 bind->port = port;
254 return 0;
255 }
258 static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
259 {
260 struct domain *d = current;
261 int port, rc, pirq = bind->pirq;
263 if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
264 return -EINVAL;
266 spin_lock(&d->event_channel_lock);
268 if ( ((rc = port = d->pirq_to_evtchn[pirq]) != 0) ||
269 ((rc = port = get_free_port(d)) < 0) )
270 goto out;
272 d->pirq_to_evtchn[pirq] = port;
273 rc = pirq_guest_bind(d, pirq,
274 !!(bind->flags & BIND_PIRQ__WILL_SHARE));
275 if ( rc != 0 )
276 {
277 d->pirq_to_evtchn[pirq] = 0;
278 goto out;
279 }
281 d->event_channel[port].state = ECS_PIRQ;
282 d->event_channel[port].u.pirq = pirq;
284 out:
285 spin_unlock(&d->event_channel_lock);
287 if ( rc < 0 )
288 return rc;
290 bind->port = port;
291 return 0;
292 }
/*
 * Close event-channel <port1> of domain <d1>, tearing down whatever it is
 * bound to (PIRQ, VIRQ, or a remote domain's port).  An interdomain peer's
 * end is reverted to ECS_UNBOUND with d1 recorded as the permitted
 * rebinder.  Returns 0 on success, -EINVAL for port 0, an out-of-range
 * port, or an already-free port, or a pirq_guest_unbind() error.
 */
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    event_channel_t *chn1, *chn2;
    int port2;
    long rc = 0;

 again:
    spin_lock(&d1->event_channel_lock);

    chn1 = d1->event_channel;

    /* NB. Port 0 is special (VIRQ_MISDIRECT). Never let it be closed. */
    if ( (port1 <= 0) || (port1 >= d1->max_event_channel) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1[port1].state )
    {
    case ECS_FREE:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        /* Detach from the physical IRQ; only clear the mapping on success. */
        if ( (rc = pirq_guest_unbind(d1, chn1[port1].u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1[port1].u.pirq] = 0;
        break;

    case ECS_VIRQ:
        d1->virq_to_evtchn[chn1[port1].u.virq] = 0;
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1[port1].u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
            {
                /*
                 * Failed to obtain a reference. No matter: d2 must be dying
                 * and so will close this event channel for us.
                 */
                d2 = NULL;
                goto out;
            }

            /*
             * Lock-order rule: lower-addressed domain is locked first.  If
             * d2 sorts before d1 we must drop d1's lock, take d2's, and
             * retry from scratch -- the channel may have changed meanwhile,
             * hence the re-check of d2 on the next pass (else-branch below).
             */
            if ( d1 < d2 )
            {
                spin_lock(&d2->event_channel_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->event_channel_lock);
                spin_lock(&d2->event_channel_lock);
                goto again;
            }
        }
        else if ( d2 != chn1[port1].u.interdomain.remote_dom )
        {
            /* Channel was rebound to a different peer during the retry. */
            rc = -EINVAL;
            goto out;
        }

        chn2 = d2->event_channel;
        port2 = chn1[port1].u.interdomain.remote_port;

        /* The two ends of a bound channel must mirror each other exactly. */
        if ( port2 >= d2->max_event_channel )
            BUG();
        if ( chn2[port2].state != ECS_INTERDOMAIN )
            BUG();
        if ( chn2[port2].u.interdomain.remote_dom != d1 )
            BUG();

        /* Leave the peer's end half-open, ready to be rebound by d1. */
        chn2[port2].state = ECS_UNBOUND;
        chn2[port2].u.unbound.remote_domid = d1->id;
        break;

    default:
        BUG();
    }

    chn1[port1].state = ECS_FREE;

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->event_channel_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->event_channel_lock);

    return rc;
}
399 static long evtchn_close(evtchn_close_t *close)
400 {
401 struct domain *d;
402 long rc;
403 domid_t dom = close->dom;
405 if ( dom == DOMID_SELF )
406 dom = current->id;
407 else if ( !IS_PRIV(current) )
408 return -EPERM;
410 if ( (d = find_domain_by_id(dom)) == NULL )
411 return -ESRCH;
413 rc = __evtchn_close(d, close->port);
415 put_domain(d);
416 return rc;
417 }
420 static long evtchn_send(int lport)
421 {
422 struct domain *ld = current, *rd;
423 int rport;
425 spin_lock(&ld->event_channel_lock);
427 if ( unlikely(lport < 0) ||
428 unlikely(lport >= ld->max_event_channel) ||
429 unlikely(ld->event_channel[lport].state != ECS_INTERDOMAIN) )
430 {
431 spin_unlock(&ld->event_channel_lock);
432 return -EINVAL;
433 }
435 rd = ld->event_channel[lport].u.interdomain.remote_dom;
436 rport = ld->event_channel[lport].u.interdomain.remote_port;
438 evtchn_set_pending(rd, rport);
440 spin_unlock(&ld->event_channel_lock);
442 return 0;
443 }
446 static long evtchn_status(evtchn_status_t *status)
447 {
448 struct domain *d;
449 domid_t dom = status->dom;
450 int port = status->port;
451 event_channel_t *chn;
452 long rc = 0;
454 if ( dom == DOMID_SELF )
455 dom = current->id;
456 else if ( !IS_PRIV(current) )
457 return -EPERM;
459 if ( (d = find_domain_by_id(dom)) == NULL )
460 return -ESRCH;
462 spin_lock(&d->event_channel_lock);
464 chn = d->event_channel;
466 if ( (port < 0) || (port >= d->max_event_channel) )
467 {
468 rc = -EINVAL;
469 goto out;
470 }
472 switch ( chn[port].state )
473 {
474 case ECS_FREE:
475 status->status = EVTCHNSTAT_closed;
476 break;
477 case ECS_UNBOUND:
478 status->status = EVTCHNSTAT_unbound;
479 status->u.unbound.dom = chn[port].u.unbound.remote_domid;
480 break;
481 case ECS_INTERDOMAIN:
482 status->status = EVTCHNSTAT_interdomain;
483 status->u.interdomain.dom = chn[port].u.interdomain.remote_dom->id;
484 status->u.interdomain.port = chn[port].u.interdomain.remote_port;
485 break;
486 case ECS_PIRQ:
487 status->status = EVTCHNSTAT_pirq;
488 status->u.pirq = chn[port].u.pirq;
489 break;
490 case ECS_VIRQ:
491 status->status = EVTCHNSTAT_virq;
492 status->u.virq = chn[port].u.virq;
493 break;
494 default:
495 BUG();
496 }
498 out:
499 spin_unlock(&d->event_channel_lock);
500 put_domain(d);
501 return rc;
502 }
505 long do_event_channel_op(evtchn_op_t *uop)
506 {
507 long rc;
508 evtchn_op_t op;
510 if ( copy_from_user(&op, uop, sizeof(op)) != 0 )
511 return -EFAULT;
513 switch ( op.cmd )
514 {
515 case EVTCHNOP_alloc_unbound:
516 rc = evtchn_alloc_unbound(&op.u.alloc_unbound);
517 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
518 rc = -EFAULT; /* Cleaning up here would be a mess! */
519 break;
521 case EVTCHNOP_bind_interdomain:
522 rc = evtchn_bind_interdomain(&op.u.bind_interdomain);
523 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
524 rc = -EFAULT; /* Cleaning up here would be a mess! */
525 break;
527 case EVTCHNOP_bind_virq:
528 rc = evtchn_bind_virq(&op.u.bind_virq);
529 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
530 rc = -EFAULT; /* Cleaning up here would be a mess! */
531 break;
533 case EVTCHNOP_bind_pirq:
534 rc = evtchn_bind_pirq(&op.u.bind_pirq);
535 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
536 rc = -EFAULT; /* Cleaning up here would be a mess! */
537 break;
539 case EVTCHNOP_close:
540 rc = evtchn_close(&op.u.close);
541 break;
543 case EVTCHNOP_send:
544 rc = evtchn_send(op.u.send.local_port);
545 break;
547 case EVTCHNOP_status:
548 rc = evtchn_status(&op.u.status);
549 if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
550 rc = -EFAULT;
551 break;
553 default:
554 rc = -ENOSYS;
555 break;
556 }
558 return rc;
559 }
562 int init_event_channels(struct domain *d)
563 {
564 spin_lock_init(&d->event_channel_lock);
565 d->event_channel = xmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t));
566 if ( unlikely(d->event_channel == NULL) )
567 return -ENOMEM;
568 d->max_event_channel = INIT_EVENT_CHANNELS;
569 memset(d->event_channel, 0, INIT_EVENT_CHANNELS * sizeof(event_channel_t));
570 d->event_channel[0].state = ECS_VIRQ;
571 d->event_channel[0].u.virq = VIRQ_MISDIRECT;
572 return 0;
573 }
576 void destroy_event_channels(struct domain *d)
577 {
578 int i;
579 if ( d->event_channel != NULL )
580 {
581 for ( i = 0; i < d->max_event_channel; i++ )
582 (void)__evtchn_close(d, i);
583 xfree(d->event_channel);
584 }
585 }