ia64/xen-unstable

annotate xen/common/event_channel.c @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use domain::max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.
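
A minimal sketch of the pattern being applied (illustrative only; the
loop and the do_something() helper are placeholders, not code taken
from the actual ia64 patch):

    /* Before: bounded by the compile-time constant. */
    for ( vcpu_id = 0; vcpu_id < MAX_VCPUS; vcpu_id++ )
        if ( d->vcpu[vcpu_id] != NULL )
            do_something(d->vcpu[vcpu_id]);

    /* After: bounded by the per-domain vcpu array size. */
    for ( vcpu_id = 0; vcpu_id < d->max_vcpus; vcpu_id++ )
        if ( d->vcpu[vcpu_id] != NULL )
            do_something(d->vcpu[vcpu_id]);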

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 2f9e1348aa98
children
rev   line source
kaf24@954 1 /******************************************************************************
kaf24@954 2 * event_channel.c
kaf24@954 3 *
kaf24@1509 4 * Event notifications from VIRQs, PIRQs, and other domains.
kaf24@954 5 *
kaf24@9597 6 * Copyright (c) 2003-2006, K A Fraser.
kaf24@954 7 *
kaf24@954 8 * This program is distributed in the hope that it will be useful,
kaf24@954 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
kaf24@954 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
kaf24@954 11 * GNU General Public License for more details.
kaf24@954 12 *
kaf24@954 13 * You should have received a copy of the GNU General Public License
kaf24@954 14 * along with this program; if not, write to the Free Software
kaf24@954 15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
kaf24@954 16 */
kaf24@954 17
kaf24@1210 18 #include <xen/config.h>
kaf24@1210 19 #include <xen/init.h>
kaf24@1210 20 #include <xen/lib.h>
kaf24@1210 21 #include <xen/errno.h>
kaf24@1210 22 #include <xen/sched.h>
kaf24@1210 23 #include <xen/event.h>
kaf24@1239 24 #include <xen/irq.h>
kaf24@8468 25 #include <xen/iocap.h>
ack@13292 26 #include <xen/compat.h>
kaf24@9183 27 #include <xen/guest_access.h>
keir@18531 28 #include <xen/keyhandler.h>
cl349@5291 29 #include <asm/current.h>
kaf24@954 30
kaf24@2789 31 #include <public/xen.h>
kaf24@2789 32 #include <public/event_channel.h>
kfraser@15815 33 #include <xsm/xsm.h>
kaf24@1127 34
kaf24@5308 35 #define bucket_from_port(d,p) \
kaf24@5308 36 ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
kaf24@5308 37 #define port_is_valid(d,p) \
ack@13292 38 (((p) >= 0) && ((p) < MAX_EVTCHNS(d)) && \
kaf24@5308 39 (bucket_from_port(d,p) != NULL))
kaf24@5308 40 #define evtchn_from_port(d,p) \
kaf24@5308 41 (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])
kaf24@1127 42
kaf24@7245 43 #define ERROR_EXIT(_errno) \
kaf24@7245 44 do { \
kaf24@12038 45 gdprintk(XENLOG_WARNING, \
keir@16574 46 "EVTCHNOP failure: error %d\n", \
keir@16574 47 (_errno)); \
keir@16574 48 rc = (_errno); \
keir@16574 49 goto out; \
keir@16574 50 } while ( 0 )
keir@16574 51 #define ERROR_EXIT_DOM(_errno, _dom) \
keir@16574 52 do { \
keir@16574 53 gdprintk(XENLOG_WARNING, \
keir@16574 54 "EVTCHNOP failure: domain %d, error %d\n", \
keir@16574 55 (_dom)->domain_id, (_errno)); \
kaf24@7245 56 rc = (_errno); \
kaf24@7245 57 goto out; \
kaf24@7245 58 } while ( 0 )
kaf24@5325 59
keir@17969 60 static int evtchn_set_pending(struct vcpu *v, int port);
keir@17969 61
kaf24@9597 62 static int virq_is_global(int virq)
kaf24@9597 63 {
kaf24@9597 64 int rc;
kaf24@9597 65
kaf24@9597 66 ASSERT((virq >= 0) && (virq < NR_VIRQS));
kaf24@9597 67
kaf24@9597 68 switch ( virq )
kaf24@9597 69 {
kaf24@9597 70 case VIRQ_TIMER:
kaf24@9597 71 case VIRQ_DEBUG:
kaf24@9629 72 case VIRQ_XENOPROF:
kaf24@9597 73 rc = 0;
kaf24@9597 74 break;
kaf24@10099 75 case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
kaf24@10099 76 rc = arch_virq_is_global(virq);
kaf24@10099 77 break;
kaf24@9597 78 default:
kaf24@9597 79 rc = 1;
kaf24@9597 80 break;
kaf24@9597 81 }
kaf24@9597 82
kaf24@9597 83 return rc;
kaf24@9597 84 }
kaf24@9597 85
kaf24@9597 86
kaf24@5308 87 static int get_free_port(struct domain *d)
kaf24@5308 88 {
kaf24@5308 89 struct evtchn *chn;
kaf24@5308 90 int port;
kfraser@15815 91 int i, j;
cl349@3114 92
kfraser@15501 93 if ( d->is_dying )
kfraser@15501 94 return -EINVAL;
kfraser@15501 95
kaf24@5308 96 for ( port = 0; port_is_valid(d, port); port++ )
kaf24@5308 97 if ( evtchn_from_port(d, port)->state == ECS_FREE )
kaf24@5308 98 return port;
kaf24@1127 99
ack@13292 100 if ( port == MAX_EVTCHNS(d) )
kaf24@5308 101 return -ENOSPC;
kaf24@1127 102
kaf24@5308 103 chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
kaf24@5308 104 if ( unlikely(chn == NULL) )
kaf24@5308 105 return -ENOMEM;
kaf24@5308 106 memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
kaf24@5308 107 bucket_from_port(d, port) = chn;
kaf24@1127 108
kfraser@15815 109 for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
kfraser@15815 110 {
kfraser@15815 111 if ( xsm_alloc_security_evtchn(&chn[i]) )
kfraser@15815 112 {
kfraser@15815 113 for ( j = 0; j < i; j++ )
kfraser@15815 114 xsm_free_security_evtchn(&chn[j]);
kfraser@15815 115 xfree(chn);
kfraser@15815 116 return -ENOMEM;
kfraser@15815 117 }
kfraser@15815 118 }
kfraser@15815 119
kaf24@1127 120 return port;
kaf24@1127 121 }
kaf24@1127 122
kaf24@2713 123
kaf24@2713 124 static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
kaf24@2713 125 {
kaf24@5308 126 struct evtchn *chn;
kaf24@7232 127 struct domain *d;
kaf24@7250 128 int port;
kaf24@7232 129 domid_t dom = alloc->dom;
kaf24@9896 130 long rc;
kaf24@9896 131
keir@18574 132 rc = rcu_lock_target_domain_by_id(dom, &d);
keir@18574 133 if ( rc )
keir@18574 134 return rc;
kaf24@7232 135
keir@18602 136 spin_lock(&d->event_lock);
kaf24@2713 137
kaf24@7250 138 if ( (port = get_free_port(d)) < 0 )
keir@16574 139 ERROR_EXIT_DOM(port, d);
kaf24@5325 140 chn = evtchn_from_port(d, port);
kaf24@5325 141
kfraser@15815 142 rc = xsm_evtchn_unbound(d, chn, alloc->remote_dom);
kfraser@15815 143 if ( rc )
kfraser@15815 144 goto out;
kfraser@15815 145
kaf24@7250 146 chn->state = ECS_UNBOUND;
kaf24@7421 147 if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
kaf24@7421 148 chn->u.unbound.remote_domid = current->domain->domain_id;
kaf24@5325 149
kaf24@7250 150 alloc->port = port;
kaf24@2713 151
kaf24@5325 152 out:
keir@18602 153 spin_unlock(&d->event_lock);
kfraser@14192 154 rcu_unlock_domain(d);
kaf24@7232 155
kaf24@5325 156 return rc;
kaf24@2713 157 }
kaf24@2713 158
kaf24@2713 159
kaf24@1218 160 static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
kaf24@1127 161 {
kaf24@7250 162 struct evtchn *lchn, *rchn;
kaf24@7250 163 struct domain *ld = current->domain, *rd;
kaf24@7250 164 int lport, rport = bind->remote_port;
kaf24@7421 165 domid_t rdom = bind->remote_dom;
kaf24@9896 166 long rc;
kaf24@9896 167
kaf24@7421 168 if ( rdom == DOMID_SELF )
kaf24@7421 169 rdom = current->domain->domain_id;
kaf24@7421 170
kfraser@14192 171 if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
kaf24@1127 172 return -ESRCH;
kaf24@1127 173
kaf24@1127 174 /* Avoid deadlock by first acquiring lock of domain with smaller id. */
kaf24@7250 175 if ( ld < rd )
kaf24@954 176 {
keir@18602 177 spin_lock(&ld->event_lock);
keir@18602 178 spin_lock(&rd->event_lock);
kaf24@954 179 }
kaf24@954 180 else
kaf24@954 181 {
kaf24@7250 182 if ( ld != rd )
keir@18602 183 spin_lock(&rd->event_lock);
keir@18602 184 spin_lock(&ld->event_lock);
kaf24@954 185 }
kaf24@954 186
kaf24@7250 187 if ( (lport = get_free_port(ld)) < 0 )
kaf24@7250 188 ERROR_EXIT(lport);
kaf24@7250 189 lchn = evtchn_from_port(ld, lport);
kaf24@2713 190
kaf24@7250 191 if ( !port_is_valid(rd, rport) )
keir@16574 192 ERROR_EXIT_DOM(-EINVAL, rd);
kaf24@7250 193 rchn = evtchn_from_port(rd, rport);
kaf24@7250 194 if ( (rchn->state != ECS_UNBOUND) ||
keir@17349 195 (rchn->u.unbound.remote_domid != ld->domain_id) )
keir@16574 196 ERROR_EXIT_DOM(-EINVAL, rd);
kaf24@954 197
kfraser@15815 198 rc = xsm_evtchn_interdomain(ld, lchn, rd, rchn);
kfraser@15815 199 if ( rc )
kfraser@15815 200 goto out;
kfraser@15815 201
kaf24@7250 202 lchn->u.interdomain.remote_dom = rd;
kaf24@7250 203 lchn->u.interdomain.remote_port = (u16)rport;
kaf24@7250 204 lchn->state = ECS_INTERDOMAIN;
kaf24@7250 205
kaf24@7250 206 rchn->u.interdomain.remote_dom = ld;
kaf24@7250 207 rchn->u.interdomain.remote_port = (u16)lport;
kaf24@7250 208 rchn->state = ECS_INTERDOMAIN;
kaf24@954 209
kaf24@2713 210 /*
kaf24@7250 211 * We may have lost notifications on the remote unbound port. Fix that up
kaf24@7250 212 * here by conservatively always setting a notification on the local port.
kaf24@2713 213 */
kaf24@7250 214 evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
kaf24@2713 215
kaf24@7250 216 bind->local_port = lport;
kaf24@1127 217
kaf24@954 218 out:
keir@18602 219 spin_unlock(&ld->event_lock);
kaf24@7250 220 if ( ld != rd )
keir@18602 221 spin_unlock(&rd->event_lock);
kaf24@1127 222
kfraser@14192 223 rcu_unlock_domain(rd);
kaf24@954 224
kaf24@954 225 return rc;
kaf24@954 226 }
kaf24@954 227
kaf24@954 228
kaf24@1218 229 static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
kaf24@1218 230 {
kaf24@5308 231 struct evtchn *chn;
kaf24@7204 232 struct vcpu *v;
kaf24@7204 233 struct domain *d = current->domain;
kaf24@7250 234 int port, virq = bind->virq, vcpu = bind->vcpu;
kaf24@7250 235 long rc = 0;
kaf24@1218 236
kaf24@9896 237 if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
kaf24@1218 238 return -EINVAL;
kaf24@1218 239
kaf24@9597 240 if ( virq_is_global(virq) && (vcpu != 0) )
kaf24@9597 241 return -EINVAL;
kaf24@9597 242
keir@19788 243 if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
kaf24@9896 244 ((v = d->vcpu[vcpu]) == NULL) )
kaf24@7204 245 return -ENOENT;
cl349@6680 246
keir@19788 247 if ( unlikely(!v->vcpu_info) )
keir@19788 248 return -EAGAIN;
keir@19788 249
keir@18602 250 spin_lock(&d->event_lock);
kaf24@1218 251
kaf24@7250 252 if ( v->virq_to_evtchn[virq] != 0 )
kaf24@7250 253 ERROR_EXIT(-EEXIST);
kaf24@7250 254
kaf24@7250 255 if ( (port = get_free_port(d)) < 0 )
kaf24@7250 256 ERROR_EXIT(port);
kaf24@1218 257
kaf24@5308 258 chn = evtchn_from_port(d, port);
kaf24@5308 259 chn->state = ECS_VIRQ;
kaf24@7250 260 chn->notify_vcpu_id = vcpu;
kaf24@5308 261 chn->u.virq = virq;
kaf24@1218 262
kaf24@7250 263 v->virq_to_evtchn[virq] = bind->port = port;
kaf24@1218 264
kaf24@1218 265 out:
keir@18602 266 spin_unlock(&d->event_lock);
kaf24@1218 267
kaf24@7250 268 return rc;
cl349@2932 269 }
cl349@2932 270
kaf24@5308 271
cl349@2932 272 static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
cl349@2932 273 {
kaf24@5308 274 struct evtchn *chn;
kaf24@5308 275 struct domain *d = current->domain;
kaf24@7250 276 int port, vcpu = bind->vcpu;
kaf24@7250 277 long rc = 0;
kaf24@4202 278
keir@19788 279 if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
kaf24@9896 280 (d->vcpu[vcpu] == NULL) )
kaf24@7204 281 return -ENOENT;
kaf24@7204 282
keir@19788 283 if ( unlikely(!d->vcpu[vcpu]->vcpu_info) )
keir@19788 284 return -EAGAIN;
keir@19788 285
keir@18602 286 spin_lock(&d->event_lock);
cl349@2932 287
kaf24@7250 288 if ( (port = get_free_port(d)) < 0 )
kaf24@7250 289 ERROR_EXIT(port);
cl349@2932 290
kaf24@7250 291 chn = evtchn_from_port(d, port);
kaf24@7250 292 chn->state = ECS_IPI;
kaf24@7250 293 chn->notify_vcpu_id = vcpu;
kaf24@7250 294
kaf24@7250 295 bind->port = port;
kaf24@7250 296
kaf24@7250 297 out:
keir@18602 298 spin_unlock(&d->event_lock);
cl349@2932 299
kaf24@7250 300 return rc;
kaf24@1218 301 }
kaf24@1218 302
kaf24@1218 303
kaf24@1235 304 static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
kaf24@1235 305 {
kaf24@5308 306 struct evtchn *chn;
cl349@2919 307 struct domain *d = current->domain;
kaf24@7250 308 int port, pirq = bind->pirq;
kaf24@7250 309 long rc;
kaf24@1235 310
keir@19650 311 if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
kaf24@1235 312 return -EINVAL;
kaf24@1235 313
kaf24@8468 314 if ( !irq_access_permitted(d, pirq) )
kaf24@8468 315 return -EPERM;
kaf24@8468 316
keir@18602 317 spin_lock(&d->event_lock);
kaf24@1235 318
kaf24@7250 319 if ( d->pirq_to_evtchn[pirq] != 0 )
kaf24@7250 320 ERROR_EXIT(-EEXIST);
kaf24@7250 321
kaf24@7250 322 if ( (port = get_free_port(d)) < 0 )
kaf24@7250 323 ERROR_EXIT(port);
kaf24@1235 324
kaf24@5308 325 chn = evtchn_from_port(d, port);
kaf24@5308 326
kaf24@1506 327 d->pirq_to_evtchn[pirq] = port;
sos22@5700 328 rc = pirq_guest_bind(d->vcpu[0], pirq,
kaf24@1253 329 !!(bind->flags & BIND_PIRQ__WILL_SHARE));
kaf24@1253 330 if ( rc != 0 )
kaf24@1239 331 {
kaf24@1506 332 d->pirq_to_evtchn[pirq] = 0;
kaf24@1239 333 goto out;
kaf24@1239 334 }
kaf24@1239 335
kaf24@5308 336 chn->state = ECS_PIRQ;
kaf24@5308 337 chn->u.pirq = pirq;
kaf24@1235 338
kaf24@7250 339 bind->port = port;
kaf24@7250 340
kaf24@1235 341 out:
keir@18602 342 spin_unlock(&d->event_lock);
kaf24@1235 343
kaf24@7250 344 return rc;
kaf24@1235 345 }
kaf24@1235 346
kaf24@1235 347
kaf24@1506 348 static long __evtchn_close(struct domain *d1, int port1)
kaf24@954 349 {
kaf24@5308 350 struct domain *d2 = NULL;
kaf24@5308 351 struct vcpu *v;
kaf24@5308 352 struct evtchn *chn1, *chn2;
kaf24@5308 353 int port2;
kaf24@5308 354 long rc = 0;
kaf24@954 355
kaf24@954 356 again:
keir@18602 357 spin_lock(&d1->event_lock);
kaf24@954 358
kaf24@5308 359 if ( !port_is_valid(d1, port1) )
kaf24@954 360 {
kaf24@954 361 rc = -EINVAL;
kaf24@954 362 goto out;
kaf24@954 363 }
kaf24@954 364
kaf24@5308 365 chn1 = evtchn_from_port(d1, port1);
kfraser@10977 366
kfraser@10977 367 /* Guest cannot close a Xen-attached event channel. */
kfraser@10977 368 if ( unlikely(chn1->consumer_is_xen) )
kfraser@10977 369 {
kfraser@10977 370 rc = -EINVAL;
kfraser@10977 371 goto out;
kfraser@10977 372 }
kfraser@10977 373
kaf24@5308 374 switch ( chn1->state )
kaf24@954 375 {
kaf24@1218 376 case ECS_FREE:
cl349@3297 377 case ECS_RESERVED:
kaf24@1218 378 rc = -EINVAL;
kaf24@1218 379 goto out;
kaf24@1218 380
kaf24@1218 381 case ECS_UNBOUND:
kaf24@1218 382 break;
kaf24@1218 383
kaf24@1218 384 case ECS_PIRQ:
keir@18550 385 pirq_guest_unbind(d1, chn1->u.pirq);
keir@18208 386 d1->pirq_to_evtchn[chn1->u.pirq] = 0;
kaf24@1218 387 break;
kaf24@1218 388
kaf24@1218 389 case ECS_VIRQ:
kaf24@5289 390 for_each_vcpu ( d1, v )
keir@18208 391 {
keir@18208 392 if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
keir@18208 393 continue;
keir@18208 394 v->virq_to_evtchn[chn1->u.virq] = 0;
keir@18704 395 spin_barrier_irq(&v->virq_lock);
keir@18208 396 }
kaf24@1218 397 break;
kaf24@1218 398
cl349@2932 399 case ECS_IPI:
cl349@2932 400 break;
cl349@2932 401
kaf24@1218 402 case ECS_INTERDOMAIN:
kaf24@1506 403 if ( d2 == NULL )
kaf24@954 404 {
kaf24@5308 405 d2 = chn1->u.interdomain.remote_dom;
kaf24@1505 406
kaf24@1506 407 /* If we unlock d1 then we could lose d2. Must get a reference. */
kaf24@1506 408 if ( unlikely(!get_domain(d2)) )
kfraser@15501 409 BUG();
kaf24@1127 410
kaf24@1542 411 if ( d1 < d2 )
kaf24@954 412 {
keir@18602 413 spin_lock(&d2->event_lock);
kaf24@954 414 }
kaf24@1506 415 else if ( d1 != d2 )
kaf24@954 416 {
keir@18602 417 spin_unlock(&d1->event_lock);
keir@18602 418 spin_lock(&d2->event_lock);
kaf24@954 419 goto again;
kaf24@954 420 }
kaf24@954 421 }
kaf24@5308 422 else if ( d2 != chn1->u.interdomain.remote_dom )
kaf24@954 423 {
kaf24@8232 424 /*
kaf24@8232 425 * We can only get here if the port was closed and re-bound after
kaf24@8232 426 * unlocking d1 but before locking d2 above. We could retry but
kaf24@8232 427 * it is easier to return the same error as if we had seen the
kaf24@8232 428 * port in ECS_CLOSED. It must have passed through that state for
kaf24@8232 429 * us to end up here, so it's a valid error to return.
kaf24@8232 430 */
kaf24@954 431 rc = -EINVAL;
kaf24@954 432 goto out;
kaf24@954 433 }
kaf24@9596 434
kaf24@5308 435 port2 = chn1->u.interdomain.remote_port;
kaf24@5308 436 BUG_ON(!port_is_valid(d2, port2));
kaf24@954 437
kaf24@5308 438 chn2 = evtchn_from_port(d2, port2);
kaf24@5308 439 BUG_ON(chn2->state != ECS_INTERDOMAIN);
kaf24@5308 440 BUG_ON(chn2->u.interdomain.remote_dom != d1);
kaf24@954 441
kaf24@5308 442 chn2->state = ECS_UNBOUND;
kaf24@5308 443 chn2->u.unbound.remote_domid = d1->domain_id;
kaf24@1218 444 break;
kaf24@1218 445
kaf24@1218 446 default:
kaf24@1218 447 BUG();
kaf24@954 448 }
kaf24@954 449
keir@18208 450 /* Clear pending event to avoid unexpected behavior on re-bind. */
keir@18208 451 clear_bit(port1, &shared_info(d1, evtchn_pending));
keir@18208 452
kaf24@5703 453 /* Reset binding to vcpu0 when the channel is freed. */
kaf24@5703 454 chn1->state = ECS_FREE;
kaf24@5703 455 chn1->notify_vcpu_id = 0;
kaf24@1133 456
kfraser@15815 457 xsm_evtchn_close_post(chn1);
kfraser@15815 458
kaf24@954 459 out:
kaf24@1506 460 if ( d2 != NULL )
kaf24@954 461 {
kaf24@1506 462 if ( d1 != d2 )
keir@18602 463 spin_unlock(&d2->event_lock);
kaf24@1506 464 put_domain(d2);
kaf24@954 465 }
kfraser@14196 466
keir@18602 467 spin_unlock(&d1->event_lock);
kaf24@1145 468
kaf24@954 469 return rc;
kaf24@954 470 }
kaf24@954 471
kaf24@954 472
kaf24@1218 473 static long evtchn_close(evtchn_close_t *close)
kaf24@1127 474 {
kaf24@7250 475 return __evtchn_close(current->domain, close->port);
kaf24@1127 476 }
kaf24@1127 477
keir@17969 478 int evtchn_send(struct domain *d, unsigned int lport)
kaf24@954 479 {
kaf24@5308 480 struct evtchn *lchn, *rchn;
keir@17969 481 struct domain *ld = d, *rd;
kfraser@10977 482 struct vcpu *rvcpu;
cl349@2937 483 int rport, ret = 0;
kaf24@954 484
keir@18602 485 spin_lock(&ld->event_lock);
kaf24@954 486
kaf24@5308 487 if ( unlikely(!port_is_valid(ld, lport)) )
kaf24@954 488 {
keir@18602 489 spin_unlock(&ld->event_lock);
kaf24@954 490 return -EINVAL;
kaf24@954 491 }
kaf24@954 492
kaf24@5308 493 lchn = evtchn_from_port(ld, lport);
kfraser@10977 494
kfraser@10977 495 /* Guest cannot send via a Xen-attached event channel. */
kfraser@10977 496 if ( unlikely(lchn->consumer_is_xen) )
kfraser@10977 497 {
keir@18602 498 spin_unlock(&ld->event_lock);
kfraser@10977 499 return -EINVAL;
kfraser@10977 500 }
kfraser@10977 501
kfraser@15815 502 ret = xsm_evtchn_send(ld, lchn);
kfraser@15815 503 if ( ret )
kfraser@15815 504 goto out;
kfraser@15815 505
kaf24@5308 506 switch ( lchn->state )
cl349@2937 507 {
cl349@2937 508 case ECS_INTERDOMAIN:
kaf24@5308 509 rd = lchn->u.interdomain.remote_dom;
kaf24@5308 510 rport = lchn->u.interdomain.remote_port;
kaf24@5308 511 rchn = evtchn_from_port(rd, rport);
kfraser@10977 512 rvcpu = rd->vcpu[rchn->notify_vcpu_id];
kfraser@10977 513 if ( rchn->consumer_is_xen )
kfraser@10977 514 {
kfraser@10977 515 /* Xen consumers need notification only if they are blocked. */
kfraser@14663 516 if ( test_and_clear_bit(_VPF_blocked_in_xen,
kfraser@14663 517 &rvcpu->pause_flags) )
kfraser@10977 518 vcpu_wake(rvcpu);
kfraser@10977 519 }
kfraser@10977 520 else
kfraser@10977 521 {
kfraser@10977 522 evtchn_set_pending(rvcpu, rport);
kfraser@10977 523 }
cl349@2937 524 break;
cl349@2937 525 case ECS_IPI:
kaf24@5308 526 evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
cl349@2937 527 break;
kaf24@7250 528 case ECS_UNBOUND:
kaf24@7250 529 /* silently drop the notification */
kaf24@7250 530 break;
cl349@2937 531 default:
cl349@2937 532 ret = -EINVAL;
cl349@2937 533 }
kaf24@954 534
kfraser@15815 535 out:
keir@18602 536 spin_unlock(&ld->event_lock);
kaf24@954 537
cl349@2937 538 return ret;
kaf24@954 539 }
kaf24@954 540
keir@17969 541 static int evtchn_set_pending(struct vcpu *v, int port)
kaf24@9262 542 {
kaf24@9262 543 struct domain *d = v->domain;
keir@18441 544 int vcpuid;
kaf24@9262 545
kaf24@9262 546 /*
kaf24@9262 547 * The following bit operations must happen in strict order.
kaf24@9262 548 * NB. On x86, the atomic bit operations also act as memory barriers.
kaf24@9262 549 * There is therefore sufficiently strict ordering for this architecture --
kaf24@9262 550 * others may require explicit memory barriers.
kaf24@9262 551 */
kaf24@9262 552
keir@17211 553 if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
keir@17963 554 return 1;
kaf24@9262 555
keir@17211 556 if ( !test_bit (port, &shared_info(d, evtchn_mask)) &&
keir@19268 557 !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
keir@17211 558 &vcpu_info(v, evtchn_pending_sel)) )
kaf24@9262 559 {
kfraser@10405 560 vcpu_mark_events_pending(v);
kaf24@9262 561 }
kaf24@10357 562
kaf24@10357 563 /* Check if some VCPU might be polling for this event. */
keir@19788 564 if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
keir@18441 565 return 0;
keir@18441 566
keir@18441 567 /* Wake any interested (or potentially interested) pollers. */
keir@19788 568 for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
keir@19788 569 vcpuid < d->max_vcpus;
keir@19788 570 vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
kaf24@9262 571 {
keir@18441 572 v = d->vcpu[vcpuid];
keir@18441 573 if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
keir@18441 574 test_and_clear_bit(vcpuid, d->poll_mask) )
kfraser@14657 575 {
keir@18441 576 v->poll_evtchn = 0;
kfraser@14657 577 vcpu_unblock(v);
kfraser@14657 578 }
kaf24@9262 579 }
keir@17963 580
keir@17963 581 return 0;
kaf24@9262 582 }
kaf24@9262 583
keir@18208 584 int guest_enabled_event(struct vcpu *v, int virq)
keir@18208 585 {
keir@18208 586 return ((v != NULL) && (v->virq_to_evtchn[virq] != 0));
keir@18208 587 }
kaf24@9596 588
kaf24@9597 589 void send_guest_vcpu_virq(struct vcpu *v, int virq)
kaf24@9262 590 {
keir@18208 591 unsigned long flags;
kaf24@9597 592 int port;
kaf24@9262 593
kaf24@9597 594 ASSERT(!virq_is_global(virq));
kaf24@9597 595
keir@18208 596 spin_lock_irqsave(&v->virq_lock, flags);
keir@18208 597
kaf24@9597 598 port = v->virq_to_evtchn[virq];
kaf24@9597 599 if ( unlikely(port == 0) )
keir@18208 600 goto out;
kaf24@9597 601
kaf24@9597 602 evtchn_set_pending(v, port);
keir@17971 603
keir@18208 604 out:
keir@18208 605 spin_unlock_irqrestore(&v->virq_lock, flags);
keir@17971 606 }
keir@17971 607
kaf24@9597 608 void send_guest_global_virq(struct domain *d, int virq)
kaf24@9597 609 {
keir@18208 610 unsigned long flags;
kaf24@9597 611 int port;
kfraser@10655 612 struct vcpu *v;
kaf24@9597 613 struct evtchn *chn;
kaf24@9597 614
kaf24@9597 615 ASSERT(virq_is_global(virq));
kaf24@9597 616
keir@19788 617 if ( unlikely(d == NULL) || unlikely(d->vcpu == NULL) )
kfraser@14307 618 return;
kfraser@14307 619
kfraser@10655 620 v = d->vcpu[0];
kfraser@10655 621 if ( unlikely(v == NULL) )
kfraser@10655 622 return;
kfraser@10655 623
keir@18208 624 spin_lock_irqsave(&v->virq_lock, flags);
keir@18208 625
kfraser@10655 626 port = v->virq_to_evtchn[virq];
kaf24@9597 627 if ( unlikely(port == 0) )
keir@18208 628 goto out;
kaf24@9597 629
kaf24@9597 630 chn = evtchn_from_port(d, port);
kaf24@9597 631 evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
keir@18208 632
keir@18208 633 out:
keir@18208 634 spin_unlock_irqrestore(&v->virq_lock, flags);
kaf24@9262 635 }
kaf24@9262 636
keir@17963 637 int send_guest_pirq(struct domain *d, int pirq)
kaf24@5308 638 {
kaf24@5308 639 int port = d->pirq_to_evtchn[pirq];
kaf24@9597 640 struct evtchn *chn;
kaf24@9597 641
keir@18208 642 /*
keir@18208 643 * It should not be possible to race with __evtchn_close():
keir@18208 644 * The caller of this function must synchronise with pirq_guest_unbind().
keir@18208 645 */
kaf24@9597 646 ASSERT(port != 0);
kaf24@9597 647
kaf24@9597 648 chn = evtchn_from_port(d, port);
keir@17963 649 return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
kaf24@5308 650 }
kaf24@954 651
kaf24@9596 652
kaf24@1218 653 static long evtchn_status(evtchn_status_t *status)
kaf24@954 654 {
kaf24@1506 655 struct domain *d;
kaf24@1506 656 domid_t dom = status->dom;
kaf24@1506 657 int port = status->port;
kaf24@5308 658 struct evtchn *chn;
kaf24@1506 659 long rc = 0;
kaf24@1127 660
keir@18574 661 rc = rcu_lock_target_domain_by_id(dom, &d);
keir@18574 662 if ( rc )
keir@18574 663 return rc;
kaf24@954 664
keir@18602 665 spin_lock(&d->event_lock);
kaf24@954 666
kaf24@5308 667 if ( !port_is_valid(d, port) )
kaf24@954 668 {
kaf24@1266 669 rc = -EINVAL;
kaf24@1266 670 goto out;
kaf24@1127 671 }
kaf24@1127 672
kaf24@5308 673 chn = evtchn_from_port(d, port);
kfraser@15815 674
kfraser@15815 675 rc = xsm_evtchn_status(d, chn);
kfraser@15815 676 if ( rc )
kfraser@15815 677 goto out;
kfraser@15815 678
kaf24@5308 679 switch ( chn->state )
kaf24@1127 680 {
kaf24@1127 681 case ECS_FREE:
cl349@3297 682 case ECS_RESERVED:
kaf24@1127 683 status->status = EVTCHNSTAT_closed;
kaf24@1127 684 break;
kaf24@1218 685 case ECS_UNBOUND:
kaf24@1218 686 status->status = EVTCHNSTAT_unbound;
kaf24@5308 687 status->u.unbound.dom = chn->u.unbound.remote_domid;
kaf24@1127 688 break;
kaf24@1218 689 case ECS_INTERDOMAIN:
kaf24@1218 690 status->status = EVTCHNSTAT_interdomain;
cl349@2924 691 status->u.interdomain.dom =
kaf24@5308 692 chn->u.interdomain.remote_dom->domain_id;
kaf24@5308 693 status->u.interdomain.port = chn->u.interdomain.remote_port;
kaf24@1218 694 break;
kaf24@1218 695 case ECS_PIRQ:
kaf24@1218 696 status->status = EVTCHNSTAT_pirq;
kaf24@5308 697 status->u.pirq = chn->u.pirq;
kaf24@1218 698 break;
kaf24@1218 699 case ECS_VIRQ:
kaf24@1218 700 status->status = EVTCHNSTAT_virq;
kaf24@5308 701 status->u.virq = chn->u.virq;
kaf24@1127 702 break;
cl349@2932 703 case ECS_IPI:
kaf24@5703 704 status->status = EVTCHNSTAT_ipi;
cl349@2932 705 break;
kaf24@1127 706 default:
kaf24@1127 707 BUG();
kaf24@954 708 }
kaf24@954 709
kaf24@5703 710 status->vcpu = chn->notify_vcpu_id;
kaf24@5703 711
kaf24@1266 712 out:
keir@18602 713 spin_unlock(&d->event_lock);
kfraser@14192 714 rcu_unlock_domain(d);
keir@17349 715
kaf24@1266 716 return rc;
kaf24@954 717 }
kaf24@954 718
kaf24@9596 719
kaf24@8970 720 long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
iap10@5691 721 {
kaf24@8970 722 struct domain *d = current->domain;
iap10@5691 723 struct evtchn *chn;
kaf24@5703 724 long rc = 0;
kaf24@5703 725
keir@19788 726 if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
kaf24@7250 727 return -ENOENT;
iap10@5691 728
keir@19788 729 if ( unlikely(!d->vcpu[vcpu_id]->vcpu_info) )
keir@19788 730 return -EAGAIN;
keir@19788 731
keir@18602 732 spin_lock(&d->event_lock);
iap10@5691 733
iap10@5691 734 if ( !port_is_valid(d, port) )
iap10@5691 735 {
iap10@5691 736 rc = -EINVAL;
iap10@5691 737 goto out;
iap10@5691 738 }
iap10@5691 739
iap10@5691 740 chn = evtchn_from_port(d, port);
kfraser@10977 741
kfraser@10977 742 /* Guest cannot re-bind a Xen-attached event channel. */
kfraser@10977 743 if ( unlikely(chn->consumer_is_xen) )
kfraser@10977 744 {
kfraser@10977 745 rc = -EINVAL;
kfraser@10977 746 goto out;
kfraser@10977 747 }
kfraser@10977 748
kaf24@5703 749 switch ( chn->state )
kaf24@5703 750 {
kaf24@9597 751 case ECS_VIRQ:
kaf24@9597 752 if ( virq_is_global(chn->u.virq) )
kaf24@9597 753 chn->notify_vcpu_id = vcpu_id;
kaf24@9597 754 else
kaf24@9597 755 rc = -EINVAL;
kaf24@9597 756 break;
kaf24@5703 757 case ECS_UNBOUND:
kaf24@5703 758 case ECS_INTERDOMAIN:
kaf24@5703 759 case ECS_PIRQ:
kaf24@8970 760 chn->notify_vcpu_id = vcpu_id;
kaf24@5703 761 break;
kaf24@5703 762 default:
kaf24@5703 763 rc = -EINVAL;
kaf24@5703 764 break;
kaf24@5703 765 }
iap10@5691 766
iap10@5691 767 out:
keir@18602 768 spin_unlock(&d->event_lock);
keir@17349 769
iap10@5691 770 return rc;
iap10@5691 771 }
kaf24@954 772
kaf24@9596 773
keir@18845 774 int evtchn_unmask(unsigned int port)
kaf24@8351 775 {
kaf24@8351 776 struct domain *d = current->domain;
kaf24@8351 777 struct vcpu *v;
kaf24@8351 778
keir@18602 779 spin_lock(&d->event_lock);
kaf24@8351 780
kaf24@8351 781 if ( unlikely(!port_is_valid(d, port)) )
kaf24@8351 782 {
keir@18602 783 spin_unlock(&d->event_lock);
kaf24@8351 784 return -EINVAL;
kaf24@8351 785 }
kaf24@8351 786
kaf24@8351 787 v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
kaf24@8351 788
kaf24@8351 789 /*
kaf24@8351 790 * These operations must happen in strict order. Based on
kaf24@8351 791 * include/xen/event.h:evtchn_set_pending().
kaf24@8351 792 */
keir@17211 793 if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
keir@17211 794 test_bit (port, &shared_info(d, evtchn_pending)) &&
keir@19268 795 !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
keir@17211 796 &vcpu_info(v, evtchn_pending_sel)) )
kaf24@8351 797 {
kfraser@10405 798 vcpu_mark_events_pending(v);
kaf24@8351 799 }
kaf24@8351 800
keir@18602 801 spin_unlock(&d->event_lock);
kaf24@8351 802
kaf24@8351 803 return 0;
kaf24@8351 804 }
kaf24@8351 805
kaf24@9596 806
kfraser@13535 807 static long evtchn_reset(evtchn_reset_t *r)
kfraser@13535 808 {
kfraser@13535 809 domid_t dom = r->dom;
kfraser@13535 810 struct domain *d;
keir@17349 811 int i, rc;
kfraser@13535 812
keir@18574 813 rc = rcu_lock_target_domain_by_id(dom, &d);
keir@18574 814 if ( rc )
keir@18574 815 return rc;
kfraser@13535 816
kfraser@15815 817 rc = xsm_evtchn_reset(current->domain, d);
kfraser@15815 818 if ( rc )
keir@16856 819 goto out;
kfraser@15815 820
kfraser@13535 821 for ( i = 0; port_is_valid(d, i); i++ )
kfraser@13535 822 (void)__evtchn_close(d, i);
kfraser@13535 823
keir@16856 824 rc = 0;
keir@17349 825
keir@16856 826 out:
kfraser@14192 827 rcu_unlock_domain(d);
kfraser@13535 828
keir@16856 829 return rc;
kfraser@13535 830 }
kfraser@13535 831
kfraser@13535 832
kaf24@9896 833 long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
kaf24@954 834 {
kaf24@954 835 long rc;
kaf24@954 836
kaf24@9896 837 switch ( cmd )
kaf24@954 838 {
kaf24@9896 839 case EVTCHNOP_alloc_unbound: {
kaf24@9896 840 struct evtchn_alloc_unbound alloc_unbound;
kaf24@9896 841 if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
kaf24@9896 842 return -EFAULT;
kaf24@9896 843 rc = evtchn_alloc_unbound(&alloc_unbound);
kaf24@9896 844 if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
kaf24@1235 845 rc = -EFAULT; /* Cleaning up here would be a mess! */
kaf24@1235 846 break;
kaf24@9896 847 }
kaf24@1235 848
kaf24@9896 849 case EVTCHNOP_bind_interdomain: {
kaf24@9896 850 struct evtchn_bind_interdomain bind_interdomain;
kaf24@9896 851 if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
kaf24@9896 852 return -EFAULT;
kaf24@9896 853 rc = evtchn_bind_interdomain(&bind_interdomain);
kaf24@9896 854 if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
kaf24@1127 855 rc = -EFAULT; /* Cleaning up here would be a mess! */
kaf24@954 856 break;
kaf24@9896 857 }
kaf24@954 858
kaf24@9896 859 case EVTCHNOP_bind_virq: {
kaf24@9896 860 struct evtchn_bind_virq bind_virq;
kaf24@9896 861 if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
kaf24@9896 862 return -EFAULT;
kaf24@9896 863 rc = evtchn_bind_virq(&bind_virq);
kaf24@9896 864 if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
kaf24@9896 865 rc = -EFAULT; /* Cleaning up here would be a mess! */
kaf24@954 866 break;
kaf24@9896 867 }
kaf24@954 868
kaf24@9896 869 case EVTCHNOP_bind_ipi: {
kaf24@9896 870 struct evtchn_bind_ipi bind_ipi;
kaf24@9896 871 if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
kaf24@9896 872 return -EFAULT;
kaf24@9896 873 rc = evtchn_bind_ipi(&bind_ipi);
kaf24@9896 874 if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
kaf24@9896 875 rc = -EFAULT; /* Cleaning up here would be a mess! */
kaf24@9896 876 break;
kaf24@9896 877 }
kaf24@9896 878
kaf24@9896 879 case EVTCHNOP_bind_pirq: {
kaf24@9896 880 struct evtchn_bind_pirq bind_pirq;
kaf24@9896 881 if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
kaf24@9896 882 return -EFAULT;
kaf24@9896 883 rc = evtchn_bind_pirq(&bind_pirq);
kaf24@9896 884 if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
kaf24@9896 885 rc = -EFAULT; /* Cleaning up here would be a mess! */
kaf24@9896 886 break;
kaf24@9896 887 }
kaf24@9896 888
kaf24@9896 889 case EVTCHNOP_close: {
kaf24@9896 890 struct evtchn_close close;
kaf24@9896 891 if ( copy_from_guest(&close, arg, 1) != 0 )
kaf24@9896 892 return -EFAULT;
kaf24@9896 893 rc = evtchn_close(&close);
kaf24@9896 894 break;
kaf24@9896 895 }
kaf24@9896 896
kaf24@9896 897 case EVTCHNOP_send: {
kaf24@9896 898 struct evtchn_send send;
kaf24@9896 899 if ( copy_from_guest(&send, arg, 1) != 0 )
kaf24@9896 900 return -EFAULT;
keir@17969 901 rc = evtchn_send(current->domain, send.port);
kaf24@9896 902 break;
kaf24@9896 903 }
kaf24@9896 904
kaf24@9896 905 case EVTCHNOP_status: {
kaf24@9896 906 struct evtchn_status status;
kaf24@9896 907 if ( copy_from_guest(&status, arg, 1) != 0 )
kaf24@9896 908 return -EFAULT;
kaf24@9896 909 rc = evtchn_status(&status);
kaf24@9896 910 if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
kaf24@1127 911 rc = -EFAULT;
kaf24@954 912 break;
kaf24@9896 913 }
kaf24@954 914
kaf24@9896 915 case EVTCHNOP_bind_vcpu: {
kaf24@9896 916 struct evtchn_bind_vcpu bind_vcpu;
kaf24@9896 917 if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
kaf24@9896 918 return -EFAULT;
kaf24@9896 919 rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
iap10@5691 920 break;
kaf24@9896 921 }
iap10@5691 922
kaf24@9896 923 case EVTCHNOP_unmask: {
kaf24@9896 924 struct evtchn_unmask unmask;
kaf24@9896 925 if ( copy_from_guest(&unmask, arg, 1) != 0 )
kaf24@9896 926 return -EFAULT;
keir@18845 927 rc = evtchn_unmask(unmask.port);
kaf24@8351 928 break;
kaf24@9896 929 }
kaf24@8351 930
kfraser@13535 931 case EVTCHNOP_reset: {
kfraser@13535 932 struct evtchn_reset reset;
kfraser@13535 933 if ( copy_from_guest(&reset, arg, 1) != 0 )
kfraser@13535 934 return -EFAULT;
kfraser@13535 935 rc = evtchn_reset(&reset);
kfraser@13535 936 break;
kfraser@13535 937 }
kfraser@13535 938
kaf24@954 939 default:
kaf24@954 940 rc = -ENOSYS;
kaf24@954 941 break;
kaf24@954 942 }
kaf24@954 943
kaf24@954 944 return rc;
kaf24@954 945 }
kaf24@954 946
kaf24@954 947
kfraser@10977 948 int alloc_unbound_xen_event_channel(
kfraser@10977 949 struct vcpu *local_vcpu, domid_t remote_domid)
kfraser@10977 950 {
kfraser@10977 951 struct evtchn *chn;
kfraser@10977 952 struct domain *d = local_vcpu->domain;
kfraser@10977 953 int port;
kfraser@10977 954
keir@19788 955 if ( unlikely(!local_vcpu->vcpu_info) )
keir@19788 956 return -EAGAIN;
keir@19788 957
keir@18602 958 spin_lock(&d->event_lock);
kfraser@10977 959
kfraser@10977 960 if ( (port = get_free_port(d)) < 0 )
kfraser@10977 961 goto out;
kfraser@10977 962 chn = evtchn_from_port(d, port);
kfraser@10977 963
kfraser@10977 964 chn->state = ECS_UNBOUND;
kfraser@10977 965 chn->consumer_is_xen = 1;
kfraser@10977 966 chn->notify_vcpu_id = local_vcpu->vcpu_id;
kfraser@10977 967 chn->u.unbound.remote_domid = remote_domid;
kfraser@10977 968
kfraser@10977 969 out:
keir@18602 970 spin_unlock(&d->event_lock);
kfraser@10977 971
kfraser@10977 972 return port;
kfraser@10977 973 }
kfraser@10977 974
kfraser@10977 975
kfraser@10977 976 void free_xen_event_channel(
kfraser@10977 977 struct vcpu *local_vcpu, int port)
kfraser@10977 978 {
kfraser@10977 979 struct evtchn *chn;
kfraser@10977 980 struct domain *d = local_vcpu->domain;
kfraser@10977 981
keir@18602 982 spin_lock(&d->event_lock);
keir@17446 983
keir@17446 984 if ( unlikely(d->is_dying) )
keir@17446 985 {
keir@18602 986 spin_unlock(&d->event_lock);
keir@17446 987 return;
keir@17446 988 }
keir@17446 989
keir@17446 990 BUG_ON(!port_is_valid(d, port));
kfraser@10977 991 chn = evtchn_from_port(d, port);
kfraser@10977 992 BUG_ON(!chn->consumer_is_xen);
kfraser@10977 993 chn->consumer_is_xen = 0;
keir@17446 994
keir@18602 995 spin_unlock(&d->event_lock);
kfraser@10977 996
kfraser@10977 997 (void)__evtchn_close(d, port);
kfraser@10977 998 }
kfraser@10977 999
kfraser@10977 1000
kfraser@10977 1001 void notify_via_xen_event_channel(int lport)
kfraser@10977 1002 {
kfraser@10977 1003 struct evtchn *lchn, *rchn;
kfraser@10977 1004 struct domain *ld = current->domain, *rd;
kfraser@10977 1005 int rport;
kfraser@10977 1006
keir@18602 1007 spin_lock(&ld->event_lock);
kfraser@10977 1008
kfraser@10977 1009 ASSERT(port_is_valid(ld, lport));
kfraser@10977 1010 lchn = evtchn_from_port(ld, lport);
kfraser@10977 1011 ASSERT(lchn->consumer_is_xen);
kfraser@10977 1012
kfraser@10977 1013 if ( likely(lchn->state == ECS_INTERDOMAIN) )
kfraser@10977 1014 {
kfraser@10977 1015 rd = lchn->u.interdomain.remote_dom;
kfraser@10977 1016 rport = lchn->u.interdomain.remote_port;
kfraser@10977 1017 rchn = evtchn_from_port(rd, rport);
kfraser@10977 1018 evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
kfraser@10977 1019 }
kfraser@10977 1020
keir@18602 1021 spin_unlock(&ld->event_lock);
kfraser@10977 1022 }
kfraser@10977 1023
kfraser@10977 1024
kaf24@5308 1025 int evtchn_init(struct domain *d)
kaf24@1218 1026 {
keir@18602 1027 spin_lock_init(&d->event_lock);
kaf24@5308 1028 if ( get_free_port(d) != 0 )
keir@19674 1029 return -EINVAL;
kaf24@5308 1030 evtchn_from_port(d, 0)->state = ECS_RESERVED;
keir@19788 1031
keir@19788 1032 #if MAX_VIRT_CPUS > BITS_PER_LONG
keir@19788 1033 d->poll_mask = xmalloc_array(unsigned long, BITS_TO_LONGS(MAX_VIRT_CPUS));
keir@19788 1034 if ( !d->poll_mask )
keir@19788 1035 return -ENOMEM;
keir@19788 1036 bitmap_zero(d->poll_mask, MAX_VIRT_CPUS);
keir@19788 1037 #endif
keir@19788 1038
cl349@3291 1039 return 0;
kaf24@1218 1040 }
kaf24@1218 1041
kaf24@1218 1042
kaf24@5308 1043 void evtchn_destroy(struct domain *d)
kaf24@954 1044 {
kaf24@954 1045 int i;
kaf24@5308 1046
kfraser@15501 1047 /* After this barrier no new event-channel allocations can occur. */
kfraser@15501 1048 BUG_ON(!d->is_dying);
keir@18602 1049 spin_barrier(&d->event_lock);
kfraser@15501 1050
kfraser@15501 1051 /* Close all existing event channels. */
kaf24@5308 1052 for ( i = 0; port_is_valid(d, i); i++ )
kfraser@10977 1053 {
kfraser@10977 1054 evtchn_from_port(d, i)->consumer_is_xen = 0;
kfraser@10977 1055 (void)__evtchn_close(d, i);
kfraser@10977 1056 }
kaf24@5308 1057
kfraser@15501 1058 /* Free all event-channel buckets. */
keir@18602 1059 spin_lock(&d->event_lock);
kaf24@5308 1060 for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
kfraser@15815 1061 {
kfraser@15815 1062 xsm_free_security_evtchn(d->evtchn[i]);
kaf24@7782 1063 xfree(d->evtchn[i]);
keir@17446 1064 d->evtchn[i] = NULL;
kfraser@15815 1065 }
keir@18602 1066 spin_unlock(&d->event_lock);
keir@19788 1067
keir@19788 1068 #if MAX_VIRT_CPUS > BITS_PER_LONG
keir@19788 1069 xfree(d->poll_mask);
keir@19788 1070 d->poll_mask = NULL;
keir@19788 1071 #endif
kaf24@954 1072 }
kaf24@3914 1073
keir@18531 1074 static void domain_dump_evtchn_info(struct domain *d)
keir@18531 1075 {
keir@18531 1076 unsigned int port;
keir@18531 1077
keir@18531 1078 printk("Domain %d polling vCPUs: %08lx\n", d->domain_id, d->poll_mask[0]);
keir@18531 1079
keir@18602 1080 if ( !spin_trylock(&d->event_lock) )
keir@18531 1081 return;
keir@18531 1082
keir@18531 1083 printk("Event channel information for domain %d:\n",
keir@18531 1084 d->domain_id);
keir@18531 1085
keir@18531 1086 for ( port = 1; port < MAX_EVTCHNS(d); ++port )
keir@18531 1087 {
keir@18531 1088 const struct evtchn *chn;
keir@18531 1089
keir@18531 1090 if ( !port_is_valid(d, port) )
keir@18531 1091 continue;
keir@18531 1092 chn = evtchn_from_port(d, port);
keir@18531 1093 if ( chn->state == ECS_FREE )
keir@18531 1094 continue;
keir@18531 1095
keir@18531 1096 printk(" %4u[%d/%d]: s=%d n=%d",
keir@18531 1097 port,
keir@18531 1098 test_bit(port, &shared_info(d, evtchn_pending)),
keir@18531 1099 test_bit(port, &shared_info(d, evtchn_mask)),
keir@18531 1100 chn->state, chn->notify_vcpu_id);
keir@18531 1101 switch ( chn->state )
keir@18531 1102 {
keir@18531 1103 case ECS_UNBOUND:
keir@18531 1104 printk(" d=%d", chn->u.unbound.remote_domid);
keir@18531 1105 break;
keir@18531 1106 case ECS_INTERDOMAIN:
keir@18531 1107 printk(" d=%d p=%d",
keir@18531 1108 chn->u.interdomain.remote_dom->domain_id,
keir@18531 1109 chn->u.interdomain.remote_port);
keir@18531 1110 break;
keir@18531 1111 case ECS_PIRQ:
keir@18531 1112 printk(" p=%d", chn->u.pirq);
keir@18531 1113 break;
keir@18531 1114 case ECS_VIRQ:
keir@18531 1115 printk(" v=%d", chn->u.virq);
keir@18531 1116 break;
keir@18531 1117 }
keir@18531 1118 printk(" x=%d\n", chn->consumer_is_xen);
keir@18531 1119 }
keir@18531 1120
keir@18602 1121 spin_unlock(&d->event_lock);
keir@18531 1122 }
keir@18531 1123
keir@18531 1124 static void dump_evtchn_info(unsigned char key)
keir@18531 1125 {
keir@18531 1126 struct domain *d;
keir@18531 1127
keir@18531 1128 printk("'%c' pressed -> dumping event-channel info\n", key);
keir@18531 1129
keir@18531 1130 rcu_read_lock(&domlist_read_lock);
keir@18531 1131
keir@18531 1132 for_each_domain ( d )
keir@18531 1133 domain_dump_evtchn_info(d);
keir@18531 1134
keir@18531 1135 rcu_read_unlock(&domlist_read_lock);
keir@18531 1136 }
keir@18531 1137
keir@18531 1138 static int __init dump_evtchn_info_key_init(void)
keir@18531 1139 {
keir@18531 1140 register_keyhandler('e', dump_evtchn_info, "dump evtchn info");
keir@18531 1141 return 0;
keir@18531 1142 }
keir@18531 1143 __initcall(dump_evtchn_info_key_init);
keir@18531 1144
kaf24@3914 1145 /*
kaf24@3914 1146 * Local variables:
kaf24@3914 1147 * mode: C
kaf24@3914 1148 * c-set-style: "BSD"
kaf24@3914 1149 * c-basic-offset: 4
kaf24@3914 1150 * tab-width: 4
kaf24@3914 1151 * indent-tabs-mode: nil
kaf24@3988 1152 * End:
kaf24@3914 1153 */