ia64/xen-unstable

annotate xen/common/domain.c @ 17251:b2a3fe7f5591

domain_shutdown() needs to vcpu_pause_nosync() rather than directly
incrementing the pause_count field. The former ensures that the VCPU
actually gets descheduled --- synchronously in the case of the
currently-running VCPU.

Based on a bug report and proposed patch by Ben Guthro and Robert
Phillips of Virtual Iron.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 18 15:23:25 2008 +0000 (2008-03-18)
parents af33f2054f47
children b667e220e556
rev   line source
kaf24@1749 1 /******************************************************************************
kaf24@1749 2 * domain.c
kaf24@1749 3 *
kaf24@1749 4 * Generic domain-handling functions.
kaf24@1749 5 */
kaf24@1450 6
kaf24@1210 7 #include <xen/config.h>
kfraser@13536 8 #include <xen/compat.h>
kaf24@1210 9 #include <xen/init.h>
kaf24@1210 10 #include <xen/lib.h>
kaf24@1210 11 #include <xen/errno.h>
kaf24@1210 12 #include <xen/sched.h>
cl349@5247 13 #include <xen/domain.h>
kaf24@1210 14 #include <xen/mm.h>
kaf24@1210 15 #include <xen/event.h>
kaf24@1210 16 #include <xen/time.h>
kaf24@1506 17 #include <xen/console.h>
kaf24@4335 18 #include <xen/softirq.h>
kaf24@5356 19 #include <xen/domain_page.h>
kaf24@8456 20 #include <xen/rangeset.h>
kaf24@9133 21 #include <xen/guest_access.h>
kaf24@9183 22 #include <xen/hypercall.h>
kaf24@9526 23 #include <xen/delay.h>
kaf24@10979 24 #include <xen/shutdown.h>
kaf24@11019 25 #include <xen/percpu.h>
kfraser@12397 26 #include <xen/multicall.h>
kfraser@14058 27 #include <xen/rcupdate.h>
kaf24@5356 28 #include <asm/debugger.h>
kaf24@7205 29 #include <public/sched.h>
kaf24@7170 30 #include <public/vcpu.h>
kfraser@15815 31 #include <xsm/xsm.h>
kaf24@881 32
keir@17187 33 /* opt_dom0_vcpus_pin: If true, dom0 VCPUs are pinned. */
keir@17187 34 static unsigned int opt_dom0_vcpus_pin;
keir@17187 35 boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin);
keir@17187 36
/* Which entity drives CPU frequency scaling (set from the command line). */
enum cpufreq_controller cpufreq_controller;

/*
 * Parse the "cpufreq=" boot parameter.  Only "dom0-kernel" is recognised:
 * it delegates frequency control to the dom0 kernel and therefore forces
 * dom0 VCPU pinning (dom0 must address specific physical CPUs).
 */
static void __init setup_cpufreq_option(char *str)
{
    if ( !strcmp(str, "dom0-kernel") )
    {
        cpufreq_controller = FREQCTL_dom0_kernel;
        opt_dom0_vcpus_pin = 1;
    }
}
custom_param("cpufreq", setup_cpufreq_option);
keir@17187 47
kfraser@14058 48 /* Protect updates/reads (resp.) of domain_list and domain_hash. */
kfraser@14058 49 DEFINE_SPINLOCK(domlist_update_lock);
kfraser@14058 50 DEFINE_RCU_READ_LOCK(domlist_read_lock);
kfraser@14058 51
kfraser@14058 52 #define DOMAIN_HASH_SIZE 256
kfraser@14058 53 #define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
kfraser@14058 54 static struct domain *domain_hash[DOMAIN_HASH_SIZE];
kaf24@2806 55 struct domain *domain_list;
kaf24@382 56
kaf24@3338 57 struct domain *dom0;
kaf24@3338 58
kaf24@11014 59 struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;
kaf24@10293 60
/* Return the domain ID of the currently-executing VCPU's domain. */
int current_domain_id(void)
{
    return current->domain->domain_id;
}
kaf24@12038 65
/*
 * Allocate and minimally initialise a domain structure for @domid.
 * Returns NULL on allocation or XSM failure.  On success the domain's
 * reference count is 1 (held by the caller).
 */
struct domain *alloc_domain(domid_t domid)
{
    struct domain *d;

    if ( (d = xmalloc(struct domain)) == NULL )
        return NULL;

    memset(d, 0, sizeof(*d));
    d->domain_id = domid;

    /* Attach the XSM security label before the domain is used anywhere. */
    if ( xsm_alloc_security_domain(d) != 0 )
    {
        free_domain(d);
        return NULL;
    }

    atomic_set(&d->refcnt, 1);
    spin_lock_init(&d->big_lock);
    spin_lock_init(&d->page_alloc_lock);
    spin_lock_init(&d->shutdown_lock);
    spin_lock_init(&d->hypercall_deadlock_mutex);
    INIT_LIST_HEAD(&d->page_list);
    INIT_LIST_HEAD(&d->xenpage_list);

    return d;
}
kaf24@10281 92
/* Release a domain structure allocated by alloc_domain(). */
void free_domain(struct domain *d)
{
    xsm_free_security_domain(d);
    xfree(d);
}
kaf24@10281 98
/*
 * Mark the domain fully shut down once every VCPU has been paused for
 * shutdown, then notify dom0.  Caller must hold d->shutdown_lock.
 */
static void __domain_finalise_shutdown(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(!spin_is_locked(&d->shutdown_lock));

    if ( d->is_shut_down )
        return;

    /* Shutdown completes only when every VCPU is paused for it. */
    for_each_vcpu ( d, v )
        if ( !v->paused_for_shutdown )
            return;

    d->is_shut_down = 1;
    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
kfraser@14705 115
/*
 * If @v's domain is shutting down, pause @v for that shutdown (cancelling
 * any deferral @v held) and re-check whether the shutdown can complete.
 */
static void vcpu_check_shutdown(struct vcpu *v)
{
    struct domain *d = v->domain;

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        if ( !v->paused_for_shutdown )
            /* nosync: @v may be the current VCPU pausing itself. */
            vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
        v->defer_shutdown = 0;
        __domain_finalise_shutdown(d);
    }

    spin_unlock(&d->shutdown_lock);
}
kfraser@14705 133
/*
 * Allocate and initialise VCPU @vcpu_id of domain @d, scheduled on
 * physical CPU @cpu_id.  Returns NULL on failure (all partial state is
 * torn down).  On success the VCPU is linked into the domain's list and
 * thus becomes visible to for_each_vcpu().
 */
struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    BUG_ON(d->vcpu[vcpu_id] != NULL);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;

    v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
    v->runstate.state_entry_time = NOW();

    if ( !is_idle_domain(d) )
    {
        /* Non-idle VCPUs start offline and must be explicitly brought up. */
        set_bit(_VPF_down, &v->pause_flags);
        v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
    }

    if ( sched_init_vcpu(v, cpu_id) != 0 )
    {
        free_vcpu_struct(v);
        return NULL;
    }

    if ( vcpu_initialise(v) != 0 )
    {
        /* Undo sched_init_vcpu() before freeing the structure. */
        sched_destroy_vcpu(v);
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
        d->vcpu[v->vcpu_id-1]->next_in_list = v;

    /* Must be called after making new vcpu visible to for_each_vcpu(). */
    vcpu_check_shutdown(v);

    return v;
}
kaf24@10281 178
/*
 * Return the idle VCPU for physical CPU @cpu_id, creating it on first use.
 * The first idle VCPU also creates the idle domain; later ones share it.
 */
struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;

    /* Already created? */
    if ( (v = idle_vcpu[cpu_id]) != NULL )
        return v;

    d = (vcpu_id == 0) ?
        domain_create(IDLE_DOMAIN_ID, 0, 0) :
        idle_vcpu[cpu_id - vcpu_id]->domain;
    BUG_ON(d == NULL);

    v = alloc_vcpu(d, vcpu_id, cpu_id);
    idle_vcpu[cpu_id] = v;

    return v;
}
kaf24@10281 198
kfraser@14911 199 struct domain *domain_create(
kfraser@14911 200 domid_t domid, unsigned int domcr_flags, ssidref_t ssidref)
iap10@236 201 {
kaf24@1590 202 struct domain *d, **pd;
kfraser@15818 203 enum { INIT_evtchn = 1, INIT_gnttab = 2, INIT_arch = 8 };
kfraser@15501 204 int init_status = 0;
iap10@236 205
kfraser@10280 206 if ( (d = alloc_domain(domid)) == NULL )
kaf24@1123 207 return NULL;
iap10@236 208
kfraser@12210 209 if ( domcr_flags & DOMCRF_hvm )
kfraser@12210 210 d->is_hvm = 1;
kfraser@12210 211
keir@17187 212 if ( (domid == 0) && opt_dom0_vcpus_pin )
keir@17187 213 d->is_pinned = 1;
keir@17187 214
kaf24@8611 215 rangeset_domain_initialise(d);
kaf24@5146 216
kaf24@8611 217 if ( !is_idle_domain(d) )
kaf24@8611 218 {
kfraser@15815 219 if ( xsm_domain_create(d, ssidref) != 0 )
kfraser@15815 220 goto fail;
kfraser@15815 221
kfraser@14642 222 d->is_paused_by_controller = 1;
kfraser@14642 223 atomic_inc(&d->pause_count);
kfraser@14642 224
kaf24@8611 225 if ( evtchn_init(d) != 0 )
kfraser@15501 226 goto fail;
kfraser@15501 227 init_status |= INIT_evtchn;
kfraser@14642 228
kaf24@8611 229 if ( grant_table_create(d) != 0 )
kfraser@15501 230 goto fail;
kfraser@15501 231 init_status |= INIT_gnttab;
kaf24@8611 232 }
kaf24@8611 233
keir@16931 234 if ( arch_domain_create(d, domcr_flags) != 0 )
kfraser@15501 235 goto fail;
kfraser@15501 236 init_status |= INIT_arch;
kaf24@8611 237
kaf24@8468 238 d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
kaf24@8468 239 d->irq_caps = rangeset_new(d, "Interrupts", 0);
kaf24@8611 240 if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
kfraser@15501 241 goto fail;
kaf24@8467 242
kfraser@12260 243 if ( sched_init_domain(d) != 0 )
kfraser@15501 244 goto fail;
kfraser@12260 245
kaf24@8507 246 if ( !is_idle_domain(d) )
kaf24@1123 247 {
kfraser@14058 248 spin_lock(&domlist_update_lock);
kfraser@10280 249 pd = &domain_list; /* NB. domain_list maintained in order of domid. */
kaf24@4798 250 for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
kaf24@4877 251 if ( (*pd)->domain_id > d->domain_id )
kaf24@1123 252 break;
kaf24@4798 253 d->next_in_list = *pd;
kfraser@10280 254 d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
kfraser@14058 255 rcu_assign_pointer(*pd, d);
kfraser@14058 256 rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
kfraser@14058 257 spin_unlock(&domlist_update_lock);
kaf24@1123 258 }
iap10@236 259
kaf24@1590 260 return d;
kaf24@8467 261
kfraser@15501 262 fail:
kfraser@15825 263 d->is_dying = DOMDYING_dead;
kfraser@15501 264 atomic_set(&d->refcnt, DOMAIN_DESTROYED);
kfraser@15501 265 if ( init_status & INIT_arch )
kfraser@15501 266 arch_domain_destroy(d);
kfraser@15501 267 if ( init_status & INIT_gnttab )
kaf24@8611 268 grant_table_destroy(d);
kfraser@15501 269 if ( init_status & INIT_evtchn )
kaf24@8611 270 evtchn_destroy(d);
kaf24@8611 271 rangeset_domain_destroy(d);
kaf24@8467 272 free_domain(d);
kaf24@8467 273 return NULL;
iap10@236 274 }
iap10@236 275
kaf24@376 276
/*
 * Look up a domain by ID and take a general reference on it.
 * Returns NULL if no such domain exists or get_domain() fails (domain
 * being destroyed).  On success the caller must later put_domain().
 */
struct domain *get_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
        {
            if ( unlikely(!get_domain(d)) )
                d = NULL;
            break;
        }
    }

    rcu_read_unlock(&domlist_read_lock);

    return d;
}
iap10@236 299
iap10@236 300
/*
 * Look up a domain by ID under the RCU read lock.  On success the RCU
 * read lock is deliberately left held for the caller to release; on
 * failure the lock is dropped and NULL is returned.
 */
struct domain *rcu_lock_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
            return d; /* NB. still holding the RCU read lock. */
    }

    rcu_read_unlock(&domlist_read_lock);

    return NULL;
}
kfraser@14059 319
kfraser@14059 320
/*
 * Progress the (possibly multi-pass) destruction of domain @d.
 * Returns 0 when dead, -EAGAIN if resource relinquishment was preempted
 * and the caller must retry, or -EINVAL if @d is the calling domain.
 * The d->is_dying state machine advances alive -> dying -> dead.
 */
int domain_kill(struct domain *d)
{
    int rc = 0;

    /* A domain may not destroy itself. */
    if ( d == current->domain )
        return -EINVAL;

    /* Protected by domctl_lock. */
    switch ( d->is_dying )
    {
    case DOMDYING_alive:
        domain_pause(d);
        d->is_dying = DOMDYING_dying;
        evtchn_destroy(d);
        gnttab_release_mappings(d);
        /* fallthrough */
    case DOMDYING_dying:
        rc = domain_relinquish_resources(d);
        page_scrub_kick();
        if ( rc != 0 )
        {
            /* Relinquish is preemptible: only -EAGAIN is expected. */
            BUG_ON(rc != -EAGAIN);
            break;
        }
        d->is_dying = DOMDYING_dead;
        put_domain(d);
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
        /* fallthrough */
    case DOMDYING_dead:
        break;
    }

    return rc;
}
kaf24@376 355
kaf24@376 356
/*
 * Mark domain @d as crashed: log diagnostic state (unless @d is already
 * shutting down) and shut it down with reason SHUTDOWN_crash.
 */
void __domain_crash(struct domain *d)
{
    if ( d->is_shutting_down )
    {
        /* Print nothing: the domain is already shutting down. */
    }
    else if ( d == current->domain )
    {
        printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
               d->domain_id, current->vcpu_id, smp_processor_id());
        /* Register state is only meaningful for the crashing VCPU itself. */
        show_execution_state(guest_cpu_user_regs());
    }
    else
    {
        printk("Domain %d reported crashed by domain %d on cpu#%d:\n",
               d->domain_id, current->domain->domain_id, smp_processor_id());
    }

    domain_shutdown(d, SHUTDOWN_crash);
}
cl349@4339 377
cl349@4339 378
/*
 * Crash the current domain and never return: after flushing any pending
 * multicall state, spin servicing softirqs until descheduled.
 */
void __domain_crash_synchronous(void)
{
    __domain_crash(current->domain);

    /*
     * Flush multicall state before dying if a multicall is in progress.
     * This shouldn't be necessary, but some architectures are calling
     * domain_crash_synchronous() when they really shouldn't (i.e., from
     * within hypercall context).
     */
    if ( this_cpu(mc_state).flags != 0 )
    {
        dprintk(XENLOG_ERR,
                "FIXME: synchronous domain crash during a multicall!\n");
        this_cpu(mc_state).flags = 0;
    }

    /* No return: wait here to be descheduled. */
    for ( ; ; )
        do_softirq();
}
kaf24@1447 399
sos22@1951 400
/*
 * Initiate shutdown of domain @d with SHUTDOWN_* code @reason.
 * Every VCPU not currently deferring shutdown is paused; the shutdown is
 * finalised (via __domain_finalise_shutdown()) once all VCPUs are paused.
 * dom0 shutdown requests are diverted to dom0_shutdown().
 */
void domain_shutdown(struct domain *d, u8 reason)
{
    struct vcpu *v;

    if ( d->domain_id == 0 )
        dom0_shutdown(reason);

    spin_lock(&d->shutdown_lock);

    /* Only the first shutdown request does any work. */
    if ( d->is_shutting_down )
    {
        spin_unlock(&d->shutdown_lock);
        return;
    }

    d->is_shutting_down = 1;
    d->shutdown_code = reason;

    smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */

    for_each_vcpu ( d, v )
    {
        if ( v->defer_shutdown )
            continue;
        /*
         * Pause (rather than bump pause_count directly) so the VCPU is
         * actually descheduled; nosync because v may be 'current'.
         */
        vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
    }

    __domain_finalise_shutdown(d);

    spin_unlock(&d->shutdown_lock);
}
kfraser@14705 433
/*
 * Cancel a (possibly completed) shutdown of domain @d, unpausing every
 * VCPU that had been paused for it.
 */
void domain_resume(struct domain *d)
{
    struct vcpu *v;

    /*
     * Some code paths assume that shutdown status does not get reset under
     * their feet (e.g., some assertions make this assumption).
     */
    domain_pause(d);

    spin_lock(&d->shutdown_lock);

    d->is_shutting_down = d->is_shut_down = 0;

    for_each_vcpu ( d, v )
    {
        if ( v->paused_for_shutdown )
            vcpu_unpause(v);
        v->paused_for_shutdown = 0;
    }

    spin_unlock(&d->shutdown_lock);

    domain_unpause(d);
}
kfraser@14705 459
/*
 * Ask that @v not be paused if its domain starts shutting down.
 * Returns the resulting deferral status: 0 means a shutdown raced in and
 * cancelled the deferral, so the caller cannot rely on it.
 */
int vcpu_start_shutdown_deferral(struct vcpu *v)
{
    v->defer_shutdown = 1;
    smp_mb(); /* set deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);
    return v->defer_shutdown;
}
kfraser@14705 468
/* End @v's shutdown deferral; pause it now if a shutdown is in progress. */
void vcpu_end_shutdown_deferral(struct vcpu *v)
{
    v->defer_shutdown = 0;
    smp_mb(); /* clear deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);
}
tlh20@423 476
/*
 * Pause the current domain on behalf of a debugger and notify dom0.
 * Recorded as a controller pause so the normal controller-unpause path
 * resumes the domain.
 */
void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    atomic_inc(&d->pause_count);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d); /* race-free atomic_dec(&d->pause_count) */

    /* nosync: we are pausing our own domain and cannot wait on ourselves. */
    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}
kaf24@5318 491
/* Complete domain destroy after RCU readers are not holding old references. */
static void complete_domain_destroy(struct rcu_head *head)
{
    struct domain *d = container_of(head, struct domain, rcu);
    struct vcpu *v;
    int i;

    /* Tear down VCPUs in reverse creation order. */
    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
    {
        if ( (v = d->vcpu[i]) == NULL )
            continue;
        vcpu_destroy(v);
        sched_destroy_vcpu(v);
    }

    rangeset_domain_destroy(d);

    grant_table_destroy(d);

    arch_domain_destroy(d);

    sched_destroy_domain(d);

    /* VCPU structures are freed only after all per-VCPU teardown is done. */
    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
        if ( (v = d->vcpu[i]) != NULL )
            free_vcpu_struct(v);

    /* Drop the reference held on d->target, if any was taken. */
    if (d->target)
        put_domain(d->target);

    free_domain(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
kaf24@5318 526
/* Release resources belonging to task @p. */
void domain_destroy(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    BUG_ON(!d->is_dying);

    /* May be already destroyed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTROYED);
    /* Exactly one caller wins the 0 -> DOMAIN_DESTROYED transition. */
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    spin_lock(&domlist_update_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_in_list;
    rcu_assign_pointer(*pd, d->next_in_list);
    pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
    while ( *pd != d )
        pd = &(*pd)->next_in_hashbucket;
    rcu_assign_pointer(*pd, d->next_in_hashbucket);
    spin_unlock(&domlist_update_lock);

    /* Schedule RCU asynchronous completion of domain destroy. */
    call_rcu(&d->rcu, complete_domain_destroy);
}
iap10@236 557
/*
 * Pause @v, waiting until it is fully descheduled before returning.
 * Must not be applied to the current VCPU (cannot wait on ourselves).
 */
void vcpu_pause(struct vcpu *v)
{
    ASSERT(v != current);
    atomic_inc(&v->pause_count);
    vcpu_sleep_sync(v);
}
cl349@5246 564
/* Pause @v without waiting for it to be descheduled. */
void vcpu_pause_nosync(struct vcpu *v)
{
    atomic_inc(&v->pause_count);
    vcpu_sleep_nosync(v);
}
ack@13011 570
/* Drop one pause reference on @v; wake it when the last is dropped. */
void vcpu_unpause(struct vcpu *v)
{
    if ( atomic_dec_and_test(&v->pause_count) )
        vcpu_wake(v);
}
kfraser@10655 576
/*
 * Pause every VCPU of domain @d, waiting for each to be descheduled.
 * Must not be applied to the current domain.
 */
void domain_pause(struct domain *d)
{
    struct vcpu *v;

    ASSERT(d != current->domain);

    atomic_inc(&d->pause_count);

    for_each_vcpu( d, v )
        vcpu_sleep_sync(v);
}
cl349@5246 588
/* Drop one pause reference on @d; wake its VCPUs when the last is dropped. */
void domain_unpause(struct domain *d)
{
    struct vcpu *v;

    if ( atomic_dec_and_test(&d->pause_count) )
        for_each_vcpu( d, v )
            vcpu_wake(v);
}
cl349@5246 597
/*
 * Controller-initiated pause of @d.  Idempotent: if the controller pause
 * flag was already set, the extra pause reference is dropped again.
 */
void domain_pause_by_systemcontroller(struct domain *d)
{
    domain_pause(d);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}
cl349@5246 604
/* Undo a controller-initiated pause, if one is in effect (idempotent). */
void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( test_and_clear_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}
cl349@5246 610
/*
 * Load guest-supplied initial context into VCPU @vcpuid of domain @d.
 * The VCPU must not already be initialised.
 */
int boot_vcpu(struct domain *d, int vcpuid, vcpu_guest_context_u ctxt)
{
    struct vcpu *v = d->vcpu[vcpuid];

    BUG_ON(v->is_initialised);

    return arch_set_info_guest(v, ctxt);
}
kaf24@7170 619
/*
 * Reset @v to its post-creation state: offline, uninitialised, with FPU
 * and pending-NMI state cleared.  Returns an arch error code on failure.
 */
int vcpu_reset(struct vcpu *v)
{
    struct domain *d = v->domain;
    int rc;

    /* Quiesce the whole domain while VCPU state is rewritten. */
    domain_pause(d);
    LOCK_BIGLOCK(d);

    rc = arch_vcpu_reset(v);
    if ( rc != 0 )
        goto out;

    /* The reset VCPU stays offline until explicitly brought up again. */
    set_bit(_VPF_down, &v->pause_flags);

    v->fpu_initialised = 0;
    v->fpu_dirtied = 0;
    v->is_polling = 0;
    v->is_initialised = 0;
    v->nmi_pending = 0;
    v->nmi_masked = 0;
    clear_bit(_VPF_blocked, &v->pause_flags);

 out:
    UNLOCK_BIGLOCK(v->domain);
    domain_unpause(d);

    return rc;
}
kfraser@13536 648
kfraser@13536 649
/*
 * VCPUOP_* hypercall dispatcher, operating on VCPU @vcpuid of the
 * calling domain.  @arg is an operation-specific guest handle.
 * Returns 0 (or a positive result) on success, -errno on failure;
 * unknown commands are forwarded to arch_do_vcpu_op().
 */
long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    struct vcpu_guest_context *ctxt;
    long rc = 0;

    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
        return -EINVAL;

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    switch ( cmd )
    {
    case VCPUOP_initialise:
        /* Copy the guest-supplied initial context into Xen heap memory. */
        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
            return -ENOMEM;

        if ( copy_from_guest(ctxt, arg, 1) )
        {
            xfree(ctxt);
            return -EFAULT;
        }

        LOCK_BIGLOCK(d);
        rc = -EEXIST;
        if ( !v->is_initialised )
            rc = boot_vcpu(d, vcpuid, ctxt);
        UNLOCK_BIGLOCK(d);

        xfree(ctxt);
        break;

    case VCPUOP_up:
        /* Cannot bring up a VCPU that was never given a context. */
        if ( !v->is_initialised )
            return -EINVAL;

        if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
            vcpu_wake(v);

        break;

    case VCPUOP_down:
        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
            vcpu_sleep_nosync(v);
        break;

    case VCPUOP_is_up:
        rc = !test_bit(_VPF_down, &v->pause_flags);
        break;

    case VCPUOP_get_runstate_info:
    {
        struct vcpu_runstate_info runstate;
        vcpu_runstate_get(v, &runstate);
        if ( copy_to_guest(arg, &runstate, 1) )
            rc = -EFAULT;
        break;
    }

    case VCPUOP_set_periodic_timer:
    {
        struct vcpu_set_periodic_timer set;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        /* Reject periods shorter than 1ms. */
        if ( set.period_ns < MILLISECS(1) )
            return -EINVAL;

        v->periodic_period = set.period_ns;
        vcpu_force_reschedule(v);

        break;
    }

    case VCPUOP_stop_periodic_timer:
        v->periodic_period = 0;
        vcpu_force_reschedule(v);
        break;

    case VCPUOP_set_singleshot_timer:
    {
        struct vcpu_set_singleshot_timer set;

        /* A single-shot timer may only be set by the VCPU itself. */
        if ( v != current )
            return -EINVAL;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        /* Optionally refuse timeouts that are already in the past. */
        if ( (set.flags & VCPU_SSHOTTMR_future) &&
             (set.timeout_abs_ns < NOW()) )
            return -ETIME;

        /* Migrate the timer to this CPU if it was armed elsewhere. */
        if ( v->singleshot_timer.cpu != smp_processor_id() )
        {
            stop_timer(&v->singleshot_timer);
            v->singleshot_timer.cpu = smp_processor_id();
        }

        set_timer(&v->singleshot_timer, set.timeout_abs_ns);

        break;
    }

    case VCPUOP_stop_singleshot_timer:
        if ( v != current )
            return -EINVAL;

        stop_timer(&v->singleshot_timer);

        break;

    case VCPUOP_send_nmi:
        if ( !guest_handle_is_null(arg) )
            return -EINVAL;

        /* Only kick if an NMI was not already pending. */
        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);

        break;

    default:
        rc = arch_do_vcpu_op(cmd, v, arg);
        break;
    }

    return rc;
}
cl349@2926 781
cl349@2448 782 long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
cl349@2448 783 {
cl349@2448 784 if ( type > MAX_VMASST_TYPE )
cl349@2448 785 return -EINVAL;
cl349@2448 786
cl349@2448 787 switch ( cmd )
cl349@2448 788 {
cl349@2448 789 case VMASST_CMD_enable:
cl349@2448 790 set_bit(type, &p->vm_assist);
cl349@2448 791 return 0;
cl349@2448 792 case VMASST_CMD_disable:
cl349@2448 793 clear_bit(type, &p->vm_assist);
cl349@2448 794 return 0;
cl349@2448 795 }
cl349@2448 796
cl349@2448 797 return -ENOSYS;
cl349@2448 798 }
kaf24@3914 799
kaf24@3914 800 /*
kaf24@3914 801 * Local variables:
kaf24@3914 802 * mode: C
kaf24@3914 803 * c-set-style: "BSD"
kaf24@3914 804 * c-basic-offset: 4
kaf24@3914 805 * tab-width: 4
kaf24@3914 806 * indent-tabs-mode: nil
kaf24@3988 807 * End:
kaf24@3914 808 */