ia64/xen-unstable

view xen/common/domain.c @ 19686:50134a902c66

tmem: fix corner case crash on forcible domain destruction

When a tmem-enabled domain is destroyed, if the domain was
using a persistent pool, the domain destruction process's
scrubbing of pages races with tmem's attempts to gracefully
dismantle its data structures. Move tmem_destroy earlier in
the domain destruction process.

Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Jun 01 14:07:46 2009 +0100 (2009-06-01)
parents ae5bd69227d1
children 2f9e1348aa98
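
For context, the change this patch makes is visible in domain_kill() in the
source below: tmem_destroy() is called in the DOMDYING_alive phase, before
domain_relinquish_resources() and page_scrub_kick(), so tmem can dismantle its
pools before the pages backing them are scrubbed:

    case DOMDYING_alive:
        domain_pause(d);
        d->is_dying = DOMDYING_dying;
        spin_barrier(&d->domain_lock);
        evtchn_destroy(d);
        gnttab_release_mappings(d);
        tmem_destroy(d->tmem);   /* dismantle tmem before pages are scrubbed */
        d->tmem = NULL;
        /* fallthrough */
    case DOMDYING_dying:
        rc = domain_relinquish_resources(d);
        page_scrub_kick();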
line source
1 /******************************************************************************
2 * domain.c
3 *
4 * Generic domain-handling functions.
5 */
7 #include <xen/config.h>
8 #include <xen/compat.h>
9 #include <xen/init.h>
10 #include <xen/lib.h>
11 #include <xen/ctype.h>
12 #include <xen/errno.h>
13 #include <xen/sched.h>
14 #include <xen/domain.h>
15 #include <xen/mm.h>
16 #include <xen/event.h>
17 #include <xen/time.h>
18 #include <xen/console.h>
19 #include <xen/softirq.h>
20 #include <xen/domain_page.h>
21 #include <xen/rangeset.h>
22 #include <xen/guest_access.h>
23 #include <xen/hypercall.h>
24 #include <xen/delay.h>
25 #include <xen/shutdown.h>
26 #include <xen/percpu.h>
27 #include <xen/multicall.h>
28 #include <xen/rcupdate.h>
29 #include <acpi/cpufreq/cpufreq.h>
30 #include <asm/debugger.h>
31 #include <public/sched.h>
32 #include <public/vcpu.h>
33 #include <xsm/xsm.h>
34 #include <xen/trace.h>
35 #include <xen/tmem.h>
37 /* Linux config option: propagated to domain0 */
38 /* xen_processor_pmbits: Xen controls Cx, Px, ... */
39 unsigned int xen_processor_pmbits = XEN_PROCESSOR_PM_PX;
41 /* opt_dom0_vcpus_pin: If true, dom0 VCPUs are pinned. */
42 static unsigned int opt_dom0_vcpus_pin;
43 boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin);
45 /* set xen as default cpufreq */
46 enum cpufreq_controller cpufreq_controller = FREQCTL_xen;
48 static void __init setup_cpufreq_option(char *str)
49 {
50 char *arg;
52 if ( !strcmp(str, "dom0-kernel") )
53 {
54 xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
55 cpufreq_controller = FREQCTL_dom0_kernel;
56 opt_dom0_vcpus_pin = 1;
57 return;
58 }
60 if ( !strcmp(str, "none") )
61 {
62 xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
63 cpufreq_controller = FREQCTL_none;
64 return;
65 }
67 if ( (arg = strpbrk(str, ",:")) != NULL )
68 *arg++ = '\0';
70 if ( !strcmp(str, "xen") )
71 if ( arg && *arg )
72 cpufreq_cmdline_parse(arg);
73 }
74 custom_param("cpufreq", setup_cpufreq_option);
76 /* Protect updates/reads (resp.) of domain_list and domain_hash. */
77 DEFINE_SPINLOCK(domlist_update_lock);
78 DEFINE_RCU_READ_LOCK(domlist_read_lock);
80 #define DOMAIN_HASH_SIZE 256
81 #define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
82 static struct domain *domain_hash[DOMAIN_HASH_SIZE];
83 struct domain *domain_list;
85 struct domain *dom0;
87 struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;
89 int current_domain_id(void)
90 {
91 return current->domain->domain_id;
92 }
94 static void __domain_finalise_shutdown(struct domain *d)
95 {
96 struct vcpu *v;
98 BUG_ON(!spin_is_locked(&d->shutdown_lock));
100 if ( d->is_shut_down )
101 return;
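/* The shutdown is finalised only once every vcpu has paused for it. */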
103 for_each_vcpu ( d, v )
104 if ( !v->paused_for_shutdown )
105 return;
107 d->is_shut_down = 1;
108 if ( (d->shutdown_code == SHUTDOWN_suspend) && d->suspend_evtchn )
109 evtchn_send(d, d->suspend_evtchn);
110 else
111 send_guest_global_virq(dom0, VIRQ_DOM_EXC);
112 }
114 static void vcpu_check_shutdown(struct vcpu *v)
115 {
116 struct domain *d = v->domain;
118 spin_lock(&d->shutdown_lock);
120 if ( d->is_shutting_down )
121 {
122 if ( !v->paused_for_shutdown )
123 vcpu_pause_nosync(v);
124 v->paused_for_shutdown = 1;
125 v->defer_shutdown = 0;
126 __domain_finalise_shutdown(d);
127 }
129 spin_unlock(&d->shutdown_lock);
130 }
132 struct vcpu *alloc_vcpu(
133 struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
134 {
135 struct vcpu *v;
137 BUG_ON(d->vcpu[vcpu_id] != NULL);
139 if ( (v = alloc_vcpu_struct()) == NULL )
140 return NULL;
142 v->domain = d;
143 v->vcpu_id = vcpu_id;
145 spin_lock_init(&v->virq_lock);
147 if ( is_idle_domain(d) )
148 {
149 v->runstate.state = RUNSTATE_running;
150 }
151 else
152 {
153 v->runstate.state = RUNSTATE_offline;
154 v->runstate.state_entry_time = NOW();
155 set_bit(_VPF_down, &v->pause_flags);
156 v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
157 }
159 if ( sched_init_vcpu(v, cpu_id) != 0 )
160 {
161 free_vcpu_struct(v);
162 return NULL;
163 }
165 if ( vcpu_initialise(v) != 0 )
166 {
167 sched_destroy_vcpu(v);
168 free_vcpu_struct(v);
169 return NULL;
170 }
172 d->vcpu[vcpu_id] = v;
173 if ( vcpu_id != 0 )
174 d->vcpu[v->vcpu_id-1]->next_in_list = v;
176 /* Must be called after making new vcpu visible to for_each_vcpu(). */
177 vcpu_check_shutdown(v);
179 return v;
180 }
182 struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
183 {
184 struct domain *d;
185 struct vcpu *v;
186 unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;
188 if ( (v = idle_vcpu[cpu_id]) != NULL )
189 return v;
191 d = (vcpu_id == 0) ?
192 domain_create(IDLE_DOMAIN_ID, 0, 0) :
193 idle_vcpu[cpu_id - vcpu_id]->domain;
194 BUG_ON(d == NULL);
196 v = alloc_vcpu(d, vcpu_id, cpu_id);
197 idle_vcpu[cpu_id] = v;
199 return v;
200 }
202 static unsigned int extra_dom0_irqs, extra_domU_irqs = 8;
203 static void __init parse_extra_guest_irqs(const char *s)
204 {
205 if ( isdigit(*s) )
206 extra_domU_irqs = simple_strtoul(s, &s, 0);
207 if ( *s == ',' && isdigit(*++s) )
208 extra_dom0_irqs = simple_strtoul(s, &s, 0);
209 }
210 custom_param("extra_guest_irqs", parse_extra_guest_irqs);
212 struct domain *domain_create(
213 domid_t domid, unsigned int domcr_flags, ssidref_t ssidref)
214 {
215 struct domain *d, **pd;
216 enum { INIT_xsm = 1u<<0, INIT_rangeset = 1u<<1, INIT_evtchn = 1u<<2,
217 INIT_gnttab = 1u<<3, INIT_arch = 1u<<4 };
218 int init_status = 0;
220 if ( (d = alloc_domain_struct()) == NULL )
221 return NULL;
223 memset(d, 0, sizeof(*d));
224 d->domain_id = domid;
226 if ( xsm_alloc_security_domain(d) != 0 )
227 goto fail;
228 init_status |= INIT_xsm;
230 atomic_set(&d->refcnt, 1);
231 spin_lock_init(&d->domain_lock);
232 spin_lock_init(&d->page_alloc_lock);
233 spin_lock_init(&d->shutdown_lock);
234 spin_lock_init(&d->hypercall_deadlock_mutex);
235 INIT_PAGE_LIST_HEAD(&d->page_list);
236 INIT_PAGE_LIST_HEAD(&d->xenpage_list);
238 if ( domcr_flags & DOMCRF_hvm )
239 d->is_hvm = 1;
241 if ( (domid == 0) && opt_dom0_vcpus_pin )
242 d->is_pinned = 1;
244 if ( domcr_flags & DOMCRF_dummy )
245 return d;
247 rangeset_domain_initialise(d);
248 init_status |= INIT_rangeset;
250 if ( !is_idle_domain(d) )
251 {
252 if ( xsm_domain_create(d, ssidref) != 0 )
253 goto fail;
255 d->is_paused_by_controller = 1;
256 atomic_inc(&d->pause_count);
258 d->nr_pirqs = (nr_irqs +
259 (domid ? extra_domU_irqs :
260 extra_dom0_irqs ?: nr_irqs));
261 d->pirq_to_evtchn = xmalloc_array(u16, d->nr_pirqs);
262 d->pirq_mask = xmalloc_array(
263 unsigned long, BITS_TO_LONGS(d->nr_pirqs));
264 if ( (d->pirq_to_evtchn == NULL) || (d->pirq_mask == NULL) )
265 goto fail;
266 memset(d->pirq_to_evtchn, 0, d->nr_pirqs * sizeof(*d->pirq_to_evtchn));
267 bitmap_zero(d->pirq_mask, d->nr_pirqs);
269 if ( evtchn_init(d) != 0 )
270 goto fail;
271 init_status |= INIT_evtchn;
273 if ( grant_table_create(d) != 0 )
274 goto fail;
275 init_status |= INIT_gnttab;
276 }
278 if ( arch_domain_create(d, domcr_flags) != 0 )
279 goto fail;
280 init_status |= INIT_arch;
282 d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
283 d->irq_caps = rangeset_new(d, "Interrupts", 0);
284 if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
285 goto fail;
287 if ( sched_init_domain(d) != 0 )
288 goto fail;
290 if ( !is_idle_domain(d) )
291 {
292 spin_lock(&domlist_update_lock);
293 pd = &domain_list; /* NB. domain_list maintained in order of domid. */
294 for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
295 if ( (*pd)->domain_id > d->domain_id )
296 break;
297 d->next_in_list = *pd;
298 d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
299 rcu_assign_pointer(*pd, d);
300 rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
301 spin_unlock(&domlist_update_lock);
302 }
304 return d;
306 fail:
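/* Unwind whatever was initialised, in reverse order, as recorded in init_status. */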
307 d->is_dying = DOMDYING_dead;
308 atomic_set(&d->refcnt, DOMAIN_DESTROYED);
309 if ( init_status & INIT_arch )
310 arch_domain_destroy(d);
311 if ( init_status & INIT_gnttab )
312 grant_table_destroy(d);
313 if ( init_status & INIT_evtchn )
314 evtchn_destroy(d);
315 if ( init_status & INIT_rangeset )
316 rangeset_domain_destroy(d);
317 if ( init_status & INIT_xsm )
318 xsm_free_security_domain(d);
319 xfree(d->pirq_mask);
320 xfree(d->pirq_to_evtchn);
321 free_domain_struct(d);
322 return NULL;
323 }
326 struct domain *get_domain_by_id(domid_t dom)
327 {
328 struct domain *d;
330 rcu_read_lock(&domlist_read_lock);
332 for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
333 d != NULL;
334 d = rcu_dereference(d->next_in_hashbucket) )
335 {
336 if ( d->domain_id == dom )
337 {
338 if ( unlikely(!get_domain(d)) )
339 d = NULL;
340 break;
341 }
342 }
344 rcu_read_unlock(&domlist_read_lock);
346 return d;
347 }
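/* As get_domain_by_id(), but on success returns with the RCU read lock still held. */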
350 struct domain *rcu_lock_domain_by_id(domid_t dom)
351 {
352 struct domain *d;
354 rcu_read_lock(&domlist_read_lock);
356 for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
357 d != NULL;
358 d = rcu_dereference(d->next_in_hashbucket) )
359 {
360 if ( d->domain_id == dom )
361 return d;
362 }
364 rcu_read_unlock(&domlist_read_lock);
366 return NULL;
367 }
369 int rcu_lock_target_domain_by_id(domid_t dom, struct domain **d)
370 {
371 if ( dom == DOMID_SELF )
372 {
373 *d = rcu_lock_current_domain();
374 return 0;
375 }
377 if ( (*d = rcu_lock_domain_by_id(dom)) == NULL )
378 return -ESRCH;
380 if ( !IS_PRIV_FOR(current->domain, *d) )
381 {
382 rcu_unlock_domain(*d);
383 return -EPERM;
384 }
386 return 0;
387 }
389 int domain_kill(struct domain *d)
390 {
391 int rc = 0;
393 if ( d == current->domain )
394 return -EINVAL;
396 /* Protected by domctl_lock. */
397 switch ( d->is_dying )
398 {
399 case DOMDYING_alive:
400 domain_pause(d);
401 d->is_dying = DOMDYING_dying;
402 spin_barrier(&d->domain_lock);
403 evtchn_destroy(d);
404 gnttab_release_mappings(d);
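/* Dismantle tmem state now, before pages are relinquished and scrubbed (see commit message). */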
405 tmem_destroy(d->tmem);
406 d->tmem = NULL;
407 /* fallthrough */
408 case DOMDYING_dying:
409 rc = domain_relinquish_resources(d);
410 page_scrub_kick();
411 if ( rc != 0 )
412 {
413 BUG_ON(rc != -EAGAIN);
414 break;
415 }
416 d->is_dying = DOMDYING_dead;
417 put_domain(d);
418 send_guest_global_virq(dom0, VIRQ_DOM_EXC);
419 /* fallthrough */
420 case DOMDYING_dead:
421 break;
422 }
424 return rc;
425 }
428 void __domain_crash(struct domain *d)
429 {
430 if ( d->is_shutting_down )
431 {
432 /* Print nothing: the domain is already shutting down. */
433 }
434 else if ( d == current->domain )
435 {
436 printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
437 d->domain_id, current->vcpu_id, smp_processor_id());
438 show_execution_state(guest_cpu_user_regs());
439 }
440 else
441 {
442 printk("Domain %d reported crashed by domain %d on cpu#%d:\n",
443 d->domain_id, current->domain->domain_id, smp_processor_id());
444 }
446 domain_shutdown(d, SHUTDOWN_crash);
447 }
450 void __domain_crash_synchronous(void)
451 {
452 __domain_crash(current->domain);
454 /*
455 * Flush multicall state before dying if a multicall is in progress.
456 * This shouldn't be necessary, but some architectures are calling
457 * domain_crash_synchronous() when they really shouldn't (i.e., from
458 * within hypercall context).
459 */
460 if ( this_cpu(mc_state).flags != 0 )
461 {
462 dprintk(XENLOG_ERR,
463 "FIXME: synchronous domain crash during a multicall!\n");
464 this_cpu(mc_state).flags = 0;
465 }
467 vcpu_end_shutdown_deferral(current);
469 for ( ; ; )
470 do_softirq();
471 }
474 void domain_shutdown(struct domain *d, u8 reason)
475 {
476 struct vcpu *v;
478 if ( d->domain_id == 0 )
479 dom0_shutdown(reason);
481 spin_lock(&d->shutdown_lock);
483 if ( d->is_shutting_down )
484 {
485 spin_unlock(&d->shutdown_lock);
486 return;
487 }
489 d->is_shutting_down = 1;
490 d->shutdown_code = reason;
492 smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */
494 for_each_vcpu ( d, v )
495 {
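/* A crash overrides any shutdown deferral; otherwise deferring vcpus are skipped. */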
496 if ( reason == SHUTDOWN_crash )
497 v->defer_shutdown = 0;
498 else if ( v->defer_shutdown )
499 continue;
500 vcpu_pause_nosync(v);
501 v->paused_for_shutdown = 1;
502 }
504 __domain_finalise_shutdown(d);
506 spin_unlock(&d->shutdown_lock);
507 }
509 void domain_resume(struct domain *d)
510 {
511 struct vcpu *v;
513 /*
514 * Some code paths assume that shutdown status does not get reset under
515 * their feet (e.g., some assertions make this assumption).
516 */
517 domain_pause(d);
519 spin_lock(&d->shutdown_lock);
521 d->is_shutting_down = d->is_shut_down = 0;
523 for_each_vcpu ( d, v )
524 {
525 if ( v->paused_for_shutdown )
526 vcpu_unpause(v);
527 v->paused_for_shutdown = 0;
528 }
530 spin_unlock(&d->shutdown_lock);
532 domain_unpause(d);
533 }
535 int vcpu_start_shutdown_deferral(struct vcpu *v)
536 {
537 if ( v->defer_shutdown )
538 return 1;
540 v->defer_shutdown = 1;
541 smp_mb(); /* set deferral status /then/ check for shutdown */
542 if ( unlikely(v->domain->is_shutting_down) )
543 vcpu_check_shutdown(v);
545 return v->defer_shutdown;
546 }
548 void vcpu_end_shutdown_deferral(struct vcpu *v)
549 {
550 v->defer_shutdown = 0;
551 smp_mb(); /* clear deferral status /then/ check for shutdown */
552 if ( unlikely(v->domain->is_shutting_down) )
553 vcpu_check_shutdown(v);
554 }
556 void domain_pause_for_debugger(void)
557 {
558 struct domain *d = current->domain;
559 struct vcpu *v;
561 atomic_inc(&d->pause_count);
562 if ( test_and_set_bool(d->is_paused_by_controller) )
563 domain_unpause(d); /* race-free atomic_dec(&d->pause_count) */
565 for_each_vcpu ( d, v )
566 vcpu_sleep_nosync(v);
568 send_guest_global_virq(dom0, VIRQ_DEBUGGER);
569 }
571 /* Complete domain destroy after RCU readers are not holding old references. */
572 static void complete_domain_destroy(struct rcu_head *head)
573 {
574 struct domain *d = container_of(head, struct domain, rcu);
575 struct vcpu *v;
576 int i;
578 for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
579 {
580 if ( (v = d->vcpu[i]) == NULL )
581 continue;
582 vcpu_destroy(v);
583 sched_destroy_vcpu(v);
584 }
586 grant_table_destroy(d);
588 arch_domain_destroy(d);
590 rangeset_domain_destroy(d);
592 sched_destroy_domain(d);
594 /* Free page used by xen oprofile buffer. */
595 free_xenoprof_pages(d);
597 for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
598 if ( (v = d->vcpu[i]) != NULL )
599 free_vcpu_struct(v);
601 if ( d->target != NULL )
602 put_domain(d->target);
604 xfree(d->pirq_mask);
605 xfree(d->pirq_to_evtchn);
607 xsm_free_security_domain(d);
608 free_domain_struct(d);
610 send_guest_global_virq(dom0, VIRQ_DOM_EXC);
611 }
613 /* Release resources belonging to task @d. */
614 void domain_destroy(struct domain *d)
615 {
616 struct domain **pd;
617 atomic_t old, new;
619 BUG_ON(!d->is_dying);
621 /* May be already destroyed, or get_domain() can race us. */
622 _atomic_set(old, 0);
623 _atomic_set(new, DOMAIN_DESTROYED);
624 old = atomic_compareandswap(old, new, &d->refcnt);
625 if ( _atomic_read(old) != 0 )
626 return;
628 /* Delete from task list and task hashtable. */
629 TRACE_1D(TRC_SCHED_DOM_REM, d->domain_id);
630 spin_lock(&domlist_update_lock);
631 pd = &domain_list;
632 while ( *pd != d )
633 pd = &(*pd)->next_in_list;
634 rcu_assign_pointer(*pd, d->next_in_list);
635 pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
636 while ( *pd != d )
637 pd = &(*pd)->next_in_hashbucket;
638 rcu_assign_pointer(*pd, d->next_in_hashbucket);
639 spin_unlock(&domlist_update_lock);
641 /* Schedule RCU asynchronous completion of domain destroy. */
642 call_rcu(&d->rcu, complete_domain_destroy);
643 }
645 void vcpu_pause(struct vcpu *v)
646 {
647 ASSERT(v != current);
648 atomic_inc(&v->pause_count);
649 vcpu_sleep_sync(v);
650 }
652 void vcpu_pause_nosync(struct vcpu *v)
653 {
654 atomic_inc(&v->pause_count);
655 vcpu_sleep_nosync(v);
656 }
658 void vcpu_unpause(struct vcpu *v)
659 {
660 if ( atomic_dec_and_test(&v->pause_count) )
661 vcpu_wake(v);
662 }
664 void domain_pause(struct domain *d)
665 {
666 struct vcpu *v;
668 ASSERT(d != current->domain);
670 atomic_inc(&d->pause_count);
672 for_each_vcpu( d, v )
673 vcpu_sleep_sync(v);
674 }
676 void domain_unpause(struct domain *d)
677 {
678 struct vcpu *v;
680 if ( atomic_dec_and_test(&d->pause_count) )
681 for_each_vcpu( d, v )
682 vcpu_wake(v);
683 }
685 void domain_pause_by_systemcontroller(struct domain *d)
686 {
687 domain_pause(d);
688 if ( test_and_set_bool(d->is_paused_by_controller) )
689 domain_unpause(d);
690 }
692 void domain_unpause_by_systemcontroller(struct domain *d)
693 {
694 if ( test_and_clear_bool(d->is_paused_by_controller) )
695 domain_unpause(d);
696 }
698 int boot_vcpu(struct domain *d, int vcpuid, vcpu_guest_context_u ctxt)
699 {
700 struct vcpu *v = d->vcpu[vcpuid];
702 BUG_ON(v->is_initialised);
704 return arch_set_info_guest(v, ctxt);
705 }
707 void vcpu_reset(struct vcpu *v)
708 {
709 struct domain *d = v->domain;
711 vcpu_pause(v);
712 domain_lock(d);
714 arch_vcpu_reset(v);
716 set_bit(_VPF_down, &v->pause_flags);
718 clear_bit(v->vcpu_id, d->poll_mask);
719 v->poll_evtchn = 0;
721 v->fpu_initialised = 0;
722 v->fpu_dirtied = 0;
723 v->is_initialised = 0;
724 v->nmi_pending = 0;
725 v->mce_pending = 0;
726 v->old_trap_priority = VCPU_TRAP_NONE;
727 v->trap_priority = VCPU_TRAP_NONE;
728 clear_bit(_VPF_blocked, &v->pause_flags);
730 domain_unlock(v->domain);
731 vcpu_unpause(v);
732 }
735 long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
736 {
737 struct domain *d = current->domain;
738 struct vcpu *v;
739 struct vcpu_guest_context *ctxt;
740 long rc = 0;
742 if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
743 return -EINVAL;
745 if ( (v = d->vcpu[vcpuid]) == NULL )
746 return -ENOENT;
748 switch ( cmd )
749 {
750 case VCPUOP_initialise:
751 if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
752 return -ENOMEM;
754 if ( copy_from_guest(ctxt, arg, 1) )
755 {
756 xfree(ctxt);
757 return -EFAULT;
758 }
760 domain_lock(d);
761 rc = -EEXIST;
762 if ( !v->is_initialised )
763 rc = boot_vcpu(d, vcpuid, ctxt);
764 domain_unlock(d);
766 xfree(ctxt);
767 break;
769 case VCPUOP_up:
770 if ( !v->is_initialised )
771 return -EINVAL;
773 if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
774 vcpu_wake(v);
776 break;
778 case VCPUOP_down:
779 if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
780 vcpu_sleep_nosync(v);
781 break;
783 case VCPUOP_is_up:
784 rc = !test_bit(_VPF_down, &v->pause_flags);
785 break;
787 case VCPUOP_get_runstate_info:
788 {
789 struct vcpu_runstate_info runstate;
790 vcpu_runstate_get(v, &runstate);
791 if ( copy_to_guest(arg, &runstate, 1) )
792 rc = -EFAULT;
793 break;
794 }
796 case VCPUOP_set_periodic_timer:
797 {
798 struct vcpu_set_periodic_timer set;
800 if ( copy_from_guest(&set, arg, 1) )
801 return -EFAULT;
803 if ( set.period_ns < MILLISECS(1) )
804 return -EINVAL;
806 v->periodic_period = set.period_ns;
807 vcpu_force_reschedule(v);
809 break;
810 }
812 case VCPUOP_stop_periodic_timer:
813 v->periodic_period = 0;
814 vcpu_force_reschedule(v);
815 break;
817 case VCPUOP_set_singleshot_timer:
818 {
819 struct vcpu_set_singleshot_timer set;
821 if ( v != current )
822 return -EINVAL;
824 if ( copy_from_guest(&set, arg, 1) )
825 return -EFAULT;
827 if ( (set.flags & VCPU_SSHOTTMR_future) &&
828 (set.timeout_abs_ns < NOW()) )
829 return -ETIME;
831 if ( v->singleshot_timer.cpu != smp_processor_id() )
832 {
833 stop_timer(&v->singleshot_timer);
834 v->singleshot_timer.cpu = smp_processor_id();
835 }
837 set_timer(&v->singleshot_timer, set.timeout_abs_ns);
839 break;
840 }
842 case VCPUOP_stop_singleshot_timer:
843 if ( v != current )
844 return -EINVAL;
846 stop_timer(&v->singleshot_timer);
848 break;
850 case VCPUOP_send_nmi:
851 if ( !guest_handle_is_null(arg) )
852 return -EINVAL;
854 if ( !test_and_set_bool(v->nmi_pending) )
855 vcpu_kick(v);
857 break;
859 default:
860 rc = arch_do_vcpu_op(cmd, v, arg);
861 break;
862 }
864 return rc;
865 }
867 long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
868 {
869 if ( type > MAX_VMASST_TYPE )
870 return -EINVAL;
872 switch ( cmd )
873 {
874 case VMASST_CMD_enable:
875 set_bit(type, &p->vm_assist);
876 return 0;
877 case VMASST_CMD_disable:
878 clear_bit(type, &p->vm_assist);
879 return 0;
880 }
882 return -ENOSYS;
883 }
885 /*
886 * Local variables:
887 * mode: C
888 * c-set-style: "BSD"
889 * c-basic-offset: 4
890 * tab-width: 4
891 * indent-tabs-mode: nil
892 * End:
893 */