ia64/xen-unstable

xen/common/domain.c @ 19261:c62b453f27d5

When a domain crashes, ignore shutdown deferrals.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

author   Keir Fraser <keir.fraser@citrix.com>
date     Mon Mar 02 14:09:21 2009 +0000 (2009-03-02)
parents  efef232bbbdb
children 1282561a2bf2

/******************************************************************************
 * domain.c
 *
 * Generic domain-handling functions.
 */

#include <xen/config.h>
#include <xen/compat.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/mm.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/console.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/rangeset.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/delay.h>
#include <xen/shutdown.h>
#include <xen/percpu.h>
#include <xen/multicall.h>
#include <xen/rcupdate.h>
#include <acpi/cpufreq/cpufreq.h>
#include <asm/debugger.h>
#include <public/sched.h>
#include <public/vcpu.h>
#include <xsm/xsm.h>

/* Linux config option: propagated to domain0. */
/* xen_processor_pmbits: Xen controls Cx, Px, ... */
unsigned int xen_processor_pmbits = XEN_PROCESSOR_PM_PX;

/* opt_dom0_vcpus_pin: If true, dom0 VCPUs are pinned. */
static unsigned int opt_dom0_vcpus_pin;
boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin);

/* Set Xen as the default cpufreq controller. */
enum cpufreq_controller cpufreq_controller = FREQCTL_xen;

static void __init setup_cpufreq_option(char *str)
{
    char *arg;

    if ( !strcmp(str, "dom0-kernel") )
    {
        xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
        cpufreq_controller = FREQCTL_dom0_kernel;
        opt_dom0_vcpus_pin = 1;
        return;
    }

    if ( !strcmp(str, "none") )
    {
        xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
        cpufreq_controller = FREQCTL_none;
        return;
    }

    if ( (arg = strpbrk(str, ",:")) != NULL )
        *arg++ = '\0';

    if ( !strcmp(str, "xen") )
        if ( arg && *arg )
            cpufreq_cmdline_parse(arg);
}
custom_param("cpufreq", setup_cpufreq_option);

/* Protect updates/reads (resp.) of domain_list and domain_hash. */
DEFINE_SPINLOCK(domlist_update_lock);
DEFINE_RCU_READ_LOCK(domlist_read_lock);

#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
static struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;

struct domain *dom0;

struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;

int current_domain_id(void)
{
    return current->domain->domain_id;
}

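/*
 * Complete a pending shutdown once every VCPU has been paused for it:
 * notify the suspend event channel for SHUTDOWN_suspend, otherwise raise
 * VIRQ_DOM_EXC at dom0. Caller must hold d->shutdown_lock.
 */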
static void __domain_finalise_shutdown(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(!spin_is_locked(&d->shutdown_lock));

    if ( d->is_shut_down )
        return;

    for_each_vcpu ( d, v )
        if ( !v->paused_for_shutdown )
            return;

    d->is_shut_down = 1;
    if ( (d->shutdown_code == SHUTDOWN_suspend) && d->suspend_evtchn )
        evtchn_send(d, d->suspend_evtchn);
    else
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}

static void vcpu_check_shutdown(struct vcpu *v)
{
    struct domain *d = v->domain;

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        if ( !v->paused_for_shutdown )
            vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
        v->defer_shutdown = 0;
        __domain_finalise_shutdown(d);
    }

    spin_unlock(&d->shutdown_lock);
}

struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    BUG_ON(d->vcpu[vcpu_id] != NULL);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;

    spin_lock_init(&v->virq_lock);

    if ( is_idle_domain(d) )
    {
        v->runstate.state = RUNSTATE_running;
    }
    else
    {
        v->runstate.state = RUNSTATE_offline;
        v->runstate.state_entry_time = NOW();
        set_bit(_VPF_down, &v->pause_flags);
        v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
    }

    if ( sched_init_vcpu(v, cpu_id) != 0 )
    {
        free_vcpu_struct(v);
        return NULL;
    }

    if ( vcpu_initialise(v) != 0 )
    {
        sched_destroy_vcpu(v);
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
        d->vcpu[v->vcpu_id-1]->next_in_list = v;

    /* Must be called after making new vcpu visible to for_each_vcpu(). */
    vcpu_check_shutdown(v);

    return v;
}

struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;

    if ( (v = idle_vcpu[cpu_id]) != NULL )
        return v;

    d = (vcpu_id == 0) ?
        domain_create(IDLE_DOMAIN_ID, 0, 0) :
        idle_vcpu[cpu_id - vcpu_id]->domain;
    BUG_ON(d == NULL);

    v = alloc_vcpu(d, vcpu_id, cpu_id);
    idle_vcpu[cpu_id] = v;

    return v;
}

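/*
 * Allocate and initialise a new domain. init_status records which
 * subsystems have been set up so that the 'fail' path only tears down
 * what was actually initialised.
 */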
struct domain *domain_create(
    domid_t domid, unsigned int domcr_flags, ssidref_t ssidref)
{
    struct domain *d, **pd;
    enum { INIT_xsm = 1u<<0, INIT_rangeset = 1u<<1, INIT_evtchn = 1u<<2,
           INIT_gnttab = 1u<<3, INIT_arch = 1u<<4 };
    int init_status = 0;

    if ( (d = alloc_domain_struct()) == NULL )
        return NULL;

    memset(d, 0, sizeof(*d));
    d->domain_id = domid;

    if ( xsm_alloc_security_domain(d) != 0 )
        goto fail;
    init_status |= INIT_xsm;

    atomic_set(&d->refcnt, 1);
    spin_lock_init(&d->domain_lock);
    spin_lock_init(&d->page_alloc_lock);
    spin_lock_init(&d->shutdown_lock);
    spin_lock_init(&d->hypercall_deadlock_mutex);
    INIT_PAGE_LIST_HEAD(&d->page_list);
    INIT_PAGE_LIST_HEAD(&d->xenpage_list);

    if ( domcr_flags & DOMCRF_hvm )
        d->is_hvm = 1;

    if ( (domid == 0) && opt_dom0_vcpus_pin )
        d->is_pinned = 1;

    if ( domcr_flags & DOMCRF_dummy )
        return d;

    rangeset_domain_initialise(d);
    init_status |= INIT_rangeset;

    if ( !is_idle_domain(d) )
    {
        if ( xsm_domain_create(d, ssidref) != 0 )
            goto fail;

        d->is_paused_by_controller = 1;
        atomic_inc(&d->pause_count);

        if ( evtchn_init(d) != 0 )
            goto fail;
        init_status |= INIT_evtchn;

        if ( grant_table_create(d) != 0 )
            goto fail;
        init_status |= INIT_gnttab;
    }

    if ( arch_domain_create(d, domcr_flags) != 0 )
        goto fail;
    init_status |= INIT_arch;

    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
    d->irq_caps = rangeset_new(d, "Interrupts", 0);
    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
        goto fail;

    if ( sched_init_domain(d) != 0 )
        goto fail;

    if ( !is_idle_domain(d) )
    {
        spin_lock(&domlist_update_lock);
        pd = &domain_list; /* NB. domain_list maintained in order of domid. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
        rcu_assign_pointer(*pd, d);
        rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
        spin_unlock(&domlist_update_lock);
    }

    return d;

 fail:
    d->is_dying = DOMDYING_dead;
    atomic_set(&d->refcnt, DOMAIN_DESTROYED);
    if ( init_status & INIT_arch )
        arch_domain_destroy(d);
    if ( init_status & INIT_gnttab )
        grant_table_destroy(d);
    if ( init_status & INIT_evtchn )
        evtchn_destroy(d);
    if ( init_status & INIT_rangeset )
        rangeset_domain_destroy(d);
    if ( init_status & INIT_xsm )
        xsm_free_security_domain(d);
    free_domain_struct(d);
    return NULL;
}

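/*
 * get_domain_by_id() returns with a general reference taken on the domain
 * (drop it with put_domain()); rcu_lock_domain_by_id() instead returns
 * with the RCU read lock held (release it with rcu_unlock_domain()).
 */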
struct domain *get_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
        {
            if ( unlikely(!get_domain(d)) )
                d = NULL;
            break;
        }
    }

    rcu_read_unlock(&domlist_read_lock);

    return d;
}

struct domain *rcu_lock_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
            return d;
    }

    rcu_read_unlock(&domlist_read_lock);

    return NULL;
}

int rcu_lock_target_domain_by_id(domid_t dom, struct domain **d)
{
    if ( dom == DOMID_SELF )
    {
        *d = rcu_lock_current_domain();
        return 0;
    }

    if ( (*d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    if ( !IS_PRIV_FOR(current->domain, *d) )
    {
        rcu_unlock_domain(*d);
        return -EPERM;
    }

    return 0;
}

int domain_kill(struct domain *d)
{
    int rc = 0;

    if ( d == current->domain )
        return -EINVAL;

    /* Protected by domctl_lock. */
    switch ( d->is_dying )
    {
    case DOMDYING_alive:
        domain_pause(d);
        d->is_dying = DOMDYING_dying;
        spin_barrier(&d->domain_lock);
        evtchn_destroy(d);
        gnttab_release_mappings(d);
        /* fallthrough */
    case DOMDYING_dying:
        rc = domain_relinquish_resources(d);
        page_scrub_kick();
        if ( rc != 0 )
        {
            BUG_ON(rc != -EAGAIN);
            break;
        }
        d->is_dying = DOMDYING_dead;
        put_domain(d);
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
        /* fallthrough */
    case DOMDYING_dead:
        break;
    }

    return rc;
}

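/*
 * Mark a domain as crashed: log the crash (with register state if the
 * crashing VCPU is current) and shut the domain down with SHUTDOWN_crash.
 */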
void __domain_crash(struct domain *d)
{
    if ( d->is_shutting_down )
    {
        /* Print nothing: the domain is already shutting down. */
    }
    else if ( d == current->domain )
    {
        printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
               d->domain_id, current->vcpu_id, smp_processor_id());
        show_execution_state(guest_cpu_user_regs());
    }
    else
    {
        printk("Domain %d reported crashed by domain %d on cpu#%d:\n",
               d->domain_id, current->domain->domain_id, smp_processor_id());
    }

    domain_shutdown(d, SHUTDOWN_crash);
}

void __domain_crash_synchronous(void)
{
    __domain_crash(current->domain);

    /*
     * Flush multicall state before dying if a multicall is in progress.
     * This shouldn't be necessary, but some architectures are calling
     * domain_crash_synchronous() when they really shouldn't (i.e., from
     * within hypercall context).
     */
    if ( this_cpu(mc_state).flags != 0 )
    {
        dprintk(XENLOG_ERR,
                "FIXME: synchronous domain crash during a multicall!\n");
        this_cpu(mc_state).flags = 0;
    }

    vcpu_end_shutdown_deferral(current);

    for ( ; ; )
        do_softirq();
}

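/*
 * Pause every VCPU and record the shutdown reason. VCPUs that have
 * deferred shutdown are normally skipped, but SHUTDOWN_crash overrides
 * any deferral so a crashed domain is always brought down.
 */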
void domain_shutdown(struct domain *d, u8 reason)
{
    struct vcpu *v;

    if ( d->domain_id == 0 )
        dom0_shutdown(reason);

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        spin_unlock(&d->shutdown_lock);
        return;
    }

    d->is_shutting_down = 1;
    d->shutdown_code = reason;

    smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */

    for_each_vcpu ( d, v )
    {
        if ( reason == SHUTDOWN_crash )
            v->defer_shutdown = 0;
        else if ( v->defer_shutdown )
            continue;
        vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
    }

    __domain_finalise_shutdown(d);

    spin_unlock(&d->shutdown_lock);
}

void domain_resume(struct domain *d)
{
    struct vcpu *v;

    /*
     * Some code paths assume that shutdown status does not get reset under
     * their feet (e.g., some assertions make this assumption).
     */
    domain_pause(d);

    spin_lock(&d->shutdown_lock);

    d->is_shutting_down = d->is_shut_down = 0;

    for_each_vcpu ( d, v )
    {
        if ( v->paused_for_shutdown )
            vcpu_unpause(v);
        v->paused_for_shutdown = 0;
    }

    spin_unlock(&d->shutdown_lock);

    domain_unpause(d);
}

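/*
 * Shutdown deferral: a VCPU may ask that a domain shutdown not pause it
 * yet. The request is refused (returns 0) if the domain has already begun
 * shutting down; ending the deferral re-checks for a pending shutdown.
 */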
int vcpu_start_shutdown_deferral(struct vcpu *v)
{
    if ( v->defer_shutdown )
        return 1;

    v->defer_shutdown = 1;
    smp_mb(); /* set deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);

    return v->defer_shutdown;
}

void vcpu_end_shutdown_deferral(struct vcpu *v)
{
    v->defer_shutdown = 0;
    smp_mb(); /* clear deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);
}

void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    atomic_inc(&d->pause_count);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d); /* race-free atomic_dec(&d->pause_count) */

    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}

/* Complete domain destroy after RCU readers are not holding old references. */
static void complete_domain_destroy(struct rcu_head *head)
{
    struct domain *d = container_of(head, struct domain, rcu);
    struct vcpu *v;
    int i;

    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
    {
        if ( (v = d->vcpu[i]) == NULL )
            continue;
        vcpu_destroy(v);
        sched_destroy_vcpu(v);
    }

    grant_table_destroy(d);

    arch_domain_destroy(d);

    rangeset_domain_destroy(d);

    sched_destroy_domain(d);

    /* Free page used by xen oprofile buffer. */
    free_xenoprof_pages(d);

    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
        if ( (v = d->vcpu[i]) != NULL )
            free_vcpu_struct(v);

    if ( d->target != NULL )
        put_domain(d->target);

    xsm_free_security_domain(d);
    free_domain_struct(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}

/* Release resources belonging to domain @d. */
void domain_destroy(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    BUG_ON(!d->is_dying);

    /* May be already destroyed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTROYED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    spin_lock(&domlist_update_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_in_list;
    rcu_assign_pointer(*pd, d->next_in_list);
    pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
    while ( *pd != d )
        pd = &(*pd)->next_in_hashbucket;
    rcu_assign_pointer(*pd, d->next_in_hashbucket);
    spin_unlock(&domlist_update_lock);

    /* Schedule RCU asynchronous completion of domain destroy. */
    call_rcu(&d->rcu, complete_domain_destroy);
}

void vcpu_pause(struct vcpu *v)
{
    ASSERT(v != current);
    atomic_inc(&v->pause_count);
    vcpu_sleep_sync(v);
}

void vcpu_pause_nosync(struct vcpu *v)
{
    atomic_inc(&v->pause_count);
    vcpu_sleep_nosync(v);
}

void vcpu_unpause(struct vcpu *v)
{
    if ( atomic_dec_and_test(&v->pause_count) )
        vcpu_wake(v);
}

void domain_pause(struct domain *d)
{
    struct vcpu *v;

    ASSERT(d != current->domain);

    atomic_inc(&d->pause_count);

    for_each_vcpu( d, v )
        vcpu_sleep_sync(v);
}

void domain_unpause(struct domain *d)
{
    struct vcpu *v;

    if ( atomic_dec_and_test(&d->pause_count) )
        for_each_vcpu( d, v )
            vcpu_wake(v);
}

void domain_pause_by_systemcontroller(struct domain *d)
{
    domain_pause(d);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}

void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( test_and_clear_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}

int boot_vcpu(struct domain *d, int vcpuid, vcpu_guest_context_u ctxt)
{
    struct vcpu *v = d->vcpu[vcpuid];

    BUG_ON(v->is_initialised);

    return arch_set_info_guest(v, ctxt);
}

void vcpu_reset(struct vcpu *v)
{
    struct domain *d = v->domain;

    vcpu_pause(v);
    domain_lock(d);

    arch_vcpu_reset(v);

    set_bit(_VPF_down, &v->pause_flags);

    clear_bit(v->vcpu_id, d->poll_mask);
    v->poll_evtchn = 0;

    v->fpu_initialised = 0;
    v->fpu_dirtied = 0;
    v->is_initialised = 0;
    v->nmi_pending = 0;
    v->mce_pending = 0;
    v->old_trap_priority = VCPU_TRAP_NONE;
    v->trap_priority = VCPU_TRAP_NONE;
    clear_bit(_VPF_blocked, &v->pause_flags);

    domain_unlock(v->domain);
    vcpu_unpause(v);
}

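/*
 * vcpu_op hypercall: initialise/up/down a VCPU, query its run state, and
 * manage its periodic/single-shot timers and NMI injection; unhandled
 * commands are forwarded to arch_do_vcpu_op().
 */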
long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    struct vcpu_guest_context *ctxt;
    long rc = 0;

    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
        return -EINVAL;

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    switch ( cmd )
    {
    case VCPUOP_initialise:
        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
            return -ENOMEM;

        if ( copy_from_guest(ctxt, arg, 1) )
        {
            xfree(ctxt);
            return -EFAULT;
        }

        domain_lock(d);
        rc = -EEXIST;
        if ( !v->is_initialised )
            rc = boot_vcpu(d, vcpuid, ctxt);
        domain_unlock(d);

        xfree(ctxt);
        break;

    case VCPUOP_up:
        if ( !v->is_initialised )
            return -EINVAL;

        if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
            vcpu_wake(v);

        break;

    case VCPUOP_down:
        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
            vcpu_sleep_nosync(v);
        break;

    case VCPUOP_is_up:
        rc = !test_bit(_VPF_down, &v->pause_flags);
        break;

    case VCPUOP_get_runstate_info:
    {
        struct vcpu_runstate_info runstate;
        vcpu_runstate_get(v, &runstate);
        if ( copy_to_guest(arg, &runstate, 1) )
            rc = -EFAULT;
        break;
    }

    case VCPUOP_set_periodic_timer:
    {
        struct vcpu_set_periodic_timer set;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( set.period_ns < MILLISECS(1) )
            return -EINVAL;

        v->periodic_period = set.period_ns;
        vcpu_force_reschedule(v);

        break;
    }

    case VCPUOP_stop_periodic_timer:
        v->periodic_period = 0;
        vcpu_force_reschedule(v);
        break;

    case VCPUOP_set_singleshot_timer:
    {
        struct vcpu_set_singleshot_timer set;

        if ( v != current )
            return -EINVAL;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( (set.flags & VCPU_SSHOTTMR_future) &&
             (set.timeout_abs_ns < NOW()) )
            return -ETIME;

        if ( v->singleshot_timer.cpu != smp_processor_id() )
        {
            stop_timer(&v->singleshot_timer);
            v->singleshot_timer.cpu = smp_processor_id();
        }

        set_timer(&v->singleshot_timer, set.timeout_abs_ns);

        break;
    }

    case VCPUOP_stop_singleshot_timer:
        if ( v != current )
            return -EINVAL;

        stop_timer(&v->singleshot_timer);

        break;

    case VCPUOP_send_nmi:
        if ( !guest_handle_is_null(arg) )
            return -EINVAL;

        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);

        break;

    default:
        rc = arch_do_vcpu_op(cmd, v, arg);
        break;
    }

    return rc;
}

long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
{
    if ( type > MAX_VMASST_TYPE )
        return -EINVAL;

    switch ( cmd )
    {
    case VMASST_CMD_enable:
        set_bit(type, &p->vm_assist);
        return 0;
    case VMASST_CMD_disable:
        clear_bit(type, &p->vm_assist);
        return 0;
    }

    return -ENOSYS;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */