ia64/xen-unstable

view xen/common/domain.c @ 19835:edfdeb150f27

Fix buildsystem to detect udev > version 124

udev removed the udevinfo symlink from versions higher than 123 and
xen's build-system could not detect if udev is in place and has the
required version.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 25 13:02:37 2009 +0100 (2009-06-25)
parents 2f9e1348aa98
/******************************************************************************
 * domain.c
 *
 * Generic domain-handling functions.
 */

#include <xen/config.h>
#include <xen/compat.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/ctype.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/mm.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/console.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/rangeset.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/delay.h>
#include <xen/shutdown.h>
#include <xen/percpu.h>
#include <xen/multicall.h>
#include <xen/rcupdate.h>
#include <acpi/cpufreq/cpufreq.h>
#include <asm/debugger.h>
#include <public/sched.h>
#include <public/vcpu.h>
#include <xsm/xsm.h>
#include <xen/trace.h>
#include <xen/tmem.h>
/* Linux config option: propagated to domain0 */
/* xen_processor_pmbits: xen controls Cx, Px, ... */
unsigned int xen_processor_pmbits = XEN_PROCESSOR_PM_PX;

/* opt_dom0_vcpus_pin: If true, dom0 VCPUs are pinned. */
static unsigned int opt_dom0_vcpus_pin;
boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin);

/* Set xen as the default cpufreq controller. */
enum cpufreq_controller cpufreq_controller = FREQCTL_xen;
static void __init setup_cpufreq_option(char *str)
{
    char *arg;

    if ( !strcmp(str, "dom0-kernel") )
    {
        xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
        cpufreq_controller = FREQCTL_dom0_kernel;
        opt_dom0_vcpus_pin = 1;
        return;
    }

    if ( !strcmp(str, "none") )
    {
        xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
        cpufreq_controller = FREQCTL_none;
        return;
    }

    if ( (arg = strpbrk(str, ",:")) != NULL )
        *arg++ = '\0';

    if ( !strcmp(str, "xen") )
        if ( arg && *arg )
            cpufreq_cmdline_parse(arg);
}
custom_param("cpufreq", setup_cpufreq_option);
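
/*
 * Editorial note: the "cpufreq=" boot option above accepts "dom0-kernel",
 * "none", or "xen"; for "xen", anything after a ',' or ':' separator is
 * handed to cpufreq_cmdline_parse(). Illustrative command lines (the
 * "performance" sub-option below is an assumption for the example; the
 * accepted sub-options are whatever cpufreq_cmdline_parse() understands):
 *
 *   cpufreq=dom0-kernel       dom0 kernel drives P-states, dom0 VCPUs pinned
 *   cpufreq=none              no P-state management at all
 *   cpufreq=xen,performance   Xen drives P-states with the given governor
 */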
/* Protect updates/reads (resp.) of domain_list and domain_hash. */
DEFINE_SPINLOCK(domlist_update_lock);
DEFINE_RCU_READ_LOCK(domlist_read_lock);

#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
static struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;

struct domain *dom0;

struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;

int current_domain_id(void)
{
    return current->domain->domain_id;
}
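
/*
 * Editorial note: a domain counts as "shut down" only once every VCPU has
 * been paused for shutdown. __domain_finalise_shutdown() below is re-run
 * each time a VCPU is parked and, on the final one, signals either the
 * domain's registered suspend event channel (for SHUTDOWN_suspend) or
 * VIRQ_DOM_EXC to dom0.
 */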
static void __domain_finalise_shutdown(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(!spin_is_locked(&d->shutdown_lock));

    if ( d->is_shut_down )
        return;

    for_each_vcpu ( d, v )
        if ( !v->paused_for_shutdown )
            return;

    d->is_shut_down = 1;
    if ( (d->shutdown_code == SHUTDOWN_suspend) && d->suspend_evtchn )
        evtchn_send(d, d->suspend_evtchn);
    else
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}

static void vcpu_check_shutdown(struct vcpu *v)
{
    struct domain *d = v->domain;

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        if ( !v->paused_for_shutdown )
            vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
        v->defer_shutdown = 0;
        __domain_finalise_shutdown(d);
    }

    spin_unlock(&d->shutdown_lock);
}
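
/*
 * Editorial note: alloc_vcpu() builds a VCPU in stages (struct, scheduler
 * state, architecture state) and unwinds on failure, so callers only ever
 * see a fully constructed VCPU or NULL. A hypothetical caller sketch:
 *
 *   if ( alloc_vcpu(d, vcpu_id, cpu_id) == NULL )
 *       return -ENOMEM;
 */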
struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    BUG_ON((!is_idle_domain(d) || vcpu_id) && d->vcpu[vcpu_id]);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;

    spin_lock_init(&v->virq_lock);

    if ( is_idle_domain(d) )
    {
        v->runstate.state = RUNSTATE_running;
    }
    else
    {
        v->runstate.state = RUNSTATE_offline;
        v->runstate.state_entry_time = NOW();
        set_bit(_VPF_down, &v->pause_flags);
        if ( vcpu_id < XEN_LEGACY_MAX_VCPUS )
            v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
    }

    if ( sched_init_vcpu(v, cpu_id) != 0 )
    {
        free_vcpu_struct(v);
        return NULL;
    }

    if ( vcpu_initialise(v) != 0 )
    {
        sched_destroy_vcpu(v);
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
        d->vcpu[v->vcpu_id-1]->next_in_list = v;

    /* Must be called after making new vcpu visible to for_each_vcpu(). */
    vcpu_check_shutdown(v);

    return v;
}

struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
{
    return idle_vcpu[cpu_id] ?: alloc_vcpu(idle_vcpu[0]->domain,
                                           cpu_id, cpu_id);
}
static unsigned int extra_dom0_irqs, extra_domU_irqs = 8;
static void __init parse_extra_guest_irqs(const char *s)
{
    if ( isdigit(*s) )
        extra_domU_irqs = simple_strtoul(s, &s, 0);
    if ( *s == ',' && isdigit(*++s) )
        extra_dom0_irqs = simple_strtoul(s, &s, 0);
}
custom_param("extra_guest_irqs", parse_extra_guest_irqs);
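
/*
 * Editorial note: domain_create() records partial progress in the
 * init_status bitmask; each INIT_* bit is set only after the matching
 * subsystem has been fully initialised, so the 'fail' label can tear down
 * exactly what was built, in reverse order of construction.
 */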
struct domain *domain_create(
    domid_t domid, unsigned int domcr_flags, ssidref_t ssidref)
{
    struct domain *d, **pd;
    enum { INIT_xsm = 1u<<0, INIT_rangeset = 1u<<1, INIT_evtchn = 1u<<2,
           INIT_gnttab = 1u<<3, INIT_arch = 1u<<4 };
    int init_status = 0;

    if ( (d = alloc_domain_struct()) == NULL )
        return NULL;

    memset(d, 0, sizeof(*d));
    d->domain_id = domid;

    if ( xsm_alloc_security_domain(d) != 0 )
        goto fail;
    init_status |= INIT_xsm;

    atomic_set(&d->refcnt, 1);
    spin_lock_init(&d->domain_lock);
    spin_lock_init(&d->page_alloc_lock);
    spin_lock_init(&d->shutdown_lock);
    spin_lock_init(&d->hypercall_deadlock_mutex);
    INIT_PAGE_LIST_HEAD(&d->page_list);
    INIT_PAGE_LIST_HEAD(&d->xenpage_list);

    if ( domcr_flags & DOMCRF_hvm )
        d->is_hvm = 1;

    if ( (domid == 0) && opt_dom0_vcpus_pin )
        d->is_pinned = 1;

    if ( domcr_flags & DOMCRF_dummy )
        return d;

    rangeset_domain_initialise(d);
    init_status |= INIT_rangeset;

    if ( !is_idle_domain(d) )
    {
        if ( xsm_domain_create(d, ssidref) != 0 )
            goto fail;

        d->is_paused_by_controller = 1;
        atomic_inc(&d->pause_count);

        d->nr_pirqs = (nr_irqs +
                       (domid ? extra_domU_irqs :
                        extra_dom0_irqs ?: nr_irqs));
        d->pirq_to_evtchn = xmalloc_array(u16, d->nr_pirqs);
        d->pirq_mask = xmalloc_array(
            unsigned long, BITS_TO_LONGS(d->nr_pirqs));
        if ( (d->pirq_to_evtchn == NULL) || (d->pirq_mask == NULL) )
            goto fail;
        memset(d->pirq_to_evtchn, 0, d->nr_pirqs * sizeof(*d->pirq_to_evtchn));
        bitmap_zero(d->pirq_mask, d->nr_pirqs);

        if ( evtchn_init(d) != 0 )
            goto fail;
        init_status |= INIT_evtchn;

        if ( grant_table_create(d) != 0 )
            goto fail;
        init_status |= INIT_gnttab;
    }

    if ( arch_domain_create(d, domcr_flags) != 0 )
        goto fail;
    init_status |= INIT_arch;

    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
    d->irq_caps   = rangeset_new(d, "Interrupts", 0);
    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
        goto fail;

    if ( sched_init_domain(d) != 0 )
        goto fail;

    if ( !is_idle_domain(d) )
    {
        spin_lock(&domlist_update_lock);
        pd = &domain_list; /* NB. domain_list maintained in order of domid. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
        rcu_assign_pointer(*pd, d);
        rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
        spin_unlock(&domlist_update_lock);
    }

    return d;

 fail:
    d->is_dying = DOMDYING_dead;
    atomic_set(&d->refcnt, DOMAIN_DESTROYED);
    if ( init_status & INIT_arch )
        arch_domain_destroy(d);
    if ( init_status & INIT_gnttab )
        grant_table_destroy(d);
    if ( init_status & INIT_evtchn )
        evtchn_destroy(d);
    if ( init_status & INIT_rangeset )
        rangeset_domain_destroy(d);
    if ( init_status & INIT_xsm )
        xsm_free_security_domain(d);
    xfree(d->pirq_mask);
    xfree(d->pirq_to_evtchn);
    free_domain_struct(d);
    return NULL;
}
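
/*
 * Editorial note: the two lookup routines below differ in what the caller
 * must release. get_domain_by_id() takes a general reference (drop it with
 * put_domain()); rcu_lock_domain_by_id() instead returns while still inside
 * an RCU read-side critical section (drop it with rcu_unlock_domain()).
 * Hypothetical usage sketch:
 *
 *   struct domain *d = rcu_lock_domain_by_id(dom);
 *   if ( d == NULL )
 *       return -ESRCH;
 *   ...
 *   rcu_unlock_domain(d);
 */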
struct domain *get_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
        {
            if ( unlikely(!get_domain(d)) )
                d = NULL;
            break;
        }
    }

    rcu_read_unlock(&domlist_read_lock);

    return d;
}

struct domain *rcu_lock_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
            return d;
    }

    rcu_read_unlock(&domlist_read_lock);

    return NULL;
}

int rcu_lock_target_domain_by_id(domid_t dom, struct domain **d)
{
    if ( dom == DOMID_SELF )
    {
        *d = rcu_lock_current_domain();
        return 0;
    }

    if ( (*d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    if ( !IS_PRIV_FOR(current->domain, *d) )
    {
        rcu_unlock_domain(*d);
        return -EPERM;
    }

    return 0;
}
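
/*
 * Editorial note: domain_kill() is a restartable state machine over
 * d->is_dying (alive -> dying -> dead), and the case labels deliberately
 * fall through. domain_relinquish_resources() may return -EAGAIN, in which
 * case the caller (the domctl path) is expected to reinvoke domain_kill()
 * until it finally returns 0.
 */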
int domain_kill(struct domain *d)
{
    int rc = 0;

    if ( d == current->domain )
        return -EINVAL;

    /* Protected by domctl_lock. */
    switch ( d->is_dying )
    {
    case DOMDYING_alive:
        domain_pause(d);
        d->is_dying = DOMDYING_dying;
        spin_barrier(&d->domain_lock);
        evtchn_destroy(d);
        gnttab_release_mappings(d);
        tmem_destroy(d->tmem);
        d->tmem = NULL;
        /* fallthrough */
    case DOMDYING_dying:
        rc = domain_relinquish_resources(d);
        page_scrub_kick();
        if ( rc != 0 )
        {
            BUG_ON(rc != -EAGAIN);
            break;
        }
        d->is_dying = DOMDYING_dead;
        put_domain(d);
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
        /* fallthrough */
    case DOMDYING_dead:
        break;
    }

    return rc;
}
void __domain_crash(struct domain *d)
{
    if ( d->is_shutting_down )
    {
        /* Print nothing: the domain is already shutting down. */
    }
    else if ( d == current->domain )
    {
        printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
               d->domain_id, current->vcpu_id, smp_processor_id());
        show_execution_state(guest_cpu_user_regs());
    }
    else
    {
        printk("Domain %d reported crashed by domain %d on cpu#%d:\n",
               d->domain_id, current->domain->domain_id, smp_processor_id());
    }

    domain_shutdown(d, SHUTDOWN_crash);
}

void __domain_crash_synchronous(void)
{
    __domain_crash(current->domain);

    /*
     * Flush multicall state before dying if a multicall is in progress.
     * This shouldn't be necessary, but some architectures are calling
     * domain_crash_synchronous() when they really shouldn't (i.e., from
     * within hypercall context).
     */
    if ( this_cpu(mc_state).flags != 0 )
    {
        dprintk(XENLOG_ERR,
                "FIXME: synchronous domain crash during a multicall!\n");
        this_cpu(mc_state).flags = 0;
    }

    vcpu_end_shutdown_deferral(current);

    for ( ; ; )
        do_softirq();
}
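
/*
 * Editorial note: domain_shutdown() parks every VCPU that has not deferred
 * shutdown (a crash overrides any deferral) and then tries to finalise;
 * deferring VCPUs are picked up later by vcpu_end_shutdown_deferral().
 * The smp_mb() below pairs with the one in vcpu_start_shutdown_deferral(),
 * so either this path observes the deferral or the deferring VCPU observes
 * is_shutting_down; one of the two is guaranteed.
 */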
void domain_shutdown(struct domain *d, u8 reason)
{
    struct vcpu *v;

    if ( d->domain_id == 0 )
        dom0_shutdown(reason);

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        spin_unlock(&d->shutdown_lock);
        return;
    }

    d->is_shutting_down = 1;
    d->shutdown_code = reason;

    smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */

    for_each_vcpu ( d, v )
    {
        if ( reason == SHUTDOWN_crash )
            v->defer_shutdown = 0;
        else if ( v->defer_shutdown )
            continue;
        vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
    }

    __domain_finalise_shutdown(d);

    spin_unlock(&d->shutdown_lock);
}

void domain_resume(struct domain *d)
{
    struct vcpu *v;

    /*
     * Some code paths assume that shutdown status does not get reset under
     * their feet (e.g., some assertions make this assumption).
     */
    domain_pause(d);

    spin_lock(&d->shutdown_lock);

    d->is_shutting_down = d->is_shut_down = 0;

    for_each_vcpu ( d, v )
    {
        if ( v->paused_for_shutdown )
            vcpu_unpause(v);
        v->paused_for_shutdown = 0;
    }

    spin_unlock(&d->shutdown_lock);

    domain_unpause(d);
}
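
/*
 * Editorial note: a hypothetical code path that must not be torn down
 * mid-operation by a shutdown would bracket its critical region like so
 * (sketch only; the -EAGAIN choice is an assumption for the example):
 *
 *   if ( !vcpu_start_shutdown_deferral(current) )
 *       return -EAGAIN;    (shutdown already in progress)
 *   ... critical work ...
 *   vcpu_end_shutdown_deferral(current);
 */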
int vcpu_start_shutdown_deferral(struct vcpu *v)
{
    if ( v->defer_shutdown )
        return 1;

    v->defer_shutdown = 1;
    smp_mb(); /* set deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);

    return v->defer_shutdown;
}

void vcpu_end_shutdown_deferral(struct vcpu *v)
{
    v->defer_shutdown = 0;
    smp_mb(); /* clear deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);
}

void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    atomic_inc(&d->pause_count);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d); /* race-free atomic_dec(&d->pause_count) */

    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}
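
/*
 * Editorial note: destruction is two-phase. domain_destroy() below unlinks
 * the domain from domain_list and domain_hash under domlist_update_lock,
 * then hands the actual teardown to complete_domain_destroy() via
 * call_rcu(), so lock-free readers still traversing those lists can never
 * touch freed memory.
 */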
/* Complete domain destroy once RCU readers no longer hold old references. */
static void complete_domain_destroy(struct rcu_head *head)
{
    struct domain *d = container_of(head, struct domain, rcu);
    struct vcpu *v;
    int i;

    for ( i = d->max_vcpus - 1; i >= 0; i-- )
    {
        if ( (v = d->vcpu[i]) == NULL )
            continue;
        vcpu_destroy(v);
        sched_destroy_vcpu(v);
    }

    grant_table_destroy(d);

    arch_domain_destroy(d);

    rangeset_domain_destroy(d);

    sched_destroy_domain(d);

    /* Free page used by xen oprofile buffer. */
    free_xenoprof_pages(d);

    for ( i = d->max_vcpus - 1; i >= 0; i-- )
        if ( (v = d->vcpu[i]) != NULL )
            free_vcpu_struct(v);

    if ( d->target != NULL )
        put_domain(d->target);

    xfree(d->pirq_mask);
    xfree(d->pirq_to_evtchn);

    xsm_free_security_domain(d);
    free_domain_struct(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}

/* Release resources belonging to task @d. */
void domain_destroy(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    BUG_ON(!d->is_dying);

    /* May be already destroyed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTROYED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    TRACE_1D(TRC_SCHED_DOM_REM, d->domain_id);
    spin_lock(&domlist_update_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_in_list;
    rcu_assign_pointer(*pd, d->next_in_list);
    pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
    while ( *pd != d )
        pd = &(*pd)->next_in_hashbucket;
    rcu_assign_pointer(*pd, d->next_in_hashbucket);
    spin_unlock(&domlist_update_lock);

    /* Schedule RCU asynchronous completion of domain destroy. */
    call_rcu(&d->rcu, complete_domain_destroy);
}
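
/*
 * Editorial note: pausing is reference-counted. vcpu_pause() synchronously
 * forces the target off-CPU and therefore must not be applied to the
 * calling VCPU (hence the ASSERT); the _nosync variant only flags the VCPU
 * to sleep. A VCPU or domain runs again only once every pause has been
 * matched by an unpause.
 */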
void vcpu_pause(struct vcpu *v)
{
    ASSERT(v != current);
    atomic_inc(&v->pause_count);
    vcpu_sleep_sync(v);
}

void vcpu_pause_nosync(struct vcpu *v)
{
    atomic_inc(&v->pause_count);
    vcpu_sleep_nosync(v);
}

void vcpu_unpause(struct vcpu *v)
{
    if ( atomic_dec_and_test(&v->pause_count) )
        vcpu_wake(v);
}

void domain_pause(struct domain *d)
{
    struct vcpu *v;

    ASSERT(d != current->domain);

    atomic_inc(&d->pause_count);

    for_each_vcpu( d, v )
        vcpu_sleep_sync(v);
}

void domain_unpause(struct domain *d)
{
    struct vcpu *v;

    if ( atomic_dec_and_test(&d->pause_count) )
        for_each_vcpu( d, v )
            vcpu_wake(v);
}

void domain_pause_by_systemcontroller(struct domain *d)
{
    domain_pause(d);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}

void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( test_and_clear_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}
int boot_vcpu(struct domain *d, int vcpuid, vcpu_guest_context_u ctxt)
{
    struct vcpu *v = d->vcpu[vcpuid];

    BUG_ON(v->is_initialised);

    return arch_set_info_guest(v, ctxt);
}

void vcpu_reset(struct vcpu *v)
{
    struct domain *d = v->domain;

    vcpu_pause(v);
    domain_lock(d);

    arch_vcpu_reset(v);

    set_bit(_VPF_down, &v->pause_flags);

    clear_bit(v->vcpu_id, d->poll_mask);
    v->poll_evtchn = 0;

    v->fpu_initialised = 0;
    v->fpu_dirtied     = 0;
    v->is_initialised  = 0;
    v->nmi_pending     = 0;
    v->mce_pending     = 0;
    v->old_trap_priority = VCPU_TRAP_NONE;
    v->trap_priority     = VCPU_TRAP_NONE;
    clear_bit(_VPF_blocked, &v->pause_flags);

    domain_unlock(v->domain);
    vcpu_unpause(v);
}
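
/*
 * Editorial note: do_vcpu_op() is the handler for the VCPUOP_* hypercalls.
 * A guest typically brings a secondary VCPU online by issuing
 * VCPUOP_initialise with a populated vcpu_guest_context, then VCPUOP_up;
 * sketched from the guest side (illustrative; HYPERVISOR_vcpu_op is the
 * usual guest-kernel wrapper):
 *
 *   rc = HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpuid, &ctxt);
 *   if ( rc == 0 )
 *       rc = HYPERVISOR_vcpu_op(VCPUOP_up, vcpuid, NULL);
 */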
long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    struct vcpu_guest_context *ctxt;
    long rc = 0;

    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
        return -EINVAL;

    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    switch ( cmd )
    {
    case VCPUOP_initialise:
        if ( !v->vcpu_info )
            return -EINVAL;

        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
            return -ENOMEM;

        if ( copy_from_guest(ctxt, arg, 1) )
        {
            xfree(ctxt);
            return -EFAULT;
        }

        domain_lock(d);
        rc = -EEXIST;
        if ( !v->is_initialised )
            rc = boot_vcpu(d, vcpuid, ctxt);
        domain_unlock(d);

        xfree(ctxt);
        break;

    case VCPUOP_up:
        if ( !v->is_initialised )
            return -EINVAL;

        if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
            vcpu_wake(v);

        break;

    case VCPUOP_down:
        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
            vcpu_sleep_nosync(v);
        break;

    case VCPUOP_is_up:
        rc = !test_bit(_VPF_down, &v->pause_flags);
        break;

    case VCPUOP_get_runstate_info:
    {
        struct vcpu_runstate_info runstate;
        vcpu_runstate_get(v, &runstate);
        if ( copy_to_guest(arg, &runstate, 1) )
            rc = -EFAULT;
        break;
    }

    case VCPUOP_set_periodic_timer:
    {
        struct vcpu_set_periodic_timer set;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( set.period_ns < MILLISECS(1) )
            return -EINVAL;

        v->periodic_period = set.period_ns;
        vcpu_force_reschedule(v);

        break;
    }

    case VCPUOP_stop_periodic_timer:
        v->periodic_period = 0;
        vcpu_force_reschedule(v);
        break;

    case VCPUOP_set_singleshot_timer:
    {
        struct vcpu_set_singleshot_timer set;

        if ( v != current )
            return -EINVAL;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( (set.flags & VCPU_SSHOTTMR_future) &&
             (set.timeout_abs_ns < NOW()) )
            return -ETIME;

        if ( v->singleshot_timer.cpu != smp_processor_id() )
        {
            stop_timer(&v->singleshot_timer);
            v->singleshot_timer.cpu = smp_processor_id();
        }

        set_timer(&v->singleshot_timer, set.timeout_abs_ns);

        break;
    }

    case VCPUOP_stop_singleshot_timer:
        if ( v != current )
            return -EINVAL;

        stop_timer(&v->singleshot_timer);

        break;

    case VCPUOP_send_nmi:
        if ( !guest_handle_is_null(arg) )
            return -EINVAL;

        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);

        break;

    default:
        rc = arch_do_vcpu_op(cmd, v, arg);
        break;
    }

    return rc;
}
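
/*
 * Editorial note: vm_assist() just toggles one per-domain feature bit.
 * From a PV guest it is reached through the vm_assist hypercall, e.g.
 * (illustrative guest-side call):
 *
 *   HYPERVISOR_vm_assist(VMASST_CMD_enable,
 *                        VMASST_TYPE_writable_pagetables);
 */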
long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
{
    if ( type > MAX_VMASST_TYPE )
        return -EINVAL;

    switch ( cmd )
    {
    case VMASST_CMD_enable:
        set_bit(type, &p->vm_assist);
        return 0;
    case VMASST_CMD_disable:
        clear_bit(type, &p->vm_assist);
        return 0;
    }

    return -ENOSYS;
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */