ia64/xen-unstable: xen/common/domain.c @ 18594:5e4e234d58be

x86: Define __per_cpu_shift label to help kdump/crashdump.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

author   Keir Fraser <keir.fraser@citrix.com>
date     Wed Oct 08 13:11:06 2008 +0100
parents  9a7b46546e05
children fc5208167bb6
/******************************************************************************
 * domain.c
 *
 * Generic domain-handling functions.
 */

#include <xen/config.h>
#include <xen/compat.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/mm.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/console.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/rangeset.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/delay.h>
#include <xen/shutdown.h>
#include <xen/percpu.h>
#include <xen/multicall.h>
#include <xen/rcupdate.h>
#include <asm/debugger.h>
#include <public/sched.h>
#include <public/vcpu.h>
#include <xsm/xsm.h>

/* Linux config option: propagated to domain0. */
/* xen_processor_pmbits: whether Xen controls Cx, Px, ... */
unsigned int xen_processor_pmbits = 0;

/* opt_dom0_vcpus_pin: If true, dom0 VCPUs are pinned. */
static unsigned int opt_dom0_vcpus_pin;
boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin);

enum cpufreq_controller cpufreq_controller;
static void __init setup_cpufreq_option(char *str)
{
    if ( !strcmp(str, "dom0-kernel") )
    {
        xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
        cpufreq_controller = FREQCTL_dom0_kernel;
        opt_dom0_vcpus_pin = 1;
    }
    else if ( !strcmp(str, "xen") )
    {
        xen_processor_pmbits |= XEN_PROCESSOR_PM_PX;
        cpufreq_controller = FREQCTL_xen;
    }
}
custom_param("cpufreq", setup_cpufreq_option);

/* Protect updates/reads (resp.) of domain_list and domain_hash. */
DEFINE_SPINLOCK(domlist_update_lock);
DEFINE_RCU_READ_LOCK(domlist_read_lock);

#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
static struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;

struct domain *dom0;

struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;

int current_domain_id(void)
{
    return current->domain->domain_id;
}

static struct domain *alloc_domain_struct(void)
{
    return xmalloc(struct domain);
}

static void free_domain_struct(struct domain *d)
{
    xfree(d);
}
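
/*
 * Mark the domain as fully shut down once every VCPU has been paused for
 * shutdown.  Caller must hold d->shutdown_lock.  A suspend request with a
 * registered suspend event channel notifies the domain itself; any other
 * shutdown notifies dom0 via VIRQ_DOM_EXC.
 */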
static void __domain_finalise_shutdown(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(!spin_is_locked(&d->shutdown_lock));

    if ( d->is_shut_down )
        return;

    for_each_vcpu ( d, v )
        if ( !v->paused_for_shutdown )
            return;

    d->is_shut_down = 1;
    if ( (d->shutdown_code == SHUTDOWN_suspend) && d->suspend_evtchn )
        evtchn_send(d, d->suspend_evtchn);
    else
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
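
/*
 * If the domain is shutting down, pause this VCPU, cancel any shutdown
 * deferral it had in place, and check whether the whole domain can now be
 * marked as shut down.
 */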
static void vcpu_check_shutdown(struct vcpu *v)
{
    struct domain *d = v->domain;

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        if ( !v->paused_for_shutdown )
            vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
        v->defer_shutdown = 0;
        __domain_finalise_shutdown(d);
    }

    spin_unlock(&d->shutdown_lock);
}
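
/*
 * Allocate and initialise VCPU @vcpu_id of domain @d, initially placed on
 * physical CPU @cpu_id.  Returns NULL on failure.  Non-idle VCPUs start
 * offline (_VPF_down set) until explicitly brought online.
 */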
struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    BUG_ON(d->vcpu[vcpu_id] != NULL);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;

    v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
    v->runstate.state_entry_time = NOW();

    spin_lock_init(&v->virq_lock);

    if ( !is_idle_domain(d) )
    {
        set_bit(_VPF_down, &v->pause_flags);
        v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
    }

    if ( sched_init_vcpu(v, cpu_id) != 0 )
    {
        free_vcpu_struct(v);
        return NULL;
    }

    if ( vcpu_initialise(v) != 0 )
    {
        sched_destroy_vcpu(v);
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
        d->vcpu[v->vcpu_id-1]->next_in_list = v;

    /* Must be called after making new vcpu visible to for_each_vcpu(). */
    vcpu_check_shutdown(v);

    return v;
}
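
/*
 * Return the idle VCPU for physical CPU @cpu_id, allocating it (and, if
 * necessary, its idle domain) on first use.
 */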
struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;

    if ( (v = idle_vcpu[cpu_id]) != NULL )
        return v;

    d = (vcpu_id == 0) ?
        domain_create(IDLE_DOMAIN_ID, 0, 0) :
        idle_vcpu[cpu_id - vcpu_id]->domain;
    BUG_ON(d == NULL);

    v = alloc_vcpu(d, vcpu_id, cpu_id);
    idle_vcpu[cpu_id] = v;

    return v;
}
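
/*
 * Create and initialise a new domain.  The init_status bitmask records
 * which subsystems have been set up so that only those are torn down on the
 * 'fail' path.  Non-idle domains are created paused (is_paused_by_controller)
 * and are inserted into domain_list (kept in domid order) and domain_hash
 * under domlist_update_lock.
 */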
struct domain *domain_create(
    domid_t domid, unsigned int domcr_flags, ssidref_t ssidref)
{
    struct domain *d, **pd;
    enum { INIT_xsm = 1u<<0, INIT_rangeset = 1u<<1, INIT_evtchn = 1u<<2,
           INIT_gnttab = 1u<<3, INIT_arch = 1u<<4 };
    int init_status = 0;

    if ( (d = alloc_domain_struct()) == NULL )
        return NULL;

    memset(d, 0, sizeof(*d));
    d->domain_id = domid;

    if ( xsm_alloc_security_domain(d) != 0 )
        goto fail;
    init_status |= INIT_xsm;

    atomic_set(&d->refcnt, 1);
    spin_lock_init(&d->domain_lock);
    spin_lock_init(&d->page_alloc_lock);
    spin_lock_init(&d->shutdown_lock);
    spin_lock_init(&d->hypercall_deadlock_mutex);
    INIT_LIST_HEAD(&d->page_list);
    INIT_LIST_HEAD(&d->xenpage_list);

    if ( domcr_flags & DOMCRF_hvm )
        d->is_hvm = 1;

    if ( (domid == 0) && opt_dom0_vcpus_pin )
        d->is_pinned = 1;

    if ( domcr_flags & DOMCRF_dummy )
        return d;

    rangeset_domain_initialise(d);
    init_status |= INIT_rangeset;

    if ( !is_idle_domain(d) )
    {
        if ( xsm_domain_create(d, ssidref) != 0 )
            goto fail;

        d->is_paused_by_controller = 1;
        atomic_inc(&d->pause_count);

        if ( evtchn_init(d) != 0 )
            goto fail;
        init_status |= INIT_evtchn;

        if ( grant_table_create(d) != 0 )
            goto fail;
        init_status |= INIT_gnttab;
    }

    if ( arch_domain_create(d, domcr_flags) != 0 )
        goto fail;
    init_status |= INIT_arch;

    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
    d->irq_caps = rangeset_new(d, "Interrupts", 0);
    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
        goto fail;

    if ( sched_init_domain(d) != 0 )
        goto fail;

    if ( !is_idle_domain(d) )
    {
        spin_lock(&domlist_update_lock);
        pd = &domain_list; /* NB. domain_list maintained in order of domid. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
        rcu_assign_pointer(*pd, d);
        rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
        spin_unlock(&domlist_update_lock);
    }

    return d;

 fail:
    d->is_dying = DOMDYING_dead;
    atomic_set(&d->refcnt, DOMAIN_DESTROYED);
    if ( init_status & INIT_arch )
        arch_domain_destroy(d);
    if ( init_status & INIT_gnttab )
        grant_table_destroy(d);
    if ( init_status & INIT_evtchn )
        evtchn_destroy(d);
    if ( init_status & INIT_rangeset )
        rangeset_domain_destroy(d);
    if ( init_status & INIT_xsm )
        xsm_free_security_domain(d);
    free_domain_struct(d);
    return NULL;
}
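
/*
 * Look up a domain by ID and take a reference to it.  Returns NULL if no
 * such domain exists or if get_domain() fails because the domain is already
 * being destroyed.  The caller must drop the reference with put_domain().
 */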
struct domain *get_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
        {
            if ( unlikely(!get_domain(d)) )
                d = NULL;
            break;
        }
    }

    rcu_read_unlock(&domlist_read_lock);

    return d;
}
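
/*
 * Look up a domain by ID without taking a reference.  On success the RCU
 * read lock is left held and must be released by the caller with
 * rcu_unlock_domain(); on failure the lock is dropped and NULL is returned.
 */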
struct domain *rcu_lock_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
            return d;
    }

    rcu_read_unlock(&domlist_read_lock);

    return NULL;
}

int rcu_lock_target_domain_by_id(domid_t dom, struct domain **d)
{
    if ( dom == DOMID_SELF )
    {
        *d = rcu_lock_current_domain();
        return 0;
    }

    if ( (*d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    if ( !IS_PRIV_FOR(current->domain, *d) )
    {
        rcu_unlock_domain(*d);
        return -EPERM;
    }

    return 0;
}
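
/*
 * Tear down a domain and relinquish its resources.  May return -EAGAIN, in
 * which case the caller is expected to retry until the domain reaches
 * DOMDYING_dead; the is_dying field records progress between retries
 * (serialised by domctl_lock).
 */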
int domain_kill(struct domain *d)
{
    int rc = 0;

    if ( d == current->domain )
        return -EINVAL;

    /* Protected by domctl_lock. */
    switch ( d->is_dying )
    {
    case DOMDYING_alive:
        domain_pause(d);
        d->is_dying = DOMDYING_dying;
        spin_barrier(&d->domain_lock);
        evtchn_destroy(d);
        gnttab_release_mappings(d);
        /* fallthrough */
    case DOMDYING_dying:
        rc = domain_relinquish_resources(d);
        page_scrub_kick();
        if ( rc != 0 )
        {
            BUG_ON(rc != -EAGAIN);
            break;
        }
        d->is_dying = DOMDYING_dead;
        put_domain(d);
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
        /* fallthrough */
    case DOMDYING_dead:
        break;
    }

    return rc;
}
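
/*
 * Report a domain crash and shut the domain down with reason SHUTDOWN_crash.
 * The execution state is dumped only when the crashing domain is the one
 * currently running on this CPU.
 */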
void __domain_crash(struct domain *d)
{
    if ( d->is_shutting_down )
    {
        /* Print nothing: the domain is already shutting down. */
    }
    else if ( d == current->domain )
    {
        printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
               d->domain_id, current->vcpu_id, smp_processor_id());
        show_execution_state(guest_cpu_user_regs());
    }
    else
    {
        printk("Domain %d reported crashed by domain %d on cpu#%d:\n",
               d->domain_id, current->domain->domain_id, smp_processor_id());
    }

    domain_shutdown(d, SHUTDOWN_crash);
}

void __domain_crash_synchronous(void)
{
    __domain_crash(current->domain);

    /*
     * Flush multicall state before dying if a multicall is in progress.
     * This shouldn't be necessary, but some architectures are calling
     * domain_crash_synchronous() when they really shouldn't (i.e., from
     * within hypercall context).
     */
    if ( this_cpu(mc_state).flags != 0 )
    {
        dprintk(XENLOG_ERR,
                "FIXME: synchronous domain crash during a multicall!\n");
        this_cpu(mc_state).flags = 0;
    }

    vcpu_end_shutdown_deferral(current);

    for ( ; ; )
        do_softirq();
}
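
/*
 * Begin shutting down a domain: record the reason, pause every VCPU that is
 * not currently deferring shutdown, and finalise immediately if none are
 * deferring.  A dom0 shutdown is handed to dom0_shutdown() first.
 */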
void domain_shutdown(struct domain *d, u8 reason)
{
    struct vcpu *v;

    if ( d->domain_id == 0 )
        dom0_shutdown(reason);

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        spin_unlock(&d->shutdown_lock);
        return;
    }

    d->is_shutting_down = 1;
    d->shutdown_code = reason;

    smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */

    for_each_vcpu ( d, v )
    {
        if ( v->defer_shutdown )
            continue;
        vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
    }

    __domain_finalise_shutdown(d);

    spin_unlock(&d->shutdown_lock);
}

void domain_resume(struct domain *d)
{
    struct vcpu *v;

    /*
     * Some code paths assume that shutdown status does not get reset under
     * their feet (e.g., some assertions make this assumption).
     */
    domain_pause(d);

    spin_lock(&d->shutdown_lock);

    d->is_shutting_down = d->is_shut_down = 0;

    for_each_vcpu ( d, v )
    {
        if ( v->paused_for_shutdown )
            vcpu_unpause(v);
        v->paused_for_shutdown = 0;
    }

    spin_unlock(&d->shutdown_lock);

    domain_unpause(d);
}
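
/*
 * A VCPU may temporarily defer a pending shutdown (for example, while it
 * finishes work that must not race a shutdown).  Starting a deferral returns
 * 0 if the domain is already shutting down, in which case the VCPU is paused
 * for shutdown instead.
 */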
int vcpu_start_shutdown_deferral(struct vcpu *v)
{
    if ( v->defer_shutdown )
        return 1;

    v->defer_shutdown = 1;
    smp_mb(); /* set deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);

    return v->defer_shutdown;
}

void vcpu_end_shutdown_deferral(struct vcpu *v)
{
    v->defer_shutdown = 0;
    smp_mb(); /* clear deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);
}

void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    atomic_inc(&d->pause_count);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d); /* race-free atomic_dec(&d->pause_count) */

    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}

/* Complete domain destroy after RCU readers are not holding old references. */
static void complete_domain_destroy(struct rcu_head *head)
{
    struct domain *d = container_of(head, struct domain, rcu);
    struct vcpu *v;
    int i;

    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
    {
        if ( (v = d->vcpu[i]) == NULL )
            continue;
        vcpu_destroy(v);
        sched_destroy_vcpu(v);
    }

    rangeset_domain_destroy(d);

    grant_table_destroy(d);

    arch_domain_destroy(d);

    sched_destroy_domain(d);

    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
        if ( (v = d->vcpu[i]) != NULL )
            free_vcpu_struct(v);

    if ( d->target != NULL )
        put_domain(d->target);

    xsm_free_security_domain(d);
    free_domain_struct(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}

/* Release resources belonging to task @p. */
void domain_destroy(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    BUG_ON(!d->is_dying);

    /* May be already destroyed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTROYED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    spin_lock(&domlist_update_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_in_list;
    rcu_assign_pointer(*pd, d->next_in_list);
    pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
    while ( *pd != d )
        pd = &(*pd)->next_in_hashbucket;
    rcu_assign_pointer(*pd, d->next_in_hashbucket);
    spin_unlock(&domlist_update_lock);

    /* Schedule RCU asynchronous completion of domain destroy. */
    call_rcu(&d->rcu, complete_domain_destroy);
}
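
/*
 * VCPU and domain pause/unpause helpers.  The _nosync variants do not wait
 * for the VCPU(s) to actually be descheduled; the synchronous
 * vcpu_pause()/domain_pause() must not be applied to the caller's own
 * VCPU/domain.
 */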
void vcpu_pause(struct vcpu *v)
{
    ASSERT(v != current);
    atomic_inc(&v->pause_count);
    vcpu_sleep_sync(v);
}

void vcpu_pause_nosync(struct vcpu *v)
{
    atomic_inc(&v->pause_count);
    vcpu_sleep_nosync(v);
}

void vcpu_unpause(struct vcpu *v)
{
    if ( atomic_dec_and_test(&v->pause_count) )
        vcpu_wake(v);
}

void domain_pause(struct domain *d)
{
    struct vcpu *v;

    ASSERT(d != current->domain);

    atomic_inc(&d->pause_count);

    for_each_vcpu( d, v )
        vcpu_sleep_sync(v);
}

void domain_unpause(struct domain *d)
{
    struct vcpu *v;

    if ( atomic_dec_and_test(&d->pause_count) )
        for_each_vcpu( d, v )
            vcpu_wake(v);
}

void domain_pause_by_systemcontroller(struct domain *d)
{
    domain_pause(d);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}

void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( test_and_clear_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}

int boot_vcpu(struct domain *d, int vcpuid, vcpu_guest_context_u ctxt)
{
    struct vcpu *v = d->vcpu[vcpuid];

    BUG_ON(v->is_initialised);

    return arch_set_info_guest(v, ctxt);
}

void vcpu_reset(struct vcpu *v)
{
    struct domain *d = v->domain;

    vcpu_pause(v);
    domain_lock(d);

    arch_vcpu_reset(v);

    set_bit(_VPF_down, &v->pause_flags);

    clear_bit(v->vcpu_id, d->poll_mask);
    v->poll_evtchn = 0;

    v->fpu_initialised = 0;
    v->fpu_dirtied = 0;
    v->is_initialised = 0;
    v->nmi_pending = 0;
    v->mce_pending = 0;
    v->old_trap_priority = VCPU_TRAP_NONE;
    v->trap_priority = VCPU_TRAP_NONE;
    clear_bit(_VPF_blocked, &v->pause_flags);

    domain_unlock(v->domain);
    vcpu_unpause(v);
}
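
/*
 * VCPUOP_* hypercall dispatcher.  Operations act on VCPU @vcpuid of the
 * calling domain; the single-shot timer operations may only be applied by a
 * VCPU to itself.
 */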
long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    struct vcpu_guest_context *ctxt;
    long rc = 0;

    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
        return -EINVAL;

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    switch ( cmd )
    {
    case VCPUOP_initialise:
        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
            return -ENOMEM;

        if ( copy_from_guest(ctxt, arg, 1) )
        {
            xfree(ctxt);
            return -EFAULT;
        }

        domain_lock(d);
        rc = -EEXIST;
        if ( !v->is_initialised )
            rc = boot_vcpu(d, vcpuid, ctxt);
        domain_unlock(d);

        xfree(ctxt);
        break;

    case VCPUOP_up:
        if ( !v->is_initialised )
            return -EINVAL;

        if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
            vcpu_wake(v);

        break;

    case VCPUOP_down:
        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
            vcpu_sleep_nosync(v);
        break;

    case VCPUOP_is_up:
        rc = !test_bit(_VPF_down, &v->pause_flags);
        break;

    case VCPUOP_get_runstate_info:
    {
        struct vcpu_runstate_info runstate;
        vcpu_runstate_get(v, &runstate);
        if ( copy_to_guest(arg, &runstate, 1) )
            rc = -EFAULT;
        break;
    }

    case VCPUOP_set_periodic_timer:
    {
        struct vcpu_set_periodic_timer set;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( set.period_ns < MILLISECS(1) )
            return -EINVAL;

        v->periodic_period = set.period_ns;
        vcpu_force_reschedule(v);

        break;
    }

    case VCPUOP_stop_periodic_timer:
        v->periodic_period = 0;
        vcpu_force_reschedule(v);
        break;

    case VCPUOP_set_singleshot_timer:
    {
        struct vcpu_set_singleshot_timer set;

        if ( v != current )
            return -EINVAL;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( (set.flags & VCPU_SSHOTTMR_future) &&
             (set.timeout_abs_ns < NOW()) )
            return -ETIME;

        if ( v->singleshot_timer.cpu != smp_processor_id() )
        {
            stop_timer(&v->singleshot_timer);
            v->singleshot_timer.cpu = smp_processor_id();
        }

        set_timer(&v->singleshot_timer, set.timeout_abs_ns);

        break;
    }

    case VCPUOP_stop_singleshot_timer:
        if ( v != current )
            return -EINVAL;

        stop_timer(&v->singleshot_timer);

        break;

    case VCPUOP_send_nmi:
        if ( !guest_handle_is_null(arg) )
            return -EINVAL;

        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);

        break;

    default:
        rc = arch_do_vcpu_op(cmd, v, arg);
        break;
    }

    return rc;
}
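
/*
 * Enable or disable a VM assist for domain @p.  Returns -EINVAL for an
 * unknown assist type and -ENOSYS for an unknown command.
 */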
long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
{
    if ( type > MAX_VMASST_TYPE )
        return -EINVAL;

    switch ( cmd )
    {
    case VMASST_CMD_enable:
        set_bit(type, &p->vm_assist);
        return 0;
    case VMASST_CMD_disable:
        clear_bit(type, &p->vm_assist);
        return 0;
    }

    return -ENOSYS;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */