ia64/xen-unstable

view xen/common/schedule.c @ 17062:0769835cf50f

x86 shadow: Reduce scope of the shadow lock.

emulate_map_dest() does not need to hold the shadow lock: the only
shadow-related operation it may perform is removing a shadow, which is
infrequent and can take the lock internally. The rest is either a guest
table walk or per-vcpu monitor table manipulation.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 14 10:33:12 2008 +0000 (2008-02-14)
parents 7327e1c2a42c
children 21532468020b
/****************************************************************************
 * (C) 2002-2003 - Rolf Neugebauer - Intel Research Cambridge
 * (C) 2002-2003 University of Cambridge
 * (C) 2004      - Mark Williamson - Intel Research Cambridge
 ****************************************************************************
 *
 *        File: common/schedule.c
 *      Author: Rolf Neugebauer & Keir Fraser
 *              Updated for generic API by Mark Williamson
 *
 * Description: Generic CPU scheduling code
 *              implements support functionality for the Xen scheduler API.
 *
 */

#ifndef COMPAT
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/delay.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/perfc.h>
#include <xen/sched-if.h>
#include <xen/softirq.h>
#include <xen/trace.h>
#include <xen/mm.h>
#include <xen/errno.h>
#include <xen/guest_access.h>
#include <xen/multicall.h>
#include <public/sched.h>
#include <xsm/xsm.h>

/* opt_sched: scheduler - default to credit */
static char opt_sched[10] = "credit";
string_param("sched", opt_sched);

/* opt_dom0_vcpus_pin: If true, dom0 VCPUs are pinned. */
static unsigned int opt_dom0_vcpus_pin;
boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin);

enum cpufreq_controller cpufreq_controller;
static void __init setup_cpufreq_option(char *str)
{
    if ( !strcmp(str, "dom0-kernel") )
    {
        cpufreq_controller = FREQCTL_dom0_kernel;
        opt_dom0_vcpus_pin = 1;
    }
}
custom_param("cpufreq", setup_cpufreq_option);
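
/*
 * Illustrative Xen boot-command-line fragment exercising the options
 * declared above (a sketch, assuming the usual name[=value] syntax):
 *
 *     sched=credit dom0_vcpus_pin cpufreq=dom0-kernel
 *
 * Note that "cpufreq=dom0-kernel" also forces dom0 VCPU pinning, as done
 * in setup_cpufreq_option() above.
 */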
#define TIME_SLOP      (s32)MICROSECS(50)     /* allow time to slip a bit */

/* Various timer handlers. */
static void s_timer_fn(void *unused);
static void vcpu_periodic_timer_fn(void *data);
static void vcpu_singleshot_timer_fn(void *data);
static void poll_timer_fn(void *data);

/* This is global for now so that private implementations can reach it */
DEFINE_PER_CPU(struct schedule_data, schedule_data);

extern struct scheduler sched_sedf_def;
extern struct scheduler sched_credit_def;
static struct scheduler *schedulers[] = {
    &sched_sedf_def,
    &sched_credit_def,
    NULL
};

static struct scheduler ops;

#define SCHED_OP(fn, ...)                                 \
    (( ops.fn != NULL ) ? ops.fn( __VA_ARGS__ )           \
      : (typeof(ops.fn(__VA_ARGS__)))0 )
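
/*
 * For reference, a sketch of how the macro above expands: a call such as
 *
 *     SCHED_OP(wake, v);
 *
 * evaluates roughly to
 *
 *     ( ops.wake != NULL ) ? ops.wake(v) : (typeof(ops.wake(v)))0;
 *
 * so hooks that a scheduler implementation leaves NULL are skipped and
 * simply yield a zero of the hook's return type.
 */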
static inline void vcpu_runstate_change(
    struct vcpu *v, int new_state, s_time_t new_entry_time)
{
    ASSERT(v->runstate.state != new_state);
    ASSERT(spin_is_locked(&per_cpu(schedule_data,v->processor).schedule_lock));

    v->runstate.time[v->runstate.state] +=
        new_entry_time - v->runstate.state_entry_time;
    v->runstate.state_entry_time = new_entry_time;
    v->runstate.state = new_state;
}
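
/*
 * Accounting sketch: runstate.time[] accumulates, per state, the time this
 * VCPU has spent in that state, while state_entry_time records when the
 * current state began. A reader therefore adds (NOW() - state_entry_time)
 * to the bucket of the current state to get an up-to-date figure, as
 * vcpu_runstate_get() below does.
 */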
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
{
    if ( likely(v == current) )
    {
        /* Fast lock-free path. */
        memcpy(runstate, &v->runstate, sizeof(*runstate));
        ASSERT(runstate->state == RUNSTATE_running);
        runstate->time[RUNSTATE_running] += NOW() - runstate->state_entry_time;
    }
    else
    {
        vcpu_schedule_lock_irq(v);
        memcpy(runstate, &v->runstate, sizeof(*runstate));
        runstate->time[runstate->state] += NOW() - runstate->state_entry_time;
        vcpu_schedule_unlock_irq(v);
    }
}

int sched_init_vcpu(struct vcpu *v, unsigned int processor)
{
    struct domain *d = v->domain;

    /*
     * Initialize processor and affinity settings. The idler, and potentially
     * domain-0 VCPUs, are pinned onto their respective physical CPUs.
     */
    v->processor = processor;
    if ( is_idle_domain(d) || ((d->domain_id == 0) && opt_dom0_vcpus_pin) )
        v->cpu_affinity = cpumask_of_cpu(processor);
    else
        cpus_setall(v->cpu_affinity);

    /* Initialise the per-vcpu timers. */
    init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
               v, v->processor);
    init_timer(&v->singleshot_timer, vcpu_singleshot_timer_fn,
               v, v->processor);
    init_timer(&v->poll_timer, poll_timer_fn,
               v, v->processor);

    /* Idle VCPUs are scheduled immediately. */
    if ( is_idle_domain(d) )
    {
        per_cpu(schedule_data, v->processor).curr = v;
        per_cpu(schedule_data, v->processor).idle = v;
        v->is_running = 1;
    }

    TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);

    return SCHED_OP(init_vcpu, v);
}

void sched_destroy_vcpu(struct vcpu *v)
{
    kill_timer(&v->periodic_timer);
    kill_timer(&v->singleshot_timer);
    kill_timer(&v->poll_timer);
    SCHED_OP(destroy_vcpu, v);
}

int sched_init_domain(struct domain *d)
{
    return SCHED_OP(init_domain, d);
}

void sched_destroy_domain(struct domain *d)
{
    SCHED_OP(destroy_domain, d);
}

void vcpu_sleep_nosync(struct vcpu *v)
{
    unsigned long flags;

    vcpu_schedule_lock_irqsave(v, flags);

    if ( likely(!vcpu_runnable(v)) )
    {
        if ( v->runstate.state == RUNSTATE_runnable )
            vcpu_runstate_change(v, RUNSTATE_offline, NOW());

        SCHED_OP(sleep, v);
    }

    vcpu_schedule_unlock_irqrestore(v, flags);

    TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
}

void vcpu_sleep_sync(struct vcpu *v)
{
    vcpu_sleep_nosync(v);

    while ( !vcpu_runnable(v) && v->is_running )
        cpu_relax();

    sync_vcpu_execstate(v);
}

void vcpu_wake(struct vcpu *v)
{
    unsigned long flags;

    vcpu_schedule_lock_irqsave(v, flags);

    if ( likely(vcpu_runnable(v)) )
    {
        if ( v->runstate.state >= RUNSTATE_blocked )
            vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
        SCHED_OP(wake, v);
    }
    else if ( !test_bit(_VPF_blocked, &v->pause_flags) )
    {
        if ( v->runstate.state == RUNSTATE_blocked )
            vcpu_runstate_change(v, RUNSTATE_offline, NOW());
    }

    vcpu_schedule_unlock_irqrestore(v, flags);

    TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
}

static void vcpu_migrate(struct vcpu *v)
{
    unsigned long flags;
    int old_cpu;

    vcpu_schedule_lock_irqsave(v, flags);

    /*
     * NB. Check of v->running happens /after/ setting migration flag
     * because they both happen in (different) spinlock regions, and those
     * regions are strictly serialised.
     */
    if ( v->is_running ||
         !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
    {
        vcpu_schedule_unlock_irqrestore(v, flags);
        return;
    }

    /* Switch to new CPU, then unlock old CPU. */
    old_cpu = v->processor;
    v->processor = SCHED_OP(pick_cpu, v);
    spin_unlock_irqrestore(
        &per_cpu(schedule_data, old_cpu).schedule_lock, flags);

    /* Wake on new CPU. */
    vcpu_wake(v);
}
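
/*
 * A sketch of the migration handshake implied by the NB above: the
 * requester sets _VPF_migrating and then calls vcpu_migrate(), which bails
 * out while v->is_running is still set; context_saved() (below) clears
 * is_running, re-checks the flag after a full barrier, and calls
 * vcpu_migrate() itself. The test_and_clear_bit() under the schedule lock
 * ensures only one of the two paths actually performs the migration.
 */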
/*
 * Force a VCPU through a deschedule/reschedule path.
 * For example, using this when setting the periodic timer period means that
 * most periodic-timer state need only be touched from within the scheduler
 * which can thus be done without need for synchronisation.
 */
void vcpu_force_reschedule(struct vcpu *v)
{
    vcpu_schedule_lock_irq(v);
    if ( v->is_running )
        set_bit(_VPF_migrating, &v->pause_flags);
    vcpu_schedule_unlock_irq(v);

    if ( test_bit(_VPF_migrating, &v->pause_flags) )
    {
        vcpu_sleep_nosync(v);
        vcpu_migrate(v);
    }
}

static int __vcpu_set_affinity(
    struct vcpu *v, cpumask_t *affinity,
    bool_t old_lock_status, bool_t new_lock_status)
{
    cpumask_t online_affinity, old_affinity;

    cpus_and(online_affinity, *affinity, cpu_online_map);
    if ( cpus_empty(online_affinity) )
        return -EINVAL;

    vcpu_schedule_lock_irq(v);

    if ( v->affinity_locked != old_lock_status )
    {
        BUG_ON(!v->affinity_locked);
        vcpu_schedule_unlock_irq(v);
        return -EBUSY;
    }

    v->affinity_locked = new_lock_status;

    old_affinity = v->cpu_affinity;
    v->cpu_affinity = *affinity;
    *affinity = old_affinity;
    if ( !cpu_isset(v->processor, v->cpu_affinity) )
        set_bit(_VPF_migrating, &v->pause_flags);

    vcpu_schedule_unlock_irq(v);

    if ( test_bit(_VPF_migrating, &v->pause_flags) )
    {
        vcpu_sleep_nosync(v);
        vcpu_migrate(v);
    }

    return 0;
}

int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
{
    if ( (v->domain->domain_id == 0) && opt_dom0_vcpus_pin )
        return -EINVAL;
    return __vcpu_set_affinity(v, affinity, 0, 0);
}

int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity)
{
    return __vcpu_set_affinity(v, affinity, 0, 1);
}

void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity)
{
    cpumask_t online_affinity;

    /* Do not fail if no CPU in old affinity mask is online. */
    cpus_and(online_affinity, *affinity, cpu_online_map);
    if ( cpus_empty(online_affinity) )
        *affinity = cpu_online_map;

    if ( __vcpu_set_affinity(v, affinity, 1, 0) != 0 )
        BUG();
}

/* Block the currently-executing domain until a pertinent event occurs. */
static long do_block(void)
{
    struct vcpu *v = current;

    local_event_delivery_enable();
    set_bit(_VPF_blocked, &v->pause_flags);

    /* Check for events /after/ blocking: avoids wakeup waiting race. */
    if ( local_events_need_delivery() )
    {
        clear_bit(_VPF_blocked, &v->pause_flags);
    }
    else
    {
        TRACE_2D(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
        raise_softirq(SCHEDULE_SOFTIRQ);
    }

    return 0;
}
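
/*
 * Sketch of the "wakeup waiting" race the comment above refers to: if the
 * pending-event check were done before setting _VPF_blocked, an event
 * delivered in the window between the check and the bit being set would
 * find the VCPU not yet blocked and so would not unblock it, leaving the
 * VCPU asleep with an event pending. Setting the bit first and checking
 * afterwards closes that window; do_poll() below relies on the same
 * ordering (hence its smp_wmb()).
 */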
static long do_poll(struct sched_poll *sched_poll)
{
    struct vcpu   *v = current;
    struct domain *d = v->domain;
    evtchn_port_t  port;
    long           rc = 0;
    unsigned int   i;

    /* Fairly arbitrary limit. */
    if ( sched_poll->nr_ports > 128 )
        return -EINVAL;

    if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
        return -EFAULT;

    set_bit(_VPF_blocked, &v->pause_flags);
    v->is_polling = 1;
    d->is_polling = 1;

    /* Check for events /after/ setting flags: avoids wakeup waiting race. */
    smp_wmb();

    for ( i = 0; i < sched_poll->nr_ports; i++ )
    {
        rc = -EFAULT;
        if ( __copy_from_guest_offset(&port, sched_poll->ports, i, 1) )
            goto out;

        rc = -EINVAL;
        if ( port >= MAX_EVTCHNS(d) )
            goto out;

        rc = 0;
        if ( test_bit(port, shared_info_addr(d, evtchn_pending)) )
            goto out;
    }

    if ( sched_poll->timeout != 0 )
        set_timer(&v->poll_timer, sched_poll->timeout);

    TRACE_2D(TRC_SCHED_BLOCK, d->domain_id, v->vcpu_id);
    raise_softirq(SCHEDULE_SOFTIRQ);

    return 0;

 out:
    v->is_polling = 0;
    clear_bit(_VPF_blocked, &v->pause_flags);
    return rc;
}

/* Voluntarily yield the processor for this allocation. */
static long do_yield(void)
{
    TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
    raise_softirq(SCHEDULE_SOFTIRQ);
    return 0;
}

long do_sched_op_compat(int cmd, unsigned long arg)
{
    long ret = 0;

    switch ( cmd )
    {
    case SCHEDOP_yield:
    {
        ret = do_yield();
        break;
    }

    case SCHEDOP_block:
    {
        ret = do_block();
        break;
    }

    case SCHEDOP_shutdown:
    {
        TRACE_3D(TRC_SCHED_SHUTDOWN,
                 current->domain->domain_id, current->vcpu_id, arg);
        domain_shutdown(current->domain, (u8)arg);
        break;
    }

    default:
        ret = -ENOSYS;
    }

    return ret;
}

typedef long ret_t;

#endif /* !COMPAT */

ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    ret_t ret = 0;

    switch ( cmd )
    {
    case SCHEDOP_yield:
    {
        ret = do_yield();
        break;
    }

    case SCHEDOP_block:
    {
        ret = do_block();
        break;
    }

    case SCHEDOP_shutdown:
    {
        struct sched_shutdown sched_shutdown;

        ret = -EFAULT;
        if ( copy_from_guest(&sched_shutdown, arg, 1) )
            break;

        ret = 0;
        TRACE_3D(TRC_SCHED_SHUTDOWN,
                 current->domain->domain_id, current->vcpu_id,
                 sched_shutdown.reason);
        domain_shutdown(current->domain, (u8)sched_shutdown.reason);

        break;
    }

    case SCHEDOP_poll:
    {
        struct sched_poll sched_poll;

        ret = -EFAULT;
        if ( copy_from_guest(&sched_poll, arg, 1) )
            break;

        ret = do_poll(&sched_poll);

        break;
    }

    case SCHEDOP_remote_shutdown:
    {
        struct domain *d;
        struct sched_remote_shutdown sched_remote_shutdown;

        ret = -EFAULT;
        if ( copy_from_guest(&sched_remote_shutdown, arg, 1) )
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(sched_remote_shutdown.domain_id);
        if ( d == NULL )
            break;

        if ( !IS_PRIV_FOR(current->domain, d) )
        {
            rcu_unlock_domain(d);
            return -EPERM;
        }

        ret = xsm_schedop_shutdown(current->domain, d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            return ret;
        }

        /* domain_pause() prevents any further execution in guest context. */
        domain_pause(d);
        domain_shutdown(d, (u8)sched_remote_shutdown.reason);
        domain_unpause(d);

        rcu_unlock_domain(d);
        ret = 0;

        break;
    }

    default:
        ret = -ENOSYS;
    }

    return ret;
}

#ifndef COMPAT

/* Per-vcpu oneshot-timer hypercall. */
long do_set_timer_op(s_time_t timeout)
{
    struct vcpu *v = current;
    s_time_t offset = timeout - NOW();

    if ( timeout == 0 )
    {
        stop_timer(&v->singleshot_timer);
    }
    else if ( unlikely(timeout < 0) || /* overflow into 64th bit? */
              unlikely((offset > 0) && ((uint32_t)(offset >> 50) != 0)) )
    {
        /*
         * Linux workaround: occasionally we will see timeouts a long way in
         * the future due to wrapping in Linux's jiffy time handling. We check
         * for timeouts wrapped negative, and for positive timeouts more than
         * about 13 days in the future (2^50ns). The correct fix is to trigger
         * an interrupt immediately (since Linux in fact has pending work to
         * do in this situation). However, older guests also set a long timeout
         * when they have *no* pending timers at all: setting an immediate
         * timeout in this case can burn a lot of CPU. We therefore go for a
         * reasonable middleground of triggering a timer event in 100ms.
         */
        gdprintk(XENLOG_INFO,
                 "Warning: huge timeout set by vcpu %d: %"PRIx64"\n",
                 v->vcpu_id, (uint64_t)timeout);
        set_timer(&v->singleshot_timer, NOW() + MILLISECS(100));
    }
    else
    {
        if ( v->singleshot_timer.cpu != smp_processor_id() )
        {
            stop_timer(&v->singleshot_timer);
            v->singleshot_timer.cpu = smp_processor_id();
        }

        set_timer(&v->singleshot_timer, timeout);
    }

    return 0;
}
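
/*
 * For reference, the arithmetic behind the "about 13 days" figure above:
 * 2^50 ns ~= 1.126e15 ns ~= 1.126e6 s ~= 13.0 days.
 */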
/* sched_id - fetch ID of current scheduler */
int sched_id(void)
{
    return ops.sched_id;
}

/* Adjust scheduling parameter for a given domain. */
long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
{
    struct vcpu *v;
    long ret;

    if ( (op->sched_id != ops.sched_id) ||
         ((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
          (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
        return -EINVAL;

    /*
     * Most VCPUs we can simply pause. If we are adjusting this VCPU then
     * we acquire the local schedule_lock to guard against concurrent updates.
     *
     * We only acquire the local schedule lock after we have paused all other
     * VCPUs in this domain. There are two reasons for this:
     * 1- We don't want to hold up interrupts as pausing a VCPU can
     *    trigger a tlb shootdown.
     * 2- Pausing other VCPUs involves briefly locking the schedule
     *    lock of the CPU they are running on. This CPU could be the
     *    same as ours.
     */

    for_each_vcpu ( d, v )
    {
        if ( v != current )
            vcpu_pause(v);
    }

    if ( d == current->domain )
        vcpu_schedule_lock_irq(current);

    if ( (ret = SCHED_OP(adjust, d, op)) == 0 )
        TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);

    if ( d == current->domain )
        vcpu_schedule_unlock_irq(current);

    for_each_vcpu ( d, v )
    {
        if ( v != current )
            vcpu_unpause(v);
    }

    return ret;
}

static void vcpu_periodic_timer_work(struct vcpu *v)
{
    s_time_t now = NOW();
    uint64_t periodic_next_event;

    ASSERT(!active_timer(&v->periodic_timer));

    if ( v->periodic_period == 0 )
        return;

    periodic_next_event = v->periodic_last_event + v->periodic_period;
    if ( now > periodic_next_event )
    {
        send_timer_event(v);
        v->periodic_last_event = now;
        periodic_next_event = now + v->periodic_period;
    }

    v->periodic_timer.cpu = smp_processor_id();
    set_timer(&v->periodic_timer, periodic_next_event);
}

/*
 * The main function
 * - deschedule the current domain (scheduler independent).
 * - pick a new domain (scheduler dependent).
 */
static void schedule(void)
{
    struct vcpu          *prev = current, *next = NULL;
    s_time_t              now = NOW();
    struct schedule_data *sd;
    struct task_slice     next_slice;
    s32                   r_time;     /* time for new dom to run */

    ASSERT(!in_irq());
    ASSERT(this_cpu(mc_state).flags == 0);

    perfc_incr(sched_run);

    sd = &this_cpu(schedule_data);

    spin_lock_irq(&sd->schedule_lock);

    stop_timer(&sd->s_timer);

    /* get policy-specific decision on scheduling... */
    next_slice = ops.do_schedule(now);

    r_time = next_slice.time;
    next = next_slice.task;

    sd->curr = next;

    set_timer(&sd->s_timer, now + r_time);

    if ( unlikely(prev == next) )
    {
        spin_unlock_irq(&sd->schedule_lock);
        return continue_running(prev);
    }

    TRACE_2D(TRC_SCHED_SWITCH_INFPREV,
             prev->domain->domain_id,
             now - prev->runstate.state_entry_time);
    TRACE_3D(TRC_SCHED_SWITCH_INFNEXT,
             next->domain->domain_id,
             (next->runstate.state == RUNSTATE_runnable) ?
             (now - next->runstate.state_entry_time) : 0,
             r_time);

    ASSERT(prev->runstate.state == RUNSTATE_running);
    vcpu_runstate_change(
        prev,
        (test_bit(_VPF_blocked, &prev->pause_flags) ? RUNSTATE_blocked :
         (vcpu_runnable(prev) ? RUNSTATE_runnable : RUNSTATE_offline)),
        now);

    ASSERT(next->runstate.state != RUNSTATE_running);
    vcpu_runstate_change(next, RUNSTATE_running, now);

    ASSERT(!next->is_running);
    next->is_running = 1;

    spin_unlock_irq(&sd->schedule_lock);

    perfc_incr(sched_ctx);

    stop_timer(&prev->periodic_timer);

    /* Ensure that the domain has an up-to-date time base. */
    update_vcpu_system_time(next);
    vcpu_periodic_timer_work(next);

    TRACE_4D(TRC_SCHED_SWITCH,
             prev->domain->domain_id, prev->vcpu_id,
             next->domain->domain_id, next->vcpu_id);

    context_switch(prev, next);
}

void context_saved(struct vcpu *prev)
{
    /* Clear running flag /after/ writing context to memory. */
    smp_wmb();

    prev->is_running = 0;

    /* Check for migration request /after/ clearing running flag. */
    smp_mb();

    if ( unlikely(test_bit(_VPF_migrating, &prev->pause_flags)) )
        vcpu_migrate(prev);
}

/* The scheduler timer: force a run through the scheduler */
static void s_timer_fn(void *unused)
{
    raise_softirq(SCHEDULE_SOFTIRQ);
    perfc_incr(sched_irq);
}

/* Per-VCPU periodic timer function: sends a virtual timer interrupt. */
static void vcpu_periodic_timer_fn(void *data)
{
    struct vcpu *v = data;
    vcpu_periodic_timer_work(v);
}

/* Per-VCPU single-shot timer function: sends a virtual timer interrupt. */
static void vcpu_singleshot_timer_fn(void *data)
{
    struct vcpu *v = data;
    send_timer_event(v);
}

/* SCHEDOP_poll timeout callback. */
static void poll_timer_fn(void *data)
{
    struct vcpu *v = data;

    if ( !v->is_polling )
        return;

    v->is_polling = 0;
    vcpu_unblock(v);
}

/* Initialise the data structures. */
void __init scheduler_init(void)
{
    int i;

    open_softirq(SCHEDULE_SOFTIRQ, schedule);

    for_each_cpu ( i )
    {
        spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
        init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
    }

    for ( i = 0; schedulers[i] != NULL; i++ )
    {
        ops = *schedulers[i];
        if ( strcmp(ops.opt_name, opt_sched) == 0 )
            break;
    }

    if ( schedulers[i] == NULL )
        printk("Could not find scheduler: %s\n", opt_sched);

    printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
    SCHED_OP(init);
}

void dump_runq(unsigned char key)
{
    s_time_t      now = NOW();
    int           i;
    unsigned long flags;

    local_irq_save(flags);

    printk("Scheduler: %s (%s)\n", ops.name, ops.opt_name);
    SCHED_OP(dump_settings);
    printk("NOW=0x%08X%08X\n",  (u32)(now>>32), (u32)now);

    for_each_online_cpu ( i )
    {
        spin_lock(&per_cpu(schedule_data, i).schedule_lock);
        printk("CPU[%02d] ", i);
        SCHED_OP(dump_cpu_state, i);
        spin_unlock(&per_cpu(schedule_data, i).schedule_lock);
    }

    local_irq_restore(flags);
}

#ifdef CONFIG_COMPAT
#include "compat/schedule.c"
#endif

#endif /* !COMPAT */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */