xen/common/schedule.c @ 15896:42d4313b5fdd (xen-unstable)

/****************************************************************************
 * (C) 2002-2003 - Rolf Neugebauer - Intel Research Cambridge
 * (C) 2002-2003 University of Cambridge
 * (C) 2004      - Mark Williamson - Intel Research Cambridge
 ****************************************************************************
 *
 *        File: common/schedule.c
 *      Author: Rolf Neugebauer & Keir Fraser
 *              Updated for generic API by Mark Williamson
 *
 * Description: Generic CPU scheduling code
 *              implements support functionality for the Xen scheduler API.
 *
 */

#ifndef COMPAT
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/delay.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/perfc.h>
#include <xen/sched-if.h>
#include <xen/softirq.h>
#include <xen/trace.h>
#include <xen/mm.h>
#include <xen/errno.h>
#include <xen/guest_access.h>
#include <xen/multicall.h>
#include <public/sched.h>
#include <xsm/xsm.h>

/* opt_sched: scheduler - default to credit */
static char opt_sched[10] = "credit";
string_param("sched", opt_sched);

/* opt_dom0_vcpus_pin: If true, dom0 VCPUs are pinned. */
static unsigned int opt_dom0_vcpus_pin;
boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin);

#define TIME_SLOP (s32)MICROSECS(50)     /* allow time to slip a bit */

/* Various timer handlers. */
static void s_timer_fn(void *unused);
static void vcpu_periodic_timer_fn(void *data);
static void vcpu_singleshot_timer_fn(void *data);
static void poll_timer_fn(void *data);

/* This is global for now so that private implementations can reach it */
DEFINE_PER_CPU(struct schedule_data, schedule_data);

extern struct scheduler sched_sedf_def;
extern struct scheduler sched_credit_def;
static struct scheduler *schedulers[] = {
    &sched_sedf_def,
    &sched_credit_def,
    NULL
};

static struct scheduler ops;

#define SCHED_OP(fn, ...)                                 \
    (( ops.fn != NULL ) ? ops.fn( __VA_ARGS__ )           \
      : (typeof(ops.fn(__VA_ARGS__)))0 )
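
/*
 * Illustrative note (not part of the original file): a call such as
 * SCHED_OP(wake, v) expands roughly to
 *
 *     (( ops.wake != NULL ) ? ops.wake(v) : (typeof(ops.wake(v)))0 )
 *
 * so a scheduler that leaves a hook NULL simply yields a zero of the hook's
 * return type; for void hooks the cast degenerates to (void)0, which is why
 * SCHED_OP() can be used both as a statement (SCHED_OP(init)) and as a
 * value (return SCHED_OP(init_vcpu, v)).
 */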

static inline void vcpu_runstate_change(
    struct vcpu *v, int new_state, s_time_t new_entry_time)
{
    ASSERT(v->runstate.state != new_state);
    ASSERT(spin_is_locked(&per_cpu(schedule_data,v->processor).schedule_lock));

    v->runstate.time[v->runstate.state] +=
        new_entry_time - v->runstate.state_entry_time;
    v->runstate.state_entry_time = new_entry_time;
    v->runstate.state = new_state;
}

void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
{
    if ( likely(v == current) )
    {
        /* Fast lock-free path. */
        memcpy(runstate, &v->runstate, sizeof(*runstate));
        ASSERT(runstate->state == RUNSTATE_running);
        runstate->time[RUNSTATE_running] += NOW() - runstate->state_entry_time;
    }
    else
    {
        vcpu_schedule_lock_irq(v);
        memcpy(runstate, &v->runstate, sizeof(*runstate));
        runstate->time[runstate->state] += NOW() - runstate->state_entry_time;
        vcpu_schedule_unlock_irq(v);
    }
}

int sched_init_vcpu(struct vcpu *v, unsigned int processor)
{
    struct domain *d = v->domain;

    /*
     * Initialize processor and affinity settings. The idler, and potentially
     * domain-0 VCPUs, are pinned onto their respective physical CPUs.
     */
    v->processor = processor;
    if ( is_idle_domain(d) || ((d->domain_id == 0) && opt_dom0_vcpus_pin) )
        v->cpu_affinity = cpumask_of_cpu(processor);
    else
        cpus_setall(v->cpu_affinity);

    /* Initialise the per-vcpu timers. */
    init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
               v, v->processor);
    init_timer(&v->singleshot_timer, vcpu_singleshot_timer_fn,
               v, v->processor);
    init_timer(&v->poll_timer, poll_timer_fn,
               v, v->processor);

    /* Idle VCPUs are scheduled immediately. */
    if ( is_idle_domain(d) )
    {
        per_cpu(schedule_data, v->processor).curr = v;
        per_cpu(schedule_data, v->processor).idle = v;
        v->is_running = 1;
    }

    TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);

    return SCHED_OP(init_vcpu, v);
}

void sched_destroy_vcpu(struct vcpu *v)
{
    kill_timer(&v->periodic_timer);
    kill_timer(&v->singleshot_timer);
    kill_timer(&v->poll_timer);
    SCHED_OP(destroy_vcpu, v);
}

int sched_init_domain(struct domain *d)
{
    return SCHED_OP(init_domain, d);
}

void sched_destroy_domain(struct domain *d)
{
    SCHED_OP(destroy_domain, d);
}

void vcpu_sleep_nosync(struct vcpu *v)
{
    unsigned long flags;

    vcpu_schedule_lock_irqsave(v, flags);

    if ( likely(!vcpu_runnable(v)) )
    {
        if ( v->runstate.state == RUNSTATE_runnable )
            vcpu_runstate_change(v, RUNSTATE_offline, NOW());

        SCHED_OP(sleep, v);
    }

    vcpu_schedule_unlock_irqrestore(v, flags);

    TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
}

void vcpu_sleep_sync(struct vcpu *v)
{
    vcpu_sleep_nosync(v);

    while ( !vcpu_runnable(v) && v->is_running )
        cpu_relax();

    sync_vcpu_execstate(v);
}

void vcpu_wake(struct vcpu *v)
{
    unsigned long flags;

    vcpu_schedule_lock_irqsave(v, flags);

    if ( likely(vcpu_runnable(v)) )
    {
        if ( v->runstate.state >= RUNSTATE_blocked )
            vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
        SCHED_OP(wake, v);
    }
    else if ( !test_bit(_VPF_blocked, &v->pause_flags) )
    {
        if ( v->runstate.state == RUNSTATE_blocked )
            vcpu_runstate_change(v, RUNSTATE_offline, NOW());
    }

    vcpu_schedule_unlock_irqrestore(v, flags);

    TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
}

static void vcpu_migrate(struct vcpu *v)
{
    unsigned long flags;
    int old_cpu;

    vcpu_schedule_lock_irqsave(v, flags);

    /*
     * NB. Check of v->is_running happens /after/ setting migration flag
     * because they both happen in (different) spinlock regions, and those
     * regions are strictly serialised.
     */
    if ( v->is_running ||
         !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
    {
        vcpu_schedule_unlock_irqrestore(v, flags);
        return;
    }

    /* Switch to new CPU, then unlock old CPU. */
    old_cpu = v->processor;
    v->processor = SCHED_OP(pick_cpu, v);
    spin_unlock_irqrestore(
        &per_cpu(schedule_data, old_cpu).schedule_lock, flags);

    /* Wake on new CPU. */
    vcpu_wake(v);
}

/*
 * Force a VCPU through a deschedule/reschedule path.
 * For example, using this when setting the periodic timer period means that
 * most periodic-timer state need only be touched from within the scheduler
 * which can thus be done without need for synchronisation.
 */
void vcpu_force_reschedule(struct vcpu *v)
{
    vcpu_schedule_lock_irq(v);
    if ( v->is_running )
        set_bit(_VPF_migrating, &v->pause_flags);
    vcpu_schedule_unlock_irq(v);

    if ( test_bit(_VPF_migrating, &v->pause_flags) )
    {
        vcpu_sleep_nosync(v);
        vcpu_migrate(v);
    }
}

int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
{
    cpumask_t online_affinity;

    if ( (v->domain->domain_id == 0) && opt_dom0_vcpus_pin )
        return -EINVAL;

    cpus_and(online_affinity, *affinity, cpu_online_map);
    if ( cpus_empty(online_affinity) )
        return -EINVAL;

    vcpu_schedule_lock_irq(v);

    v->cpu_affinity = *affinity;
    if ( !cpu_isset(v->processor, v->cpu_affinity) )
        set_bit(_VPF_migrating, &v->pause_flags);

    vcpu_schedule_unlock_irq(v);

    if ( test_bit(_VPF_migrating, &v->pause_flags) )
    {
        vcpu_sleep_nosync(v);
        vcpu_migrate(v);
    }

    return 0;
}

/* Block the currently-executing domain until a pertinent event occurs. */
static long do_block(void)
{
    struct vcpu *v = current;

    local_event_delivery_enable();
    set_bit(_VPF_blocked, &v->pause_flags);

    /* Check for events /after/ blocking: avoids wakeup waiting race. */
    if ( local_events_need_delivery() )
    {
        clear_bit(_VPF_blocked, &v->pause_flags);
    }
    else
    {
        TRACE_2D(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
        raise_softirq(SCHEDULE_SOFTIRQ);
    }

    return 0;
}
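
/*
 * Illustrative note (not part of the original file): the "wakeup waiting"
 * race avoided above would look like this if the event check came first:
 *
 *     blocking vcpu                      event sender
 *     -------------                      ------------
 *     local_events_need_delivery() == 0
 *                                        marks an event pending
 *                                        vcpu_unblock(): vcpu not yet
 *                                        blocked, so nothing to wake
 *     set_bit(_VPF_blocked, ...)
 *     ...sleeps with an event pending...
 *
 * By setting _VPF_blocked first and re-checking afterwards, either the
 * sender observes the blocked flag and wakes us, or we observe the pending
 * event here and back out of blocking.
 */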

static long do_poll(struct sched_poll *sched_poll)
{
    struct vcpu   *v = current;
    struct domain *d = v->domain;
    evtchn_port_t  port;
    long           rc = 0;
    unsigned int   i;

    /* Fairly arbitrary limit. */
    if ( sched_poll->nr_ports > 128 )
        return -EINVAL;

    if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
        return -EFAULT;

    set_bit(_VPF_blocked, &v->pause_flags);
    v->is_polling = 1;
    d->is_polling = 1;

    /* Check for events /after/ setting flags: avoids wakeup waiting race. */
    smp_wmb();

    for ( i = 0; i < sched_poll->nr_ports; i++ )
    {
        rc = -EFAULT;
        if ( __copy_from_guest_offset(&port, sched_poll->ports, i, 1) )
            goto out;

        rc = -EINVAL;
        if ( port >= MAX_EVTCHNS(d) )
            goto out;

        rc = 0;
        if ( test_bit(port, shared_info_addr(d, evtchn_pending)) )
            goto out;
    }

    if ( sched_poll->timeout != 0 )
        set_timer(&v->poll_timer, sched_poll->timeout);

    TRACE_2D(TRC_SCHED_BLOCK, d->domain_id, v->vcpu_id);
    raise_softirq(SCHEDULE_SOFTIRQ);

    return 0;

 out:
    v->is_polling = 0;
    clear_bit(_VPF_blocked, &v->pause_flags);
    return rc;
}
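
/*
 * Illustrative, hypothetical guest-side sketch (not part of this file):
 * a paravirtualised guest polls a single event channel roughly as follows,
 * using the public interface from <public/sched.h>:
 *
 *     evtchn_port_t port = some_port;                  (port to wait on)
 *     struct sched_poll poll = { .nr_ports = 1, .timeout = abs_timeout };
 *     set_xen_guest_handle(poll.ports, &port);
 *     HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
 *
 * 'timeout' is an absolute system time and 0 means wait indefinitely for
 * one of the ports. The helper names (HYPERVISOR_sched_op,
 * set_xen_guest_handle) follow guest-library convention and may differ;
 * the error paths above return -EINVAL/-EFAULT to the caller.
 */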

/* Voluntarily yield the processor for this allocation. */
static long do_yield(void)
{
    TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
    raise_softirq(SCHEDULE_SOFTIRQ);
    return 0;
}

long do_sched_op_compat(int cmd, unsigned long arg)
{
    long ret = 0;

    switch ( cmd )
    {
    case SCHEDOP_yield:
    {
        ret = do_yield();
        break;
    }

    case SCHEDOP_block:
    {
        ret = do_block();
        break;
    }

    case SCHEDOP_shutdown:
    {
        TRACE_3D(TRC_SCHED_SHUTDOWN,
                 current->domain->domain_id, current->vcpu_id, arg);
        domain_shutdown(current->domain, (u8)arg);
        break;
    }

    default:
        ret = -ENOSYS;
    }

    return ret;
}

typedef long ret_t;

#endif /* !COMPAT */

ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    ret_t ret = 0;

    switch ( cmd )
    {
    case SCHEDOP_yield:
    {
        ret = do_yield();
        break;
    }

    case SCHEDOP_block:
    {
        ret = do_block();
        break;
    }

    case SCHEDOP_shutdown:
    {
        struct sched_shutdown sched_shutdown;

        ret = -EFAULT;
        if ( copy_from_guest(&sched_shutdown, arg, 1) )
            break;

        ret = 0;
        TRACE_3D(TRC_SCHED_SHUTDOWN,
                 current->domain->domain_id, current->vcpu_id,
                 sched_shutdown.reason);
        domain_shutdown(current->domain, (u8)sched_shutdown.reason);

        break;
    }

    case SCHEDOP_poll:
    {
        struct sched_poll sched_poll;

        ret = -EFAULT;
        if ( copy_from_guest(&sched_poll, arg, 1) )
            break;

        ret = do_poll(&sched_poll);

        break;
    }

    case SCHEDOP_remote_shutdown:
    {
        struct domain *d;
        struct sched_remote_shutdown sched_remote_shutdown;

        if ( !IS_PRIV(current->domain) )
            return -EPERM;

        ret = -EFAULT;
        if ( copy_from_guest(&sched_remote_shutdown, arg, 1) )
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(sched_remote_shutdown.domain_id);
        if ( d == NULL )
            break;

        ret = xsm_schedop_shutdown(current->domain, d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            return ret;
        }

        /* domain_pause() prevents any further execution in guest context. */
        domain_pause(d);
        domain_shutdown(d, (u8)sched_remote_shutdown.reason);
        domain_unpause(d);

        rcu_unlock_domain(d);
        ret = 0;

        break;
    }

    default:
        ret = -ENOSYS;
    }

    return ret;
}
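
/*
 * Illustrative, hypothetical guest-side sketch (not part of this file):
 * a guest shuts itself down by passing a sched_shutdown structure carrying
 * one of the SHUTDOWN_* reason codes from <public/sched.h>:
 *
 *     struct sched_shutdown s = { .reason = SHUTDOWN_poweroff };
 *     HYPERVISOR_sched_op(SCHEDOP_shutdown, &s);
 *
 * The HYPERVISOR_sched_op wrapper name is a guest-library convention; the
 * handler above truncates the reason code to a u8 before passing it to
 * domain_shutdown().
 */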

#ifndef COMPAT

/* Per-vcpu oneshot-timer hypercall. */
long do_set_timer_op(s_time_t timeout)
{
    struct vcpu *v = current;
    s_time_t offset = timeout - NOW();

    if ( timeout == 0 )
    {
        stop_timer(&v->singleshot_timer);
    }
    else if ( unlikely(timeout < 0) || /* overflow into 64th bit? */
              unlikely((offset > 0) && ((uint32_t)(offset >> 50) != 0)) )
    {
        /*
         * Linux workaround: occasionally we will see timeouts a long way in
         * the future due to wrapping in Linux's jiffy time handling. We check
         * for timeouts wrapped negative, and for positive timeouts more than
         * about 13 days in the future (2^50ns). The correct fix is to trigger
         * an interrupt immediately (since Linux in fact has pending work to
         * do in this situation). However, older guests also set a long timeout
         * when they have *no* pending timers at all: setting an immediate
         * timeout in this case can burn a lot of CPU. We therefore go for a
         * reasonable middleground of triggering a timer event in 100ms.
         */
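        /*
         * Illustrative arithmetic (not part of the original file): the
         * '(uint32_t)(offset >> 50) != 0' test above fires once offset
         * reaches 2^50 ns = 1,125,899,906,842,624 ns ~= 1.126e6 s ~= 13.03
         * days, which is where the "about 13 days" figure comes from.
         */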
        gdprintk(XENLOG_INFO, "Warning: huge timeout set by domain %d "
                 "(vcpu %d): %"PRIx64"\n",
                 v->domain->domain_id, v->vcpu_id, (uint64_t)timeout);
        set_timer(&v->singleshot_timer, NOW() + MILLISECS(100));
    }
    else
    {
        if ( v->singleshot_timer.cpu != smp_processor_id() )
        {
            stop_timer(&v->singleshot_timer);
            v->singleshot_timer.cpu = smp_processor_id();
        }

        set_timer(&v->singleshot_timer, timeout);
    }

    return 0;
}

/* sched_id - fetch ID of current scheduler */
int sched_id(void)
{
    return ops.sched_id;
}

/* Adjust scheduling parameter for a given domain. */
long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
{
    struct vcpu *v;
    long ret;

    if ( (op->sched_id != ops.sched_id) ||
         ((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
          (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
        return -EINVAL;

    /*
     * Most VCPUs we can simply pause. If we are adjusting this VCPU then
     * we acquire the local schedule_lock to guard against concurrent updates.
     *
     * We only acquire the local schedule lock after we have paused all other
     * VCPUs in this domain. There are two reasons for this:
     * 1- We don't want to hold up interrupts as pausing a VCPU can
     *    trigger a tlb shootdown.
     * 2- Pausing other VCPUs involves briefly locking the schedule
     *    lock of the CPU they are running on. This CPU could be the
     *    same as ours.
     */

    for_each_vcpu ( d, v )
    {
        if ( v != current )
            vcpu_pause(v);
    }

    if ( d == current->domain )
        vcpu_schedule_lock_irq(current);

    if ( (ret = SCHED_OP(adjust, d, op)) == 0 )
        TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);

    if ( d == current->domain )
        vcpu_schedule_unlock_irq(current);

    for_each_vcpu ( d, v )
    {
        if ( v != current )
            vcpu_unpause(v);
    }

    return ret;
}

static void vcpu_periodic_timer_work(struct vcpu *v)
{
    s_time_t now = NOW();
    uint64_t periodic_next_event;

    ASSERT(!active_timer(&v->periodic_timer));

    if ( v->periodic_period == 0 )
        return;

    periodic_next_event = v->periodic_last_event + v->periodic_period;
    if ( now > periodic_next_event )
    {
        send_timer_event(v);
        v->periodic_last_event = now;
        periodic_next_event = now + v->periodic_period;
    }

    v->periodic_timer.cpu = smp_processor_id();
    set_timer(&v->periodic_timer, periodic_next_event);
}

/*
 * The main function
 * - deschedule the current domain (scheduler independent).
 * - pick a new domain (scheduler dependent).
 */
static void schedule(void)
{
    struct vcpu          *prev = current, *next = NULL;
    s_time_t              now = NOW();
    struct schedule_data *sd;
    struct task_slice     next_slice;
    s32                   r_time;     /* time for new dom to run */

    ASSERT(!in_irq());
    ASSERT(this_cpu(mc_state).flags == 0);

    perfc_incr(sched_run);

    sd = &this_cpu(schedule_data);

    spin_lock_irq(&sd->schedule_lock);

    stop_timer(&sd->s_timer);

    /* get policy-specific decision on scheduling... */
    next_slice = ops.do_schedule(now);

    r_time = next_slice.time;
    next = next_slice.task;

    sd->curr = next;

    set_timer(&sd->s_timer, now + r_time);

    if ( unlikely(prev == next) )
    {
        spin_unlock_irq(&sd->schedule_lock);
        return continue_running(prev);
    }

    TRACE_2D(TRC_SCHED_SWITCH_INFPREV,
             prev->domain->domain_id,
             now - prev->runstate.state_entry_time);
    TRACE_3D(TRC_SCHED_SWITCH_INFNEXT,
             next->domain->domain_id,
             (next->runstate.state == RUNSTATE_runnable) ?
             (now - next->runstate.state_entry_time) : 0,
             r_time);

    ASSERT(prev->runstate.state == RUNSTATE_running);
    vcpu_runstate_change(
        prev,
        (test_bit(_VPF_blocked, &prev->pause_flags) ? RUNSTATE_blocked :
         (vcpu_runnable(prev) ? RUNSTATE_runnable : RUNSTATE_offline)),
        now);

    ASSERT(next->runstate.state != RUNSTATE_running);
    vcpu_runstate_change(next, RUNSTATE_running, now);

    ASSERT(!next->is_running);
    next->is_running = 1;

    spin_unlock_irq(&sd->schedule_lock);

    perfc_incr(sched_ctx);

    stop_timer(&prev->periodic_timer);

    /* Ensure that the domain has an up-to-date time base. */
    update_vcpu_system_time(next);
    vcpu_periodic_timer_work(next);

    TRACE_4D(TRC_SCHED_SWITCH,
             prev->domain->domain_id, prev->vcpu_id,
             next->domain->domain_id, next->vcpu_id);

    context_switch(prev, next);
}

void context_saved(struct vcpu *prev)
{
    /* Clear running flag /after/ writing context to memory. */
    smp_wmb();

    prev->is_running = 0;

    /* Check for migration request /after/ clearing running flag. */
    smp_mb();

    if ( unlikely(test_bit(_VPF_migrating, &prev->pause_flags)) )
        vcpu_migrate(prev);
}
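
/*
 * Illustrative note (not part of the original file): context_saved() pairs
 * with vcpu_migrate() to make deferred migration work:
 *
 *     requesting CPU                     CPU running the vcpu
 *     --------------                     --------------------
 *     set _VPF_migrating
 *     vcpu_migrate() sees is_running,
 *     backs off leaving the flag set
 *                                        context_switch() saves state
 *                                        context_saved():
 *                                          is_running = 0
 *                                          sees _VPF_migrating
 *                                          vcpu_migrate() now proceeds
 *
 * The smp_mb() keeps the _VPF_migrating test ordered after the is_running
 * store, mirroring the ordering comment in vcpu_migrate(), so a migration
 * request cannot be missed by both sides.
 */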

/* The scheduler timer: force a run through the scheduler */
static void s_timer_fn(void *unused)
{
    raise_softirq(SCHEDULE_SOFTIRQ);
    perfc_incr(sched_irq);
}

/* Per-VCPU periodic timer function: sends a virtual timer interrupt. */
static void vcpu_periodic_timer_fn(void *data)
{
    struct vcpu *v = data;
    vcpu_periodic_timer_work(v);
}

/* Per-VCPU single-shot timer function: sends a virtual timer interrupt. */
static void vcpu_singleshot_timer_fn(void *data)
{
    struct vcpu *v = data;
    send_timer_event(v);
}

/* SCHEDOP_poll timeout callback. */
static void poll_timer_fn(void *data)
{
    struct vcpu *v = data;

    if ( !v->is_polling )
        return;

    v->is_polling = 0;
    vcpu_unblock(v);
}

/* Initialise the data structures. */
void __init scheduler_init(void)
{
    int i;

    open_softirq(SCHEDULE_SOFTIRQ, schedule);

    for_each_cpu ( i )
    {
        spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
        init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
    }

    for ( i = 0; schedulers[i] != NULL; i++ )
    {
        ops = *schedulers[i];
        if ( strcmp(ops.opt_name, opt_sched) == 0 )
            break;
    }

    if ( schedulers[i] == NULL )
        printk("Could not find scheduler: %s\n", opt_sched);

    printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
    SCHED_OP(init);
}
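
/*
 * Illustrative note (not part of the original file): string_param("sched",
 * opt_sched) near the top of this file ties the selection loop above to the
 * hypervisor command line, e.g. booting with "sched=sedf" selects SEDF while
 * the default "sched=credit" selects the credit scheduler. If the name is
 * not recognised, the loop leaves 'ops' holding the last entry in
 * schedulers[], so after the warning Xen falls back to that scheduler rather
 * than failing to boot.
 */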

void dump_runq(unsigned char key)
{
    s_time_t      now = NOW();
    int           i;
    unsigned long flags;

    local_irq_save(flags);

    printk("Scheduler: %s (%s)\n", ops.name, ops.opt_name);
    SCHED_OP(dump_settings);
    printk("NOW=0x%08X%08X\n", (u32)(now>>32), (u32)now);

    for_each_online_cpu ( i )
    {
        spin_lock(&per_cpu(schedule_data, i).schedule_lock);
        printk("CPU[%02d] ", i);
        SCHED_OP(dump_cpu_state, i);
        spin_unlock(&per_cpu(schedule_data, i).schedule_lock);
    }

    local_irq_restore(flags);
}

#ifdef CONFIG_COMPAT
#include "compat/schedule.c"
#endif

#endif /* !COMPAT */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */