ia64/xen-unstable

xen/common/schedule.c @ 14534:fbe72d878196

Return an error when a schedule-setting call fails.

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
author    Ewan Mellor <ewan@xensource.com>
date      Fri Mar 23 11:52:09 2007 +0000
parents   8fa8de63abf4
children  ba9d3fd4ee4b
/****************************************************************************
 * (C) 2002-2003 - Rolf Neugebauer - Intel Research Cambridge
 * (C) 2002-2003 University of Cambridge
 * (C) 2004      - Mark Williamson - Intel Research Cambridge
 ****************************************************************************
 *
 *        File: common/schedule.c
 *      Author: Rolf Neugebauer & Keir Fraser
 *              Updated for generic API by Mark Williamson
 *
 * Description: Generic CPU scheduling code
 *              implements support functionality for the Xen scheduler API.
 *
 */

#ifndef COMPAT
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/delay.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/perfc.h>
#include <xen/sched-if.h>
#include <xen/softirq.h>
#include <xen/trace.h>
#include <xen/mm.h>
#include <xen/errno.h>
#include <xen/guest_access.h>
#include <xen/multicall.h>
#include <public/sched.h>

/* opt_sched: scheduler - default to credit */
static char opt_sched[10] = "credit";
string_param("sched", opt_sched);

/* opt_dom0_vcpus_pin: If true, dom0 VCPUs are pinned. */
static unsigned int opt_dom0_vcpus_pin;
boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin);
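
/*
 * Note: both options above are set from the Xen boot command line via
 * string_param()/boolean_param(), e.g. "sched=credit dom0_vcpus_pin".
 */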

#define TIME_SLOP      (s32)MICROSECS(50)     /* allow time to slip a bit */

/* Various timer handlers. */
static void s_timer_fn(void *unused);
static void vcpu_periodic_timer_fn(void *data);
static void vcpu_singleshot_timer_fn(void *data);
static void poll_timer_fn(void *data);

/* This is global for now so that private implementations can reach it */
DEFINE_PER_CPU(struct schedule_data, schedule_data);

extern struct scheduler sched_sedf_def;
extern struct scheduler sched_credit_def;
static struct scheduler *schedulers[] = {
    &sched_sedf_def,
    &sched_credit_def,
    NULL
};

static struct scheduler ops;

#define SCHED_OP(fn, ...)                                 \
         (( ops.fn != NULL ) ? ops.fn( __VA_ARGS__ )      \
          : (typeof(ops.fn(__VA_ARGS__)))0 )
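
/*
 * SCHED_OP() dispatches to the corresponding hook of the scheduler selected
 * at boot (see scheduler_init()).  If the scheduler does not implement the
 * hook, the expression evaluates to a zero of the hook's return type, which
 * degenerates to a no-op for void hooks.
 */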

static inline void vcpu_runstate_change(
    struct vcpu *v, int new_state, s_time_t new_entry_time)
{
    ASSERT(v->runstate.state != new_state);
    ASSERT(spin_is_locked(&per_cpu(schedule_data,v->processor).schedule_lock));

    v->runstate.time[v->runstate.state] +=
        new_entry_time - v->runstate.state_entry_time;
    v->runstate.state_entry_time = new_entry_time;
    v->runstate.state = new_state;
}
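
/*
 * vcpu_runstate_get(): snapshot a VCPU's accumulated runstate times.  A VCPU
 * reading its own runstate cannot be descheduled underneath itself, so the
 * fast path below needs no lock; for any other VCPU the per-CPU schedule
 * lock must be held while copying.
 */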

void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
{
    if ( likely(v == current) )
    {
        /* Fast lock-free path. */
        memcpy(runstate, &v->runstate, sizeof(*runstate));
        ASSERT(runstate->state == RUNSTATE_running);
        runstate->time[RUNSTATE_running] += NOW() - runstate->state_entry_time;
    }
    else
    {
        vcpu_schedule_lock_irq(v);
        memcpy(runstate, &v->runstate, sizeof(*runstate));
        runstate->time[runstate->state] += NOW() - runstate->state_entry_time;
        vcpu_schedule_unlock_irq(v);
    }
}

int sched_init_vcpu(struct vcpu *v, unsigned int processor)
{
    struct domain *d = v->domain;

    /*
     * Initialize processor and affinity settings. The idler, and potentially
     * domain-0 VCPUs, are pinned onto their respective physical CPUs.
     */
    v->processor = processor;
    if ( is_idle_domain(d) || ((d->domain_id == 0) && opt_dom0_vcpus_pin) )
        v->cpu_affinity = cpumask_of_cpu(processor);
    else
        cpus_setall(v->cpu_affinity);

    /* Initialise the per-vcpu timers. */
    init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
               v, v->processor);
    init_timer(&v->singleshot_timer, vcpu_singleshot_timer_fn,
               v, v->processor);
    init_timer(&v->poll_timer, poll_timer_fn,
               v, v->processor);

    /* Idle VCPUs are scheduled immediately. */
    if ( is_idle_domain(d) )
    {
        per_cpu(schedule_data, v->processor).curr = v;
        per_cpu(schedule_data, v->processor).idle = v;
        set_bit(_VCPUF_running, &v->vcpu_flags);
    }

    TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);

    return SCHED_OP(init_vcpu, v);
}

void sched_destroy_vcpu(struct vcpu *v)
{
    kill_timer(&v->periodic_timer);
    kill_timer(&v->singleshot_timer);
    kill_timer(&v->poll_timer);
    SCHED_OP(destroy_vcpu, v);
}

int sched_init_domain(struct domain *d)
{
    return SCHED_OP(init_domain, d);
}

void sched_destroy_domain(struct domain *d)
{
    SCHED_OP(destroy_domain, d);
}

void vcpu_sleep_nosync(struct vcpu *v)
{
    unsigned long flags;

    vcpu_schedule_lock_irqsave(v, flags);

    if ( likely(!vcpu_runnable(v)) )
    {
        if ( v->runstate.state == RUNSTATE_runnable )
            vcpu_runstate_change(v, RUNSTATE_offline, NOW());

        SCHED_OP(sleep, v);
    }

    vcpu_schedule_unlock_irqrestore(v, flags);

    TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
}

void vcpu_sleep_sync(struct vcpu *v)
{
    vcpu_sleep_nosync(v);

    while ( !vcpu_runnable(v) && test_bit(_VCPUF_running, &v->vcpu_flags) )
        cpu_relax();

    sync_vcpu_execstate(v);
}

void vcpu_wake(struct vcpu *v)
{
    unsigned long flags;

    vcpu_schedule_lock_irqsave(v, flags);

    if ( likely(vcpu_runnable(v)) )
    {
        if ( v->runstate.state >= RUNSTATE_blocked )
            vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
        SCHED_OP(wake, v);
    }
    else if ( !test_bit(_VCPUF_blocked, &v->vcpu_flags) )
    {
        if ( v->runstate.state == RUNSTATE_blocked )
            vcpu_runstate_change(v, RUNSTATE_offline, NOW());
    }

    vcpu_schedule_unlock_irqrestore(v, flags);

    TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
}
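
/*
 * vcpu_migrate(): move a VCPU with _VCPUF_migrating set to the CPU chosen by
 * the scheduler's pick_cpu hook.  The VCPU's schedule lock is the lock of
 * the CPU named in v->processor, so once v->processor has been rewritten the
 * *old* CPU's lock must be dropped explicitly rather than via
 * vcpu_schedule_unlock_irqrestore().
 */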

static void vcpu_migrate(struct vcpu *v)
{
    unsigned long flags;
    int old_cpu;

    vcpu_schedule_lock_irqsave(v, flags);

    if ( test_bit(_VCPUF_running, &v->vcpu_flags) ||
         !test_and_clear_bit(_VCPUF_migrating, &v->vcpu_flags) )
    {
        vcpu_schedule_unlock_irqrestore(v, flags);
        return;
    }

    /* Switch to new CPU, then unlock old CPU. */
    old_cpu = v->processor;
    v->processor = SCHED_OP(pick_cpu, v);
    spin_unlock_irqrestore(
        &per_cpu(schedule_data, old_cpu).schedule_lock, flags);

    /* Wake on new CPU. */
    vcpu_wake(v);
}

/*
 * Force a VCPU through a deschedule/reschedule path.
 * For example, using this when setting the periodic timer period means that
 * most periodic-timer state need only be touched from within the scheduler
 * which can thus be done without need for synchronisation.
 */
void vcpu_force_reschedule(struct vcpu *v)
{
    vcpu_schedule_lock_irq(v);
    if ( test_bit(_VCPUF_running, &v->vcpu_flags) )
        set_bit(_VCPUF_migrating, &v->vcpu_flags);
    vcpu_schedule_unlock_irq(v);

    if ( test_bit(_VCPUF_migrating, &v->vcpu_flags) )
    {
        vcpu_sleep_nosync(v);
        vcpu_migrate(v);
    }
}

int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
{
    cpumask_t online_affinity;

    if ( (v->domain->domain_id == 0) && opt_dom0_vcpus_pin )
        return -EINVAL;

    cpus_and(online_affinity, *affinity, cpu_online_map);
    if ( cpus_empty(online_affinity) )
        return -EINVAL;

    vcpu_schedule_lock_irq(v);

    v->cpu_affinity = *affinity;
    if ( !cpu_isset(v->processor, v->cpu_affinity) )
        set_bit(_VCPUF_migrating, &v->vcpu_flags);

    vcpu_schedule_unlock_irq(v);

    if ( test_bit(_VCPUF_migrating, &v->vcpu_flags) )
    {
        vcpu_sleep_nosync(v);
        vcpu_migrate(v);
    }

    return 0;
}

/* Block the currently-executing domain until a pertinent event occurs. */
static long do_block(void)
{
    struct vcpu *v = current;

    local_event_delivery_enable();
    set_bit(_VCPUF_blocked, &v->vcpu_flags);

    /* Check for events /after/ blocking: avoids wakeup waiting race. */
    if ( local_events_need_delivery() )
    {
        clear_bit(_VCPUF_blocked, &v->vcpu_flags);
    }
    else
    {
        TRACE_2D(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
        raise_softirq(SCHEDULE_SOFTIRQ);
    }

    return 0;
}
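
/*
 * SCHEDOP_poll: block the calling VCPU until one of the listed event-channel
 * ports becomes pending, or until the timeout (if nonzero) expires.  The
 * timeout is passed straight to set_timer(), i.e. as an absolute system
 * time.  If any listed port is already pending we return immediately.
 */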

static long do_poll(struct sched_poll *sched_poll)
{
    struct vcpu   *v = current;
    struct domain *d = v->domain;
    evtchn_port_t  port;
    long           rc = 0;
    unsigned int   i;

    /* Fairly arbitrary limit. */
    if ( sched_poll->nr_ports > 128 )
        return -EINVAL;

    if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
        return -EFAULT;

    /* These operations must occur in order. */
    set_bit(_VCPUF_blocked, &v->vcpu_flags);
    set_bit(_VCPUF_polling, &v->vcpu_flags);
    set_bit(_DOMF_polling, &d->domain_flags);

    /* Check for events /after/ setting flags: avoids wakeup waiting race. */
    for ( i = 0; i < sched_poll->nr_ports; i++ )
    {
        rc = -EFAULT;
        if ( __copy_from_guest_offset(&port, sched_poll->ports, i, 1) )
            goto out;

        rc = -EINVAL;
        if ( port >= MAX_EVTCHNS(d) )
            goto out;

        rc = 0;
        if ( test_bit(port, shared_info_addr(d, evtchn_pending)) )
            goto out;
    }

    if ( sched_poll->timeout != 0 )
        set_timer(&v->poll_timer, sched_poll->timeout);

    TRACE_2D(TRC_SCHED_BLOCK, d->domain_id, v->vcpu_id);
    raise_softirq(SCHEDULE_SOFTIRQ);

    return 0;

 out:
    clear_bit(_VCPUF_polling, &v->vcpu_flags);
    clear_bit(_VCPUF_blocked, &v->vcpu_flags);
    return rc;
}

/* Voluntarily yield the processor for this allocation. */
static long do_yield(void)
{
    TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
    raise_softirq(SCHEDULE_SOFTIRQ);
    return 0;
}

long do_sched_op_compat(int cmd, unsigned long arg)
{
    long ret = 0;

    switch ( cmd )
    {
    case SCHEDOP_yield:
    {
        ret = do_yield();
        break;
    }

    case SCHEDOP_block:
    {
        ret = do_block();
        break;
    }

    case SCHEDOP_shutdown:
    {
        TRACE_3D(TRC_SCHED_SHUTDOWN,
                 current->domain->domain_id, current->vcpu_id, arg);
        domain_shutdown(current->domain, (u8)arg);
        break;
    }

    default:
        ret = -ENOSYS;
    }

    return ret;
}
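
/*
 * Note: do_sched_op_compat() above is the legacy sched_op interface, which
 * passes its argument as a plain unsigned long.  Newer guests use
 * do_sched_op() below, which takes a guest handle to a per-command argument
 * structure.
 */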

typedef long ret_t;

#endif /* !COMPAT */

ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    ret_t ret = 0;

    switch ( cmd )
    {
    case SCHEDOP_yield:
    {
        ret = do_yield();
        break;
    }

    case SCHEDOP_block:
    {
        ret = do_block();
        break;
    }

    case SCHEDOP_shutdown:
    {
        struct sched_shutdown sched_shutdown;

        ret = -EFAULT;
        if ( copy_from_guest(&sched_shutdown, arg, 1) )
            break;

        ret = 0;
        TRACE_3D(TRC_SCHED_SHUTDOWN,
                 current->domain->domain_id, current->vcpu_id,
                 sched_shutdown.reason);
        domain_shutdown(current->domain, (u8)sched_shutdown.reason);

        break;
    }

    case SCHEDOP_poll:
    {
        struct sched_poll sched_poll;

        ret = -EFAULT;
        if ( copy_from_guest(&sched_poll, arg, 1) )
            break;

        ret = do_poll(&sched_poll);

        break;
    }

    case SCHEDOP_remote_shutdown:
    {
        struct domain *d;
        struct sched_remote_shutdown sched_remote_shutdown;

        if ( !IS_PRIV(current->domain) )
            return -EPERM;

        ret = -EFAULT;
        if ( copy_from_guest(&sched_remote_shutdown, arg, 1) )
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(sched_remote_shutdown.domain_id);
        if ( d == NULL )
            break;

        domain_shutdown(d, (u8)sched_remote_shutdown.reason);
        rcu_unlock_domain(d);
        ret = 0;

        break;
    }

    default:
        ret = -ENOSYS;
    }

    return ret;
}
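
/*
 * Guest-side usage (illustrative sketch, not part of this file): a guest
 * reaches the commands above through its sched_op hypercall wrapper, e.g.
 *
 *     struct sched_shutdown ss = { .reason = SHUTDOWN_poweroff };
 *     HYPERVISOR_sched_op(SCHEDOP_shutdown, &ss);
 */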

#ifndef COMPAT

/* Per-vcpu oneshot-timer hypercall. */
long do_set_timer_op(s_time_t timeout)
{
    struct vcpu *v = current;
    s_time_t offset = timeout - NOW();

    if ( timeout == 0 )
    {
        stop_timer(&v->singleshot_timer);
    }
    else if ( unlikely(timeout < 0) || /* overflow into 64th bit? */
              unlikely((offset > 0) && ((uint32_t)(offset >> 50) != 0)) )
    {
        /*
         * Linux workaround: occasionally we will see timeouts a long way in
         * the future due to wrapping in Linux's jiffy time handling. We check
         * for timeouts wrapped negative, and for positive timeouts more than
         * about 13 days in the future (2^50ns). The correct fix is to trigger
         * an interrupt immediately (since Linux in fact has pending work to
         * do in this situation). However, older guests also set a long timeout
         * when they have *no* pending timers at all: setting an immediate
         * timeout in this case can burn a lot of CPU. We therefore go for a
         * reasonable middleground of triggering a timer event in 100ms.
         */
        gdprintk(XENLOG_INFO, "Warning: huge timeout set by domain %d "
                 "(vcpu %d): %"PRIx64"\n",
                 v->domain->domain_id, v->vcpu_id, (uint64_t)timeout);
        set_timer(&v->singleshot_timer, NOW() + MILLISECS(100));
    }
    else
    {
        if ( v->singleshot_timer.cpu != smp_processor_id() )
        {
            stop_timer(&v->singleshot_timer);
            v->singleshot_timer.cpu = smp_processor_id();
        }

        set_timer(&v->singleshot_timer, timeout);
    }

    return 0;
}
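
/*
 * (For reference: 2^50 ns ~= 1.13e15 ns ~= 1.13e6 s ~= 13.0 days, which is
 * where the "about 13 days" figure in the comment above comes from.)
 */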

/* sched_id - fetch ID of current scheduler */
int sched_id(void)
{
    return ops.sched_id;
}

/* Adjust scheduling parameter for a given domain. */
long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
{
    struct vcpu *v;
    long ret;

    if ( (op->sched_id != ops.sched_id) ||
         ((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
          (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
        return -EINVAL;

    /*
     * Most VCPUs we can simply pause. If we are adjusting this VCPU then
     * we acquire the local schedule_lock to guard against concurrent updates.
     *
     * We only acquire the local schedule lock after we have paused all other
     * VCPUs in this domain. There are two reasons for this:
     * 1- We don't want to hold up interrupts as pausing a VCPU can
     *    trigger a tlb shootdown.
     * 2- Pausing other VCPUs involves briefly locking the schedule
     *    lock of the CPU they are running on. This CPU could be the
     *    same as ours.
     */

    for_each_vcpu ( d, v )
    {
        if ( v != current )
            vcpu_pause(v);
    }

    if ( d == current->domain )
        vcpu_schedule_lock_irq(current);

    if ( (ret = SCHED_OP(adjust, d, op)) == 0 )
        TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);

    if ( d == current->domain )
        vcpu_schedule_unlock_irq(current);

    for_each_vcpu ( d, v )
    {
        if ( v != current )
            vcpu_unpause(v);
    }

    return ret;
}
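
/*
 * Reprogram a VCPU's periodic timer: deliver a timer event if the period has
 * already elapsed, then re-arm the timer on the current CPU for the next
 * period boundary.  Called with the periodic timer inactive; a period of
 * zero disables the timer entirely.
 */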

static void vcpu_periodic_timer_work(struct vcpu *v)
{
    s_time_t now = NOW();
    uint64_t periodic_next_event;

    ASSERT(!active_timer(&v->periodic_timer));

    if ( v->periodic_period == 0 )
        return;

    periodic_next_event = v->periodic_last_event + v->periodic_period;
    if ( now > periodic_next_event )
    {
        send_timer_event(v);
        v->periodic_last_event = now;
        periodic_next_event = now + v->periodic_period;
    }

    v->periodic_timer.cpu = smp_processor_id();
    set_timer(&v->periodic_timer, periodic_next_event);
}

/*
 * The main function
 * - deschedule the current domain (scheduler independent).
 * - pick a new domain (scheduler dependent).
 */
static void schedule(void)
{
    struct vcpu          *prev = current, *next = NULL;
    s_time_t              now = NOW();
    struct schedule_data *sd;
    struct task_slice     next_slice;
    s32                   r_time;     /* time for new dom to run */

    ASSERT(!in_irq());
    ASSERT(this_cpu(mc_state).flags == 0);

    perfc_incrc(sched_run);

    sd = &this_cpu(schedule_data);

    spin_lock_irq(&sd->schedule_lock);

    stop_timer(&sd->s_timer);

    /* get policy-specific decision on scheduling... */
    next_slice = ops.do_schedule(now);

    r_time = next_slice.time;
    next = next_slice.task;

    sd->curr = next;

    set_timer(&sd->s_timer, now + r_time);

    if ( unlikely(prev == next) )
    {
        spin_unlock_irq(&sd->schedule_lock);
        return continue_running(prev);
    }

    TRACE_2D(TRC_SCHED_SWITCH_INFPREV,
             prev->domain->domain_id,
             now - prev->runstate.state_entry_time);
    TRACE_3D(TRC_SCHED_SWITCH_INFNEXT,
             next->domain->domain_id,
             (next->runstate.state == RUNSTATE_runnable) ?
             (now - next->runstate.state_entry_time) : 0,
             r_time);

    ASSERT(prev->runstate.state == RUNSTATE_running);
    vcpu_runstate_change(
        prev,
        (test_bit(_VCPUF_blocked, &prev->vcpu_flags) ? RUNSTATE_blocked :
         (vcpu_runnable(prev) ? RUNSTATE_runnable : RUNSTATE_offline)),
        now);

    ASSERT(next->runstate.state != RUNSTATE_running);
    vcpu_runstate_change(next, RUNSTATE_running, now);

    ASSERT(!test_bit(_VCPUF_running, &next->vcpu_flags));
    set_bit(_VCPUF_running, &next->vcpu_flags);

    spin_unlock_irq(&sd->schedule_lock);

    perfc_incrc(sched_ctx);

    stop_timer(&prev->periodic_timer);

    /* Ensure that the domain has an up-to-date time base. */
    if ( !is_idle_vcpu(next) )
    {
        update_vcpu_system_time(next);
        vcpu_periodic_timer_work(next);
    }

    TRACE_4D(TRC_SCHED_SWITCH,
             prev->domain->domain_id, prev->vcpu_id,
             next->domain->domain_id, next->vcpu_id);

    context_switch(prev, next);
}
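
/*
 * context_saved() is called by the architecture context-switch code once the
 * previous VCPU's state has been fully saved: only then is it safe to clear
 * _VCPUF_running and to complete any migration that was deferred because the
 * VCPU was still running.
 */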

void context_saved(struct vcpu *prev)
{
    clear_bit(_VCPUF_running, &prev->vcpu_flags);

    if ( unlikely(test_bit(_VCPUF_migrating, &prev->vcpu_flags)) )
        vcpu_migrate(prev);
}

/* The scheduler timer: force a run through the scheduler */
static void s_timer_fn(void *unused)
{
    raise_softirq(SCHEDULE_SOFTIRQ);
    perfc_incrc(sched_irq);
}

/* Per-VCPU periodic timer function: sends a virtual timer interrupt. */
static void vcpu_periodic_timer_fn(void *data)
{
    struct vcpu *v = data;
    vcpu_periodic_timer_work(v);
}

/* Per-VCPU single-shot timer function: sends a virtual timer interrupt. */
static void vcpu_singleshot_timer_fn(void *data)
{
    struct vcpu *v = data;
    send_timer_event(v);
}

/* SCHEDOP_poll timeout callback. */
static void poll_timer_fn(void *data)
{
    struct vcpu *v = data;
    if ( test_and_clear_bit(_VCPUF_polling, &v->vcpu_flags) )
        vcpu_unblock(v);
}

/* Initialise the data structures. */
void __init scheduler_init(void)
{
    int i;

    open_softirq(SCHEDULE_SOFTIRQ, schedule);

    for_each_cpu ( i )
    {
        spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
        init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
    }

    for ( i = 0; schedulers[i] != NULL; i++ )
    {
        ops = *schedulers[i];
        if ( strcmp(ops.opt_name, opt_sched) == 0 )
            break;
    }

    if ( schedulers[i] == NULL )
        printk("Could not find scheduler: %s\n", opt_sched);

    printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
    SCHED_OP(init);
}

void dump_runq(unsigned char key)
{
    s_time_t      now = NOW();
    int           i;
    unsigned long flags;

    local_irq_save(flags);

    printk("Scheduler: %s (%s)\n", ops.name, ops.opt_name);
    SCHED_OP(dump_settings);
    printk("NOW=0x%08X%08X\n", (u32)(now>>32), (u32)now);

    for_each_online_cpu ( i )
    {
        spin_lock(&per_cpu(schedule_data, i).schedule_lock);
        printk("CPU[%02d] ", i);
        SCHED_OP(dump_cpu_state, i);
        spin_unlock(&per_cpu(schedule_data, i).schedule_lock);
    }

    local_irq_restore(flags);
}

#ifdef CONFIG_COMPAT
#include "compat/schedule.c"
#endif

#endif /* !COMPAT */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */