ia64/xen-unstable

changeset 785:b45bc774c22c

bitkeeper revision 1.478 (3f7da585sVsHrnjL3JadH9su8Cax_g)

Many files:
We now have one ring-0 stack per CPU, rather than one per domain. Register state is saved to a guest-accessible context slot, so we can do scheduler activations or similar real soon now...
author kaf24@scramble.cl.cam.ac.uk
date Fri Oct 03 16:36:21 2003 +0000 (2003-10-03)
parents 531015eb9ae7
children d666e315c859 75d9daa46c7a
files xen/arch/i386/boot/boot.S xen/arch/i386/entry.S xen/arch/i386/idle0_task.c xen/arch/i386/process.c xen/arch/i386/setup.c xen/arch/i386/smpboot.c xen/arch/i386/traps.c xen/common/kernel.c xen/common/schedule.c xen/include/asm-i386/current.h xen/include/asm-i386/irq.h xen/include/asm-i386/processor.h xen/include/asm-i386/ptrace.h xen/include/asm-i386/system.h xen/include/hypervisor-ifs/hypervisor-if.h xen/include/xeno/perfc_defn.h xen/include/xeno/sched.h
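The new per-CPU stack layout is implied by the accessors added to xen/include/asm-i386/current.h below: each CPU gets a single 4KB, page-aligned ring-0 stack whose last word caches the current task pointer, with the 68-byte guest register frame (execution_context_t, 17 longs) sitting immediately beneath it. A minimal C sketch of that layout, with offsets taken from the inline assembly in the diff; the struct name cpu_stack and the helper sketch_get_current are illustrative, not part of the changeset:

    #include <stdint.h>

    /* One 4KB ring-0 stack per CPU.  Offsets match the constants in
     * current.h: get_execution_context() = (%esp & ~4095) + 4096-72,
     * get_current() = *(%esp | 4096-4).                               */
    struct cpu_stack {
        uint8_t  stack[4096 - 72];   /* stack proper, grows downwards  */
        uint32_t context[17];        /* execution_context_t save frame */
        void    *current;            /* cached task_struct pointer     */
    };                               /* sizeof == 4096, page-aligned   */

    /* Because the stack page is 4KB-aligned, any %esp within it can
     * recover the 'current' slot with a single OR, exactly as the new
     * GET_CURRENT macro does:
     *     movl $4096-4, reg ; orl %esp, reg ; movl (reg), reg         */
    static inline void *sketch_get_current(unsigned long esp)
    {
        return *(void **)(esp | (4096 - 4));
    }

This is presumably also why boot.S now points the initial %esp at cpu0_stack + 4000: execution begins just below the reserved context/current words at the top of the page.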
line diff
     1.1 --- a/xen/arch/i386/boot/boot.S	Wed Oct 01 15:54:32 2003 +0000
     1.2 +++ b/xen/arch/i386/boot/boot.S	Fri Oct 03 16:36:21 2003 +0000
     1.3 @@ -69,7 +69,7 @@ 1:      lss     stack_start-__PAGE_OFFSE
     1.4          
     1.5          call    start_paging
     1.6          lidt    idt_descr                        
     1.7 -        jmp     initialize_secondary
     1.8 +        jmp     start_secondary
     1.9  #endif
    1.10          
    1.11  continue_boot_cpu:
    1.12 @@ -212,7 +212,7 @@ 1:      jmp     1b
    1.13  /*** STACK LOCATION ***/
    1.14          
    1.15  ENTRY(stack_start)
    1.16 -        .long SYMBOL_NAME(idle0_task_union)+8192-__PAGE_OFFSET
    1.17 +        .long SYMBOL_NAME(cpu0_stack) + 4000 - __PAGE_OFFSET
    1.18          .long __HYPERVISOR_DS
    1.19          
    1.20  /*** DESCRIPTOR TABLES ***/
    1.21 @@ -258,7 +258,7 @@ ENTRY(gdt_table)
    1.22          .org 0x1000
    1.23  ENTRY(idle0_pg_table)
    1.24          .org 0x2000
    1.25 -ENTRY(idle0_task_union)
    1.26 -        .org 0x4000
    1.27 +ENTRY(cpu0_stack)
    1.28 +        .org 0x3000
    1.29  ENTRY(stext)
    1.30  ENTRY(_stext)
     2.1 --- a/xen/arch/i386/entry.S	Wed Oct 01 15:54:32 2003 +0000
     2.2 +++ b/xen/arch/i386/entry.S	Fri Oct 03 16:36:21 2003 +0000
     2.3 @@ -194,11 +194,12 @@ 6:      pushl %eax;     \
     2.4  	.long 5b,10b;	             \
     2.5  .previous
     2.6  
     2.7 -#define GET_CURRENT(reg)  \
     2.8 -	movl $-8192, reg; \
     2.9 -	andl %esp, reg
    2.10 +#define GET_CURRENT(reg)   \
    2.11 +	movl $4096-4, reg; \
    2.12 +        orl  %esp, reg;    \
    2.13 +        movl (reg),reg     \
    2.14  
    2.15 -ENTRY(ret_from_newdomain)
    2.16 +ENTRY(continue_nonidle_task)
    2.17  	GET_CURRENT(%ebx)
    2.18  	jmp test_all_events
    2.19  
     3.1 --- a/xen/arch/i386/idle0_task.c	Wed Oct 01 15:54:32 2003 +0000
     3.2 +++ b/xen/arch/i386/idle0_task.c	Fri Oct 03 16:36:21 2003 +0000
     3.3 @@ -2,12 +2,7 @@
     3.4  #include <xeno/sched.h>
     3.5  #include <asm/desc.h>
     3.6  
     3.7 -/*
     3.8 - * Initial task structure. XXX KAF: To get this 8192-byte aligned without
     3.9 - * linker tricks I copy it into aligned BSS area at boot time.
    3.10 - * Actual name idle0_task_union now declared in boot.S.
    3.11 - */
    3.12 -struct task_struct first_task_struct = IDLE0_TASK(idle0_task_union.task);
    3.13 +struct task_struct idle0_task = IDLE0_TASK(idle0_task);
    3.14  
    3.15  /*
    3.16   * per-CPU TSS segments. Threads are completely 'soft' on Linux,
     4.1 --- a/xen/arch/i386/process.c	Wed Oct 01 15:54:32 2003 +0000
     4.2 +++ b/xen/arch/i386/process.c	Fri Oct 03 16:36:21 2003 +0000
     4.3 @@ -31,11 +31,6 @@
     4.4  #include <xeno/irq.h>
     4.5  #include <xeno/event.h>
     4.6  
     4.7 -#define GET_SYSCALL_REGS(_p) \
     4.8 -    (((struct pt_regs *)(THREAD_SIZE + (unsigned long)(_p))) - 1)
     4.9 -
    4.10 -asmlinkage void ret_from_newdomain(void) __asm__("ret_from_newdomain");
    4.11 -
    4.12  int hlt_counter;
    4.13  
    4.14  void disable_hlt(void)
    4.15 @@ -63,16 +58,21 @@ static void default_idle(void)
    4.16      }
    4.17  }
    4.18  
    4.19 -/*
    4.20 - * The idle thread. There's no useful work to be
    4.21 - * done, so just try to conserve power and have a
    4.22 - * low exit latency (ie sit in a loop waiting for
    4.23 - * somebody to say that they'd like to reschedule)
    4.24 - */
    4.25 -void cpu_idle (void)
    4.26 +void continue_cpu_idle_loop(void)
    4.27  {
    4.28      int cpu = smp_processor_id();
    4.29 +    for ( ; ; )
    4.30 +    {
    4.31 +        irq_stat[cpu].idle_timestamp = jiffies;
    4.32 +        while (!current->hyp_events && !softirq_pending(cpu))
    4.33 +            default_idle();
    4.34 +        do_hyp_events();
    4.35 +        do_softirq();
    4.36 +    }
    4.37 +}
    4.38  
    4.39 +void startup_cpu_idle_loop(void)
    4.40 +{
    4.41      /* Just some sanity to ensure that the scheduler is set up okay. */
    4.42      ASSERT(current->domain == IDLE_DOMAIN_ID);
    4.43      (void)wake_up(current);
    4.44 @@ -85,14 +85,7 @@ void cpu_idle (void)
    4.45      smp_mb();
    4.46      init_idle();
    4.47  
    4.48 -    for ( ; ; )
    4.49 -    {
    4.50 -        irq_stat[cpu].idle_timestamp = jiffies;
    4.51 -        while (!current->hyp_events && !softirq_pending(cpu))
    4.52 -            default_idle();
    4.53 -        do_hyp_events();
    4.54 -        do_softirq();
    4.55 -    }
    4.56 +    continue_cpu_idle_loop();
    4.57  }
    4.58  
    4.59  static long no_idt[2];
    4.60 @@ -186,43 +179,6 @@ void machine_power_off(void)
    4.61      machine_restart(0);
    4.62  }
    4.63  
    4.64 -extern void show_trace(unsigned long* esp);
    4.65 -
    4.66 -void show_regs(struct pt_regs * regs)
    4.67 -{
    4.68 -    unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
    4.69 -
    4.70 -    printk("\n");
    4.71 -    printk("EIP: %04x:[<%08lx>] CPU: %d",0xffff & regs->xcs,regs->eip, smp_processor_id());
    4.72 -    if (regs->xcs & 3)
    4.73 -        printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
    4.74 -    printk(" EFLAGS: %08lx\n",regs->eflags);
    4.75 -    printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
    4.76 -           regs->eax,regs->ebx,regs->ecx,regs->edx);
    4.77 -    printk("ESI: %08lx EDI: %08lx EBP: %08lx",
    4.78 -           regs->esi, regs->edi, regs->ebp);
    4.79 -    printk(" DS: %04x ES: %04x FS: %04x GS: %04x\n",
    4.80 -           0xffff & regs->xds, 0xffff & regs->xes,
    4.81 -           0xffff & regs->xfs, 0xffff & regs->xgs);
    4.82 -
    4.83 -    __asm__("movl %%cr0, %0": "=r" (cr0));
    4.84 -    __asm__("movl %%cr2, %0": "=r" (cr2));
    4.85 -    __asm__("movl %%cr3, %0": "=r" (cr3));
    4.86 -    /* This could fault if %cr4 does not exist */
    4.87 -    __asm__("1: movl %%cr4, %0		\n"
    4.88 -            "2:				\n"
    4.89 -            ".section __ex_table,\"a\"	\n"
    4.90 -            ".long 1b,2b			\n"
    4.91 -            ".previous			\n"
    4.92 -            : "=r" (cr4): "0" (0));
    4.93 -    printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
    4.94 -    show_trace(&regs->esp);
    4.95 -}
    4.96 -
    4.97 -
    4.98 -/*
    4.99 - * Free current thread data structures etc..
   4.100 - */
   4.101  void exit_thread(void)
   4.102  {
   4.103      /* nothing to do ... */
   4.104 @@ -249,8 +205,7 @@ void new_thread(struct task_struct *p,
   4.105                  unsigned long start_stack,
   4.106                  unsigned long start_info)
   4.107  {
   4.108 -    struct pt_regs *regs = GET_SYSCALL_REGS(p);
   4.109 -    memset(regs, 0, sizeof(*regs));
   4.110 +    execution_context_t *ec = &p->shared_info->execution_context;
   4.111  
   4.112      /*
   4.113       * Initial register values:
   4.114 @@ -260,20 +215,14 @@ void new_thread(struct task_struct *p,
   4.115       *          ESI = start_info
   4.116       *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
   4.117       */
   4.118 -    p->thread.fs = p->thread.gs = FLAT_RING1_DS;
   4.119 -    regs->xds = regs->xes = regs->xfs = regs->xgs = regs->xss = FLAT_RING1_DS;
   4.120 -    regs->xcs = FLAT_RING1_CS;
   4.121 -    regs->eip = start_pc;
   4.122 -    regs->esp = start_stack;
   4.123 -    regs->esi = start_info;
   4.124 +    ec->ds = ec->es = ec->fs = ec->gs = ec->ss = FLAT_RING1_DS;
   4.125 +    ec->cs = FLAT_RING1_CS;
   4.126 +    ec->eip = start_pc;
   4.127 +    ec->esp = start_stack;
   4.128 +    ec->esi = start_info;
   4.129  
   4.130 -    p->thread.esp = (unsigned long) regs;
   4.131 -    p->thread.esp0 = (unsigned long) (regs+1);
   4.132 -
   4.133 -    p->thread.eip = (unsigned long) ret_from_newdomain;
   4.134 -
   4.135 -    __save_flags(regs->eflags);
   4.136 -    regs->eflags |= X86_EFLAGS_IF;
   4.137 +    __save_flags(ec->eflags);
   4.138 +    ec->eflags |= X86_EFLAGS_IF;
   4.139  
   4.140      /* No fast trap at start of day. */
   4.141      SET_DEFAULT_FAST_TRAP(&p->thread);
   4.142 @@ -288,34 +237,21 @@ void new_thread(struct task_struct *p,
   4.143  			: /* no output */ \
   4.144  			:"r" (thread->debugreg[register]))
   4.145  
   4.146 -/*
   4.147 - *	switch_to(x,yn) should switch tasks from x to y.
   4.148 - *
   4.149 - * We fsave/fwait so that an exception goes off at the right time
   4.150 - * (as a call from the fsave or fwait in effect) rather than to
   4.151 - * the wrong process. Lazy FP saving no longer makes any sense
   4.152 - * with modern CPU's, and this simplifies a lot of things (SMP
   4.153 - * and UP become the same).
   4.154 - *
   4.155 - * NOTE! We used to use the x86 hardware context switching. The
   4.156 - * reason for not using it any more becomes apparent when you
   4.157 - * try to recover gracefully from saved state that is no longer
   4.158 - * valid (stale segment register values in particular). With the
   4.159 - * hardware task-switch, there is no way to fix up bad state in
   4.160 - * a reasonable manner.
   4.161 - *
   4.162 - * The fact that Intel documents the hardware task-switching to
   4.163 - * be slow is a fairly red herring - this code is not noticeably
   4.164 - * faster. However, there _is_ some room for improvement here,
   4.165 - * so the performance issues may eventually be a valid point.
   4.166 - * More important, however, is the fact that this allows us much
   4.167 - * more flexibility.
   4.168 - */
   4.169 -/* NB. prev_p passed in %eax, next_p passed in %edx */
   4.170 -void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
   4.171 +void switch_to(struct task_struct *prev_p, struct task_struct *next_p)
   4.172  {
   4.173      struct thread_struct *next = &next_p->thread;
   4.174      struct tss_struct *tss = init_tss + smp_processor_id();
   4.175 +    execution_context_t *stack_ec = get_execution_context();
   4.176 +
   4.177 +    __cli();
   4.178 +
   4.179 +    /* Switch guest general-register state. */
   4.180 +    memcpy(&prev_p->shared_info->execution_context, 
   4.181 +           stack_ec, 
   4.182 +           sizeof(*stack_ec));
   4.183 +    memcpy(stack_ec,
   4.184 +           &next_p->shared_info->execution_context,
   4.185 +           sizeof(*stack_ec));
   4.186  
   4.187      unlazy_fpu(prev_p);
   4.188  
   4.189 @@ -323,18 +259,22 @@ void __switch_to(struct task_struct *pre
   4.190      CLEAR_FAST_TRAP(&prev_p->thread);
   4.191      SET_FAST_TRAP(&next_p->thread);
   4.192  
   4.193 -    tss->esp0 = next->esp0;
   4.194 +    /* Switch the guest OS ring-1 stack. */
   4.195      tss->esp1 = next->esp1;
   4.196      tss->ss1  = next->ss1;
   4.197  
   4.198 +    /* Switch page tables.  */
   4.199 +    __write_cr3_counted(pagetable_val(next_p->mm.pagetable));
   4.200 +
   4.201 +    set_current(next_p);
   4.202 +
   4.203      /* Switch GDT and LDT. */
   4.204      __asm__ __volatile__ ("lgdt %0" : "=m" (*next_p->mm.gdt));
   4.205      load_LDT();
   4.206  
   4.207 -    /*
   4.208 -     * Now maybe reload the debug registers
   4.209 -     */
   4.210 -    if (next->debugreg[7]){
   4.211 +    /* Maybe switch the debug registers. */
   4.212 +    if ( next->debugreg[7] )
   4.213 +    {
   4.214          loaddebug(next, 0);
   4.215          loaddebug(next, 1);
   4.216          loaddebug(next, 2);
   4.217 @@ -344,13 +284,14 @@ void __switch_to(struct task_struct *pre
   4.218          loaddebug(next, 7);
   4.219      }
   4.220  
   4.221 +    __sti();
   4.222  }
   4.223  
   4.224  
   4.225  /* XXX Currently the 'domain' field is ignored! XXX */
   4.226  long do_iopl(unsigned int domain, unsigned int new_io_pl)
   4.227  {
   4.228 -    struct pt_regs *regs = GET_SYSCALL_REGS(current);
   4.229 -    regs->eflags = (regs->eflags & 0xffffcfff) | ((new_io_pl&3) << 12);
   4.230 +    execution_context_t *ec = get_execution_context();
   4.231 +    ec->eflags = (ec->eflags & 0xffffcfff) | ((new_io_pl&3) << 12);
   4.232      return 0;
   4.233  }
     5.1 --- a/xen/arch/i386/setup.c	Wed Oct 01 15:54:32 2003 +0000
     5.2 +++ b/xen/arch/i386/setup.c	Fri Oct 03 16:36:21 2003 +0000
     5.3 @@ -254,7 +254,7 @@ void __init cpu_init(void)
     5.4  
     5.5      /* Set up and load the per-CPU TSS and LDT. */
     5.6      t->ss0  = __HYPERVISOR_DS;
     5.7 -    t->esp0 = current->thread.esp0;
     5.8 +    t->esp0 = get_stack_top();
     5.9      set_tss_desc(nr,t);
    5.10      load_TR(nr);
    5.11      __asm__ __volatile__("lldt %%ax"::"a" (0));
    5.12 @@ -414,14 +414,11 @@ void __init start_of_day(void)
    5.13      check_nmi_watchdog();
    5.14  
    5.15      zap_low_mappings();
    5.16 -    kmem_cache_init();
    5.17 -    kmem_cache_sizes_init(max_page);
    5.18 +
    5.19  #ifdef CONFIG_PCI
    5.20      pci_init();
    5.21  #endif
    5.22      do_initcalls();
    5.23 -
    5.24 -
    5.25      initialize_serial();   /* setup serial 'driver' (for debugging) */
    5.26      initialize_keyboard(); /* setup keyboard (also for debugging)   */
    5.27  
     6.1 --- a/xen/arch/i386/smpboot.c	Wed Oct 01 15:54:32 2003 +0000
     6.2 +++ b/xen/arch/i386/smpboot.c	Fri Oct 03 16:36:21 2003 +0000
     6.3 @@ -395,19 +395,21 @@ void __init smp_callin(void)
     6.4      synchronize_tsc_ap();
     6.5  }
     6.6  
     6.7 -int cpucount;
     6.8 +static int cpucount;
     6.9  
    6.10  /*
    6.11   * Activate a secondary processor.
    6.12   */
    6.13 -int __init start_secondary(void *unused)
    6.14 +void __init start_secondary(void)
    6.15  {
    6.16 -    unsigned int cpu = smp_processor_id();
    6.17 +    unsigned int cpu = cpucount;
    6.18      /* 6 bytes suitable for passing to LIDT instruction. */
    6.19      unsigned char idt_load[6];
    6.20  
    6.21      extern void cpu_init(void);
    6.22  
    6.23 +    set_current(idle_task[cpu]);
    6.24 +
    6.25      /*
    6.26       * Dont put anything before smp_callin(), SMP
    6.27       * booting is too fragile that we want to limit the
    6.28 @@ -435,34 +437,13 @@ int __init start_secondary(void *unused)
    6.29       */
    6.30      local_flush_tlb();
    6.31  
    6.32 -    cpu_idle();
    6.33 -    BUG();
    6.34 -
    6.35 -    return 0;
    6.36 -}
    6.37 +    startup_cpu_idle_loop();
    6.38  
    6.39 -/*
    6.40 - * Everything has been set up for the secondary
    6.41 - * CPUs - they just need to reload everything
    6.42 - * from the task structure
    6.43 - * This function must not return.
    6.44 - */
    6.45 -void __init initialize_secondary(void)
    6.46 -{
    6.47 -    /*
    6.48 -     * We don't actually need to load the full TSS,
    6.49 -     * basically just the stack pointer and the eip.
    6.50 -     */
    6.51 -    asm volatile(
    6.52 -        "movl %0,%%esp\n\t"
    6.53 -        "jmp *%1"
    6.54 -        :
    6.55 -        :"r" (current->thread.esp),"r" (current->thread.eip));
    6.56 +    BUG();
    6.57  }
    6.58  
    6.59  extern struct {
    6.60 -    void * esp;
    6.61 -    unsigned short ss;
    6.62 +    unsigned long esp, ss;
    6.63  } stack_start;
    6.64  
    6.65  /* which physical APIC ID maps to which logical CPU number */
    6.66 @@ -688,9 +669,7 @@ static void __init do_boot_cpu (int apic
    6.67      l2_pgentry_t *pagetable;
    6.68  
    6.69      cpu = ++cpucount;
    6.70 -    /*
    6.71 -     * We can't use kernel_thread since we must avoid to reschedule the child.
    6.72 -     */
    6.73 +
    6.74      if ( (idle = do_newdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
    6.75          panic("failed 'newdomain' for CPU %d", cpu);
    6.76   
    6.77 @@ -701,9 +680,6 @@ static void __init do_boot_cpu (int apic
    6.78  
    6.79      map_cpu_to_boot_apicid(cpu, apicid);
    6.80  
    6.81 -    idle->thread.esp = idle->thread.esp0 = (unsigned long)idle + THREAD_SIZE;
    6.82 -    idle->thread.eip = (unsigned long) start_secondary;
    6.83 -
    6.84      SET_DEFAULT_FAST_TRAP(&idle->thread);
    6.85  
    6.86      idle_task[cpu] = idle;
    6.87 @@ -713,7 +689,7 @@ static void __init do_boot_cpu (int apic
    6.88  
    6.89      /* So we see what's up   */
    6.90      printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
    6.91 -    stack_start.esp = (void *) (1024+PAGE_SIZE+(char *)idle-__PAGE_OFFSET);
    6.92 +    stack_start.esp = __pa(get_free_page(GFP_KERNEL)) + 4000;
    6.93  
    6.94      /*
    6.95       * This grunge runs the startup process for
    6.96 @@ -735,7 +711,8 @@ static void __init do_boot_cpu (int apic
    6.97      /*
    6.98       * Be paranoid about clearing APIC errors.
    6.99       */
   6.100 -    if (APIC_INTEGRATED(apic_version[apicid])) {
   6.101 +    if ( APIC_INTEGRATED(apic_version[apicid]) )
   6.102 +    {
   6.103          apic_read_around(APIC_SPIV);
   6.104          apic_write(APIC_ESR, 0);
   6.105          apic_read(APIC_ESR);
   6.106 @@ -774,8 +751,8 @@ static void __init do_boot_cpu (int apic
   6.107              printk("CPU%d has booted.\n", cpu);
   6.108          } else {
   6.109              boot_error= 1;
   6.110 -            if (*((volatile unsigned char *)phys_to_virt(8192))
   6.111 -                == 0xA5)
   6.112 +            if (*((volatile unsigned long *)phys_to_virt(start_eip))
   6.113 +                == 0xA5A5A5A5)
   6.114  				/* trampoline started but...? */
   6.115                  printk("Stuck ??\n");
   6.116              else
   6.117 @@ -794,9 +771,6 @@ static void __init do_boot_cpu (int apic
   6.118          clear_bit(cpu, &cpu_online_map);  /* was set in smp_callin() */
   6.119          cpucount--;
   6.120      }
   6.121 -
   6.122 -    /* mark "stuck" area as not stuck */
   6.123 -    *((volatile unsigned long *)phys_to_virt(8192)) = 0;
   6.124  }
   6.125  
   6.126  
     7.1 --- a/xen/arch/i386/traps.c	Wed Oct 01 15:54:32 2003 +0000
     7.2 +++ b/xen/arch/i386/traps.c	Fri Oct 03 16:36:21 2003 +0000
     7.3 @@ -110,12 +110,9 @@ void show_trace(unsigned long * stack)
     7.4      int i;
     7.5      unsigned long addr;
     7.6  
     7.7 -    if (!stack)
     7.8 -        stack = (unsigned long*)&stack;
     7.9 -
    7.10      printk("Call Trace: ");
    7.11      i = 1;
    7.12 -    while (((long) stack & (THREAD_SIZE-1)) != 0) {
    7.13 +    while (((long) stack & (STACK_SIZE-1)) != 0) {
    7.14          addr = *stack++;
    7.15          if (kernel_text_address(addr)) {
    7.16              if (i && ((i % 6) == 0))
    7.17 @@ -127,30 +124,17 @@ void show_trace(unsigned long * stack)
    7.18      printk("\n");
    7.19  }
    7.20  
    7.21 -void show_trace_task(struct task_struct *tsk)
    7.22 -{
    7.23 -    unsigned long esp = tsk->thread.esp;
    7.24 -
    7.25 -    /* User space on another CPU? */
    7.26 -    if ((esp ^ (unsigned long)tsk) & (PAGE_MASK<<1))
    7.27 -        return;
    7.28 -    show_trace((unsigned long *)esp);
    7.29 -}
    7.30 -
    7.31 -void show_stack(unsigned long * esp)
    7.32 +void show_stack(unsigned long *esp)
    7.33  {
    7.34      unsigned long *stack;
    7.35      int i;
    7.36  
    7.37 -    if ( esp == NULL )
    7.38 -        esp = (unsigned long *)&esp;
    7.39 -
    7.40      printk("Stack trace from ESP=%p:\n", esp);
    7.41  
    7.42      stack = esp;
    7.43      for ( i = 0; i < kstack_depth_to_print; i++ )
    7.44      {
    7.45 -        if ( ((long)stack & (THREAD_SIZE-1)) == 0 )
    7.46 +        if ( ((long)stack & (STACK_SIZE-1)) == 0 )
    7.47              break;
    7.48          if ( i && ((i % 8) == 0) )
    7.49              printk("\n       ");
     8.1 --- a/xen/common/kernel.c	Wed Oct 01 15:54:32 2003 +0000
     8.2 +++ b/xen/common/kernel.c	Fri Oct 03 16:36:21 2003 +0000
     8.3 @@ -30,6 +30,8 @@
     8.4  #include <xeno/console.h>
     8.5  #include <xeno/net_headers.h>
     8.6  
     8.7 +kmem_cache_t *task_struct_cachep;
     8.8 +
     8.9  static int xpos, ypos;
    8.10  static volatile unsigned char *video;
    8.11  
    8.12 @@ -176,7 +178,7 @@ void cmain (unsigned long magic, multibo
    8.13          for ( ; ; ) ;
    8.14      }
    8.15  
    8.16 -    memcpy(&idle0_task_union, &first_task_struct, sizeof(first_task_struct));
    8.17 +    set_current(&idle0_task);
    8.18  
    8.19      max_page = (mbi->mem_upper+1024) >> (PAGE_SHIFT - 10);
    8.20      init_frametable(max_page);
    8.21 @@ -190,6 +192,16 @@ void cmain (unsigned long magic, multibo
    8.22      memset(current->shared_info, 0, sizeof(shared_info_t));
    8.23      set_fs(USER_DS);
    8.24  
    8.25 +    /* Initialise the slab allocator. */
    8.26 +    kmem_cache_init();
    8.27 +    kmem_cache_sizes_init(max_page);
    8.28 +
    8.29 +    task_struct_cachep = kmem_cache_create(
    8.30 +        "task_struct_cache", sizeof(struct task_struct),
    8.31 +        0, SLAB_HWCACHE_ALIGN, NULL, NULL);
    8.32 +    if ( task_struct_cachep == NULL )
    8.33 +        panic("No slab cache for task structs.");
    8.34 +
    8.35      start_of_day();
    8.36  
    8.37      /* Create initial domain 0. */
    8.38 @@ -215,7 +227,7 @@ void cmain (unsigned long magic, multibo
    8.39      update_dom_time(new_dom->shared_info);
    8.40      wake_up(new_dom);
    8.41  
    8.42 -    cpu_idle();
    8.43 +    startup_cpu_idle_loop();
    8.44  }
    8.45  
    8.46  
     9.1 --- a/xen/common/schedule.c	Wed Oct 01 15:54:32 2003 +0000
     9.2 +++ b/xen/common/schedule.c	Fri Oct 03 16:36:21 2003 +0000
     9.3 @@ -46,7 +46,7 @@ typedef struct schedule_data_st
     9.4  {
     9.5      spinlock_t          lock;           /* lock for protecting this */
     9.6      struct list_head    runqueue;       /* runqueue */
     9.7 -    struct task_struct *prev, *curr;    /* previous and current task */
     9.8 +    struct task_struct *curr;           /* current task */
     9.9      struct task_struct *idle;           /* idle task for this cpu */
    9.10      u32                 svt;            /* system virtual time. per CPU??? */
    9.11      struct ac_timer     s_timer;        /* scheduling timer  */
    9.12 @@ -254,7 +254,7 @@ void reschedule(struct task_struct *p)
    9.13      unsigned long flags;
    9.14      s_time_t now, min_time;
    9.15  
    9.16 -    if (p->has_cpu)
    9.17 +    if ( p->has_cpu )
    9.18          return;
    9.19  
    9.20      spin_lock_irqsave(&schedule_data[cpu].lock, flags);
    9.21 @@ -264,25 +264,25 @@ void reschedule(struct task_struct *p)
    9.22      /* domain should run at least for ctx_allow */
    9.23      min_time = curr->lastschd + ctx_allow;
    9.24  
    9.25 -    if ( is_idle_task(curr) || (min_time <= now) ) {
    9.26 +    if ( is_idle_task(curr) || (min_time <= now) )
    9.27 +    {
    9.28          /* reschedule */
    9.29          set_bit(_HYP_EVENT_NEED_RESCHED, &curr->hyp_events);
    9.30  
    9.31          spin_unlock_irqrestore(&schedule_data[cpu].lock, flags);
    9.32  
    9.33 -        if (cpu != smp_processor_id())
    9.34 +        if ( cpu != smp_processor_id() )
    9.35              smp_send_event_check_cpu(cpu);
    9.36 +
    9.37          return;
    9.38      }
    9.39  
    9.40      /* current hasn't been running for long enough -> reprogram timer.
    9.41       * but don't bother if timer would go off soon anyway */
    9.42 -    if (schedule_data[cpu].s_timer.expires > min_time + TIME_SLOP) {
    9.43 +    if ( schedule_data[cpu].s_timer.expires > min_time + TIME_SLOP )
    9.44          mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
    9.45 -    }
    9.46      
    9.47      spin_unlock_irqrestore(&schedule_data[cpu].lock, flags);
    9.48 -    return;
    9.49  }
    9.50  
    9.51  
    9.52 @@ -304,9 +304,7 @@ asmlinkage void schedule(void)
    9.53      s32                 mcus;
    9.54      u32                 next_evt, next_prime_evt, min_avt;
    9.55  
    9.56 -    perfc_incrc(sched_run1);
    9.57 - need_resched_back:
    9.58 -    perfc_incrc(sched_run2);
    9.59 +    perfc_incrc(sched_run);
    9.60  
    9.61      prev = current;
    9.62      next = NULL;
    9.63 @@ -325,7 +323,7 @@ asmlinkage void schedule(void)
    9.64      ASSERT(!in_interrupt());
    9.65      ASSERT(__task_on_runqueue(prev));
    9.66  
    9.67 -    if (is_idle_task(prev)) 
    9.68 +    if ( is_idle_task(prev) ) 
    9.69          goto deschedule_done;
    9.70  
    9.71      /* do some accounting */
    9.72 @@ -343,9 +341,12 @@ asmlinkage void schedule(void)
    9.73  
    9.74      /* dequeue */
    9.75      __del_from_runqueue(prev);
    9.76 -    switch (prev->state) {
    9.77 +    
    9.78 +    switch ( prev->state )
    9.79 +    {
    9.80      case TASK_INTERRUPTIBLE:
    9.81 -        if (signal_pending(prev)) {
    9.82 +        if ( signal_pending(prev) )
    9.83 +        {
    9.84              prev->state = TASK_RUNNING; /* but has events pending */
    9.85              break;
    9.86          }
    9.87 @@ -362,7 +363,6 @@ asmlinkage void schedule(void)
    9.88      /* requeue */
    9.89      __add_to_runqueue_tail(prev);
    9.90      
    9.91 -
    9.92   deschedule_done:
    9.93      clear_bit(_HYP_EVENT_NEED_RESCHED, &prev->hyp_events);
    9.94  
    9.95 @@ -456,7 +456,6 @@ asmlinkage void schedule(void)
    9.96      prev->has_cpu = 0;
    9.97      next->has_cpu = 1;
    9.98  
    9.99 -    schedule_data[this_cpu].prev = prev;
   9.100      schedule_data[this_cpu].curr = next;
   9.101  
   9.102      next->lastschd = now;
   9.103 @@ -472,7 +471,8 @@ asmlinkage void schedule(void)
   9.104      {
   9.105          /* We won't go through the normal tail, so do this by hand */
   9.106          prev->policy &= ~SCHED_YIELD;
   9.107 -        goto same_process;
   9.108 +        update_dom_time(prev->shared_info);
   9.109 +        return;
   9.110      }
   9.111  
   9.112      perfc_incrc(sched_ctx);
   9.113 @@ -489,23 +489,17 @@ asmlinkage void schedule(void)
   9.114      }
   9.115  #endif
   9.116  
   9.117 -
   9.118 -    prepare_to_switch();
   9.119      switch_to(prev, next);
   9.120 -    prev = schedule_data[this_cpu].prev;
   9.121      
   9.122      prev->policy &= ~SCHED_YIELD;
   9.123      if ( prev->state == TASK_DYING ) 
   9.124          put_task_struct(prev);
   9.125  
   9.126 - same_process:
   9.127 -    /* update the domains notion of time  */
   9.128 -    update_dom_time(current->shared_info);
   9.129 +    update_dom_time(next->shared_info);
   9.130  
   9.131 -    if ( test_bit(_HYP_EVENT_NEED_RESCHED, &current->hyp_events) ) {
   9.132 -        goto need_resched_back;
   9.133 -    }
   9.134 -    return;
   9.135 +    schedule_tail(next);
   9.136 +
   9.137 +    BUG();
   9.138  }
   9.139  
   9.140  /* No locking needed -- pointer comparison is safe :-) */
   9.141 @@ -566,7 +560,6 @@ void __init scheduler_init(void)
   9.142      {
   9.143          INIT_LIST_HEAD(&schedule_data[i].runqueue);
   9.144          spin_lock_init(&schedule_data[i].lock);
   9.145 -        schedule_data[i].prev = &idle0_task;
   9.146          schedule_data[i].curr = &idle0_task;
   9.147          
   9.148          /* a timer for each CPU  */
    10.1 --- a/xen/include/asm-i386/current.h	Wed Oct 01 15:54:32 2003 +0000
    10.2 +++ b/xen/include/asm-i386/current.h	Fri Oct 03 16:36:21 2003 +0000
    10.3 @@ -5,11 +5,42 @@ struct task_struct;
    10.4  
    10.5  static inline struct task_struct * get_current(void)
    10.6  {
    10.7 -	struct task_struct *current;
    10.8 -	__asm__("andl %%esp,%0; ":"=r" (current) : "0" (~8191UL));
    10.9 -	return current;
   10.10 - }
   10.11 +    struct task_struct *current;
   10.12 +    __asm__ ( "orl %%esp,%0; movl (%0),%0" 
   10.13 +              : "=r" (current) : "0" (4092UL) );
   10.14 +    return current;
   10.15 +}
   10.16   
   10.17  #define current get_current()
   10.18  
   10.19 +static inline void set_current(struct task_struct *p)
   10.20 +{
   10.21 +    __asm__ ( "orl %%esp,%0; movl %1,(%0)" 
   10.22 +              : : "r" (4092UL), "r" (p) );    
   10.23 +}
   10.24 +
   10.25 +static inline execution_context_t *get_execution_context(void)
   10.26 +{
   10.27 +    execution_context_t *execution_context;
   10.28 +    __asm__ ( "andl %%esp,%0; addl $4096-72,%0"
   10.29 +              : "=r" (execution_context) : "0" (~4095UL) );
   10.30 +    return execution_context;
   10.31 +}
   10.32 +
   10.33 +static inline unsigned long get_stack_top(void)
   10.34 +{
   10.35 +    unsigned long p;
   10.36 +    __asm__ ( "orl %%esp,%0" 
   10.37 +              : "=r" (p) : "0" (4092UL) );
   10.38 +    return p;
   10.39 +}
   10.40 +
   10.41 +#define schedule_tail(_p)                                         \
   10.42 +    __asm__ __volatile__ (                                        \
   10.43 +        "andl %%esp,%0; addl $4096-72,%0; movl %0,%%esp; jmp *%1" \
   10.44 +        : : "r" (~4095UL), "r" (unlikely(is_idle_task((_p))) ?    \
   10.45 +                                continue_cpu_idle_loop :          \
   10.46 +                                continue_nonidle_task) )
   10.47 +
   10.48 +
   10.49  #endif /* !(_I386_CURRENT_H) */
    11.1 --- a/xen/include/asm-i386/irq.h	Wed Oct 01 15:54:32 2003 +0000
    11.2 +++ b/xen/include/asm-i386/irq.h	Fri Oct 03 16:36:21 2003 +0000
    11.3 @@ -120,10 +120,6 @@ extern char _stext, _etext;
    11.4  #define IRQ_NAME2(nr) nr##_interrupt(void)
    11.5  #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
    11.6  
    11.7 -#define GET_CURRENT \
    11.8 -	"movl %esp, %ebx\n\t" \
    11.9 -	"andl $-8192, %ebx\n\t"
   11.10 -
   11.11  /*
   11.12   *	SMP has a few special interrupts for IPI messages
   11.13   */
    12.1 --- a/xen/include/asm-i386/processor.h	Wed Oct 01 15:54:32 2003 +0000
    12.2 +++ b/xen/include/asm-i386/processor.h	Fri Oct 03 16:36:21 2003 +0000
    12.3 @@ -340,16 +340,9 @@ struct tss_struct {
    12.4  };
    12.5  
    12.6  struct thread_struct {
    12.7 -    unsigned long	esp0; /* top of the stack */
    12.8 -    unsigned long	eip;  /* in kernel space, saved on task switch */
    12.9 -    unsigned long	esp;  /* "" */
   12.10 -    unsigned long	fs;   /* "" (NB. DS/ES constant in mon, so no save) */
   12.11 -    unsigned long	gs;   /* "" ("") */
   12.12      unsigned long esp1, ss1;
   12.13  /* Hardware debugging registers */
   12.14      unsigned long	debugreg[8];  /* %%db0-7 debug registers */
   12.15 -/* fault info */
   12.16 -    unsigned long	cr2, trap_no, error_code;
   12.17  /* floating point info */
   12.18      union i387_union	i387;
   12.19  /* Trap info. */
   12.20 @@ -376,10 +369,8 @@ extern struct desc_struct *idt_tables[];
   12.21       &((_p)->fast_trap_desc), 8))
   12.22  
   12.23  #define INIT_THREAD  {						\
   12.24 -	sizeof(idle0_stack) + (long) &idle0_stack, /* esp0 */   \
   12.25 -	0, 0, 0, 0, 0, 0,		      			\
   12.26 +	0, 0,		      					\
   12.27  	{ [0 ... 7] = 0 },	/* debugging registers */	\
   12.28 -	0, 0, 0,						\
   12.29  	{ { 0, }, },		/* 387 state */			\
   12.30  	0x20, { 0, 0 },		/* DEFAULT_FAST_TRAP */		\
   12.31  	{ {0} }			/* io permissions */		\
   12.32 @@ -387,8 +378,8 @@ extern struct desc_struct *idt_tables[];
   12.33  
   12.34  #define INIT_TSS  {						\
   12.35  	0,0, /* back_link, __blh */				\
   12.36 -	sizeof(idle0_stack) + (long) &idle0_stack, /* esp0 */	\
   12.37 -	__HYPERVISOR_DS, 0, /* ss0 */				\
   12.38 +	0, /* esp0 */						\
   12.39 +	0, 0, /* ss0 */						\
   12.40  	0,0,0,0,0,0, /* stack1, stack2 */			\
   12.41  	0, /* cr3 */						\
   12.42  	0,0, /* eip,eflags */					\
   12.43 @@ -416,29 +407,10 @@ extern int kernel_thread(int (*fn)(void 
   12.44  extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
   12.45  extern void release_segments(struct mm_struct * mm);
   12.46  
   12.47 -/*
   12.48 - * Return saved PC of a blocked thread.
   12.49 - */
   12.50 -static inline unsigned long thread_saved_pc(struct thread_struct *t)
   12.51 -{
   12.52 -    return ((unsigned long *)t->esp)[3];
   12.53 -}
   12.54 -
   12.55  unsigned long get_wchan(struct task_struct *p);
   12.56  #define KSTK_EIP(tsk)	(((unsigned long *)(4096+(unsigned long)(tsk)))[1019])
   12.57  #define KSTK_ESP(tsk)	(((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
   12.58  
   12.59 -#define THREAD_SIZE (2*PAGE_SIZE)
   12.60 -#define alloc_task_struct()  \
   12.61 -  ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
   12.62 -#define put_task_struct(_p) \
   12.63 -  if ( atomic_dec_and_test(&(_p)->refcnt) ) release_task(_p)
   12.64 -#define get_task_struct(_p)  \
   12.65 -  atomic_inc(&(_p)->refcnt)
   12.66 -
   12.67 -#define idle0_task	(idle0_task_union.task)
   12.68 -#define idle0_stack	(idle0_task_union.stack)
   12.69 -
   12.70  struct microcode {
   12.71      unsigned int hdrver;
   12.72      unsigned int rev;
    13.1 --- a/xen/include/asm-i386/ptrace.h	Wed Oct 01 15:54:32 2003 +0000
    13.2 +++ b/xen/include/asm-i386/ptrace.h	Fri Oct 03 16:36:21 2003 +0000
    13.3 @@ -46,7 +46,6 @@ enum EFLAGS {
    13.4  
    13.5  #ifdef __KERNEL__
    13.6  #define user_mode(regs) ((3 & (regs)->xcs))
    13.7 -extern void show_regs(struct pt_regs *);
    13.8  #endif
    13.9  
   13.10  #endif
    14.1 --- a/xen/include/asm-i386/system.h	Wed Oct 01 15:54:32 2003 +0000
    14.2 +++ b/xen/include/asm-i386/system.h	Fri Oct 03 16:36:21 2003 +0000
    14.3 @@ -5,34 +5,8 @@
    14.4  #include <asm/bitops.h>
    14.5  
    14.6  struct task_struct;
    14.7 -extern void FASTCALL(__switch_to(struct task_struct *prev, 
    14.8 -                                 struct task_struct *next));
    14.9 -
   14.10 -#define prepare_to_switch()	do { } while(0)
   14.11 -#define switch_to(prev,next) do {					\
   14.12 -	asm volatile("pushl %%ebp\n\t"					\
   14.13 -                     "pushl %%ebx\n\t"                                  \
   14.14 -                     "pushl %%esi\n\t"                                  \
   14.15 -                     "pushl %%edi\n\t"                                  \
   14.16 -		     "movl %%esp,%0\n\t"	/* save ESP */		\
   14.17 -                     "cli\n\t"                                          \
   14.18 -		     "movl %2,%%esp\n\t"	/* restore ESP */	\
   14.19 -                     "movl %6,%%cr3\n\t"        /* restore pagetables */\
   14.20 -                     "sti\n\t"                                          \
   14.21 -		     "movl $1f,%1\n\t"		/* save EIP */		\
   14.22 -		     "pushl %3\n\t"		/* restore EIP */	\
   14.23 -		     "jmp __switch_to\n"				\
   14.24 -		     "1:\t"     					\
   14.25 -		     "popl %%edi\n\t"					\
   14.26 -		     "popl %%esi\n\t"					\
   14.27 -		     "popl %%ebx\n\t"					\
   14.28 -		     "popl %%ebp\n\t"					\
   14.29 -		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip)	\
   14.30 -		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
   14.31 -		      "a" (prev), "d" (next),				\
   14.32 -                      "c" (pagetable_val(next->mm.pagetable))		\
   14.33 -                     :"memory");                                        \
   14.34 -} while (0)
   14.35 +extern void switch_to(struct task_struct *prev, 
   14.36 +                      struct task_struct *next);
   14.37  
   14.38  /* Clear and set 'TS' bit respectively */
   14.39  #define clts() __asm__ __volatile__ ("clts")
    15.1 --- a/xen/include/hypervisor-ifs/hypervisor-if.h	Wed Oct 01 15:54:32 2003 +0000
    15.2 +++ b/xen/include/hypervisor-ifs/hypervisor-if.h	Fri Oct 03 16:36:21 2003 +0000
    15.3 @@ -186,6 +186,27 @@ typedef struct
    15.4      unsigned long args[7];
    15.5  } multicall_entry_t;
    15.6  
    15.7 +typedef struct
    15.8 +{
    15.9 +    unsigned long ebx;
   15.10 +    unsigned long ecx;
   15.11 +    unsigned long edx;
   15.12 +    unsigned long esi;
   15.13 +    unsigned long edi;
   15.14 +    unsigned long ebp;
   15.15 +    unsigned long eax;
   15.16 +    unsigned long ds;
   15.17 +    unsigned long es;
   15.18 +    unsigned long fs;
   15.19 +    unsigned long gs;
   15.20 +    unsigned long _unused;
   15.21 +    unsigned long eip;
   15.22 +    unsigned long cs;
   15.23 +    unsigned long eflags;
   15.24 +    unsigned long esp;
   15.25 +    unsigned long ss;
   15.26 +} execution_context_t;
   15.27 +
   15.28  /*
   15.29   * Xen/guestos shared data -- pointer provided in start_info.
   15.30   * NB. We expect that this struct is smaller than a page.
   15.31 @@ -241,6 +262,8 @@ typedef struct shared_info_st {
   15.32       */
   15.33      net_idx_t net_idx[MAX_DOMAIN_VIFS];
   15.34  
   15.35 +    execution_context_t execution_context;
   15.36 +
   15.37  } shared_info_t;
   15.38  
   15.39  /*
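With execution_context_t embedded in shared_info_t, a descheduled domain's saved register state sits in its own guest-accessible shared-info page, which is what makes the scheduler-activation plan mentioned in the description feasible. A hypothetical guest-side peek at that state; dump_saved_context is illustrative, and only the type and field names come from the header above:

    #include <stdio.h>
    #include <hypervisor-ifs/hypervisor-if.h>  /* shared_info_t etc. */

    /* Print the register state Xen saved at the last deschedule,
     * assuming 'shared' points at this domain's mapped shared_info. */
    void dump_saved_context(shared_info_t *shared)
    {
        execution_context_t *ec = &shared->execution_context;
        printf("EIP=%08lx CS=%04lx ESP=%08lx EFLAGS=%08lx\n",
               ec->eip, ec->cs, ec->esp, ec->eflags);
    }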
    16.1 --- a/xen/include/xeno/perfc_defn.h	Wed Oct 01 15:54:32 2003 +0000
    16.2 +++ b/xen/include/xeno/perfc_defn.h	Fri Oct 03 16:36:21 2003 +0000
    16.3 @@ -5,8 +5,7 @@ PERFCOUNTER_CPU( irq_time,     "cycles s
    16.4  PERFCOUNTER_CPU( apic_timer,   "apic timer interrupts" )
    16.5  PERFCOUNTER_CPU( ac_timer_max, "ac_timer max error (ns)" )
    16.6  PERFCOUNTER_CPU( sched_irq,    "sched: timer" )
    16.7 -PERFCOUNTER_CPU( sched_run1,   "sched: calls to schedule" )
    16.8 -PERFCOUNTER_CPU( sched_run2,   "sched: runs through scheduler" )
    16.9 +PERFCOUNTER_CPU( sched_run,    "sched: runs through scheduler" )
   16.10  PERFCOUNTER_CPU( sched_ctx,    "sched: context switches" )
   16.11  
   16.12  PERFCOUNTER( net_hypercalls, "network hypercalls" )
    17.1 --- a/xen/include/xeno/sched.h	Wed Oct 01 15:54:32 2003 +0000
    17.2 +++ b/xen/include/xeno/sched.h	Fri Oct 03 16:36:21 2003 +0000
    17.3 @@ -16,6 +16,7 @@
    17.4  #include <xeno/time.h>
    17.5  #include <xeno/ac_timer.h>
    17.6  #include <xeno/delay.h>
    17.7 +#include <xeno/slab.h>
    17.8  
    17.9  #define MAX_DOMAIN_NAME 16
   17.10  
   17.11 @@ -209,21 +210,21 @@ struct task_struct
   17.12      next_task:   &(_t)           \
   17.13  }
   17.14  
   17.15 +extern struct task_struct idle0_task;
   17.16 +
   17.17  extern struct task_struct *idle_task[NR_CPUS];
   17.18  #define IDLE_DOMAIN_ID   (~0)
   17.19  #define is_idle_task(_p) ((_p)->domain == IDLE_DOMAIN_ID)
   17.20  
   17.21 -#ifndef IDLE0_TASK_SIZE
   17.22 -#define IDLE0_TASK_SIZE 2048*sizeof(long)
   17.23 -#endif
   17.24 +#define STACK_SIZE PAGE_SIZE
   17.25  
   17.26 -union task_union {
   17.27 -    struct task_struct task;
   17.28 -    unsigned long stack[IDLE0_TASK_SIZE/sizeof(long)];
   17.29 -};
   17.30 -
   17.31 -extern union task_union idle0_task_union;
   17.32 -extern struct task_struct first_task_struct;
   17.33 +extern kmem_cache_t *task_struct_cachep;
   17.34 +#define alloc_task_struct()  \
   17.35 +  ((struct task_struct *)kmem_cache_alloc(task_struct_cachep,GFP_KERNEL))
   17.36 +#define put_task_struct(_p) \
   17.37 +  if ( atomic_dec_and_test(&(_p)->refcnt) ) release_task(_p)
   17.38 +#define get_task_struct(_p)  \
   17.39 +  atomic_inc(&(_p)->refcnt)
   17.40  
   17.41  extern struct task_struct *do_newdomain(unsigned int dom_id, unsigned int cpu);
   17.42  extern int setup_guestos(
   17.43 @@ -292,7 +293,11 @@ static inline long schedule_timeout(long
   17.44  void domain_init(void);
   17.45  
   17.46  int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
   17.47 -void cpu_idle(void);   /* Idle loop. */
   17.48 +
   17.49 +void startup_cpu_idle_loop(void);
   17.50 +void continue_cpu_idle_loop(void);
   17.51 +
   17.52 +void continue_nonidle_task(void);
   17.53  
   17.54  /* This hash table is protected by the tasklist_lock. */
   17.55  #define TASK_HASH_SIZE 256