ia64/xen-unstable

changeset 194:0dab1361ac2b

bitkeeper revision 1.55 (3e4f9af1gsfKUo4Lbpb4s7Zx6hWWrw)

schedule.c, setup.c, process.c:
Fixed SMP domain building.
author kaf24@labyrinth.cl.cam.ac.uk
date Sun Feb 16 14:06:41 2003 +0000 (2003-02-16)
parents 243ed70f18a7
children 7542fa654c67
files xen-2.4.16/arch/i386/process.c xen-2.4.16/arch/i386/setup.c xen-2.4.16/common/schedule.c
     1.1 --- a/xen-2.4.16/arch/i386/process.c	Sun Feb 16 12:02:09 2003 +0000
     1.2 +++ b/xen-2.4.16/arch/i386/process.c	Sun Feb 16 14:06:41 2003 +0000
     1.3 @@ -68,7 +68,10 @@ static void default_idle(void)
     1.4  void cpu_idle (void)
     1.5  {
     1.6      ASSERT(current->domain == IDLE_DOMAIN_ID);
     1.7 +
     1.8      current->has_cpu = 1;
     1.9 +    (void)wake_up(current);
    1.10 +    schedule();
    1.11  
    1.12      /*
    1.13       * Declares CPU setup done to the boot processor.
     2.1 --- a/xen-2.4.16/arch/i386/setup.c	Sun Feb 16 12:02:09 2003 +0000
     2.2 +++ b/xen-2.4.16/arch/i386/setup.c	Sun Feb 16 14:06:41 2003 +0000
     2.3 @@ -227,9 +227,6 @@ void __init cpu_init(void)
     2.4      mapcache[nr] = (unsigned long *)get_free_page(GFP_KERNEL);
     2.5      clear_page(mapcache[nr]);
     2.6      *pl2e = mk_l2_pgentry(__pa(mapcache[nr]) | PAGE_HYPERVISOR);
     2.7 -
     2.8 -    /* Stick the idle task on the run queue. */
     2.9 -    (void)wake_up(current);
    2.10  }
    2.11  
    2.12  static void __init do_initcalls(void)
     3.1 --- a/xen-2.4.16/common/schedule.c	Sun Feb 16 12:02:09 2003 +0000
     3.2 +++ b/xen-2.4.16/common/schedule.c	Sun Feb 16 14:06:41 2003 +0000
     3.3 @@ -93,7 +93,7 @@ void sched_add_domain(struct task_struct
     3.4   */
     3.5  void sched_rem_domain(struct task_struct *p) 
     3.6  {
     3.7 -	p->state = TASK_DYING;
     3.8 +    p->state = TASK_DYING;
     3.9  }
    3.10  
    3.11  
    3.12 @@ -183,9 +183,7 @@ long do_sched_op(void)
    3.13      return 0;
    3.14  }
    3.15  
    3.16 -/*
    3.17 - * 
    3.18 - */
    3.19 +
    3.20  void reschedule(struct task_struct *p)
    3.21  {
    3.22      int cpu = p->processor;
    3.23 @@ -226,7 +224,7 @@ asmlinkage void schedule(void)
    3.24  
    3.25      spin_lock_irq(&schedule_data[this_cpu].lock);
    3.26  
    3.27 -    //ASSERT(!in_interrupt());
    3.28 +    /*ASSERT(!in_interrupt());*/
    3.29      ASSERT(__task_on_runqueue(prev));
    3.30  
    3.31  	__move_last_runqueue(prev);
    3.32 @@ -275,7 +273,7 @@ asmlinkage void schedule(void)
    3.33      if ( prev->state == TASK_DYING ) release_task(prev);
    3.34  
    3.35   same_process:
    3.36 -	update_dom_time(current->shared_info);
    3.37 +    update_dom_time(current->shared_info);
    3.38  
    3.39      if ( test_bit(_HYP_EVENT_NEED_RESCHED, &current->hyp_events) )
    3.40          goto need_resched_back;
    3.41 @@ -288,50 +286,50 @@ asmlinkage void schedule(void)
    3.42  static __cacheline_aligned int count[NR_CPUS];
    3.43  static void sched_timer(unsigned long foo)
    3.44  {
    3.45 -	int 				cpu  = smp_processor_id();
    3.46 +    int 				cpu  = smp_processor_id();
    3.47      struct task_struct *curr = schedule_data[cpu].curr;
    3.48 -	s_time_t			now;
    3.49 -	int 				res;
    3.50 +    s_time_t			now;
    3.51 +    int 				res;
    3.52  
    3.53 -	/* reschedule after each 5 ticks */
    3.54 -	if (count[cpu] >= 5) {
    3.55 -		set_bit(_HYP_EVENT_NEED_RESCHED, &curr->hyp_events);
    3.56 -		count[cpu] = 0;
    3.57 -	}
    3.58 -	count[cpu]++;
    3.59 +    /* reschedule after each 5 ticks */
    3.60 +    if (count[cpu] >= 5) {
    3.61 +        set_bit(_HYP_EVENT_NEED_RESCHED, &curr->hyp_events);
    3.62 +        count[cpu] = 0;
    3.63 +    }
    3.64 +    count[cpu]++;
    3.65  
    3.66 -	/*
    3.67 -     * deliver virtual timer interrups to domains if we are CPU 0
    3.68 -     * XXX RN: We don't have a per CPU list of domains yet. Otherwise 
    3.69 -     * would use that. Plus, this should be removed anyway once
    3.70 -     * Domains "know" about virtual time and timeouts. But, it's better
    3.71 -     * here then where it was before.
    3.72 +    /*
    3.73 +     * deliver virtual timer interrups to domains if we are CPU 0 XXX RN: We
    3.74 +     * don't have a per CPU list of domains yet. Otherwise would use that.
    3.75 +     * Plus, this should be removed anyway once Domains "know" about virtual
    3.76 +     * time and timeouts. But, it's better here then where it was before.
    3.77       */
    3.78 -	if (cpu == 0) {
    3.79 -		struct task_struct *p;
    3.80 -		unsigned long cpu_mask = 0;
    3.81 +    if (cpu == 0) {
    3.82 +        struct task_struct *p;
    3.83 +        unsigned long cpu_mask = 0;
    3.84  
    3.85 -		/* send virtual timer interrupt */
    3.86 -		read_lock(&tasklist_lock);
    3.87 -		p = &idle0_task;
    3.88 -		do {
    3.89 -			cpu_mask |= mark_guest_event(p, _EVENT_TIMER);
    3.90 -		}
    3.91 -		while ( (p = p->next_task) != &idle0_task );
    3.92 -		read_unlock(&tasklist_lock);
    3.93 -		guest_event_notify(cpu_mask);
    3.94 -	}
    3.95 +        /* send virtual timer interrupt */
    3.96 +        read_lock(&tasklist_lock);
    3.97 +        p = &idle0_task;
    3.98 +        do {
    3.99 +            if ( is_idle_task(p) ) continue;
   3.100 +            cpu_mask |= mark_guest_event(p, _EVENT_TIMER);
   3.101 +        }
   3.102 +        while ( (p = p->next_task) != &idle0_task );
   3.103 +        read_unlock(&tasklist_lock);
   3.104 +        guest_event_notify(cpu_mask);
   3.105 +    }
   3.106  
   3.107   again:
   3.108 -	now = NOW();
   3.109 -	s_timer[cpu].expires  = now + MILLISECS(10);
   3.110 -	res=add_ac_timer(&s_timer[cpu]);
   3.111 +    now = NOW();
   3.112 +    s_timer[cpu].expires  = now + MILLISECS(10);
   3.113 +    res=add_ac_timer(&s_timer[cpu]);
   3.114  
   3.115 - 	TRC(printk("SCHED[%02d] timer(): now=0x%08X%08X timo=0x%08X%08X\n",
   3.116 - 			   cpu, (u32)(now>>32), (u32)now,
   3.117 - 			   (u32)(s_timer[cpu].expires>>32), (u32)s_timer[cpu].expires));
   3.118 -	if (res==1)
   3.119 -		goto again;
   3.120 +    TRC(printk("SCHED[%02d] timer(): now=0x%08X%08X timo=0x%08X%08X\n",
   3.121 +               cpu, (u32)(now>>32), (u32)now,
   3.122 +               (u32)(s_timer[cpu].expires>>32), (u32)s_timer[cpu].expires));
   3.123 +    if (res==1)
   3.124 +        goto again;
   3.125  
   3.126  }
   3.127  
   3.128 @@ -343,7 +341,7 @@ void __init scheduler_init(void)
   3.129  {
   3.130      int i;
   3.131  
   3.132 -	printk("Initialising schedulers\n");
   3.133 +    printk("Initialising schedulers\n");
   3.134  
   3.135      for ( i = 0; i < NR_CPUS; i++ )
   3.136      {
   3.137 @@ -352,9 +350,9 @@ void __init scheduler_init(void)
   3.138          schedule_data[i].prev = &idle0_task;
   3.139          schedule_data[i].curr = &idle0_task;
   3.140  		
   3.141 -		/* a timer for each CPU  */
   3.142 -		init_ac_timer(&s_timer[i]);
   3.143 -		s_timer[i].function = &sched_timer;
   3.144 +        /* a timer for each CPU  */
   3.145 +        init_ac_timer(&s_timer[i]);
   3.146 +        s_timer[i].function = &sched_timer;
   3.147      }
   3.148  }
   3.149  
   3.150 @@ -362,11 +360,11 @@ void __init scheduler_init(void)
   3.151   * Start a scheduler for each CPU
   3.152   * This has to be done *after* the timers, e.g., APICs, have been initialised
   3.153   */
   3.154 -void schedulers_start(void) {
   3.155 -	
   3.156 -	printk("Start schedulers\n");
   3.157 -	__cli();
   3.158 -	sched_timer(0);
   3.159 -	smp_call_function((void *)sched_timer, NULL, 1, 1);
   3.160 -	__sti();
   3.161 +void schedulers_start(void) 
   3.162 +{	
   3.163 +    printk("Start schedulers\n");
   3.164 +    __cli();
   3.165 +    sched_timer(0);
   3.166 +    smp_call_function((void *)sched_timer, NULL, 1, 1);
   3.167 +    __sti();
   3.168  }
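
Taken together, the process.c and setup.c hunks move the idle task's run-queue insertion out of cpu_init() and into cpu_idle(): each CPU's idle task now claims its CPU, wakes itself, and calls schedule() once the per-CPU scheduler state exists, while the schedule.c hunk also skips idle tasks when delivering virtual timer events. A minimal sketch of the resulting cpu_idle() entry path, reconstructed from the hunks above (the boot-processor handshake and the idle loop that follow are elided):

    void cpu_idle(void)
    {
        ASSERT(current->domain == IDLE_DOMAIN_ID);

        current->has_cpu = 1;    /* idle task owns this CPU                          */
        (void)wake_up(current);  /* enqueue itself (moved here from cpu_init())      */
        schedule();              /* hand the CPU to a runnable domain, if any exists */

        /* ... declare CPU setup done to the boot processor, then idle ... */
    }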