ia64/xen-unstable

changeset 331:03dc7864109b

bitkeeper revision 1.148 (3e78bf69oU3LgkH_AAzL1qYB6OK3GA)

honour context switch allowance
author rn@wyvis.research.intel-research.net
date Wed Mar 19 19:05:13 2003 +0000 (2003-03-19)
parents 74c24fb522be
children a72c5a70fe2b 8c3e9c5563dd
files xen/arch/i386/time.c xen/common/ac_timer.c xen/common/keyhandler.c xen/common/schedule.c xen/include/xeno/sched.h
line diff
     1.1 --- a/xen/arch/i386/time.c	Wed Mar 19 18:09:57 2003 +0000
     1.2 +++ b/xen/arch/i386/time.c	Wed Mar 19 19:05:13 2003 +0000
     1.3 @@ -401,6 +401,7 @@ int __init init_xeno_time()
     1.4  
     1.5      /* start timer to update time periodically */
     1.6      init_ac_timer(&update_timer, 0);
     1.7 +    update_timer.data = 1;
     1.8      update_timer.function = &update_time;
     1.9      update_time(0);
    1.10  
     2.1 --- a/xen/common/ac_timer.c	Wed Mar 19 18:09:57 2003 +0000
     2.2 +++ b/xen/common/ac_timer.c	Wed Mar 19 19:05:13 2003 +0000
     2.3 @@ -256,30 +256,6 @@ void do_ac_timer(void)
     2.4  }
     2.5  
     2.6  
     2.7 -/*****************************************************************************
     2.8 - * debug dump_queue
     2.9 - * arguments: queue head, name of queue
    2.10 - *****************************************************************************/
    2.11 -static void dump_tqueue(struct list_head *queue, char *name)
    2.12 -{
    2.13 -    struct list_head *list;
    2.14 -    int loop = 0;
    2.15 -    struct ac_timer  *t;
    2.16 -
    2.17 -    printk ("QUEUE %s %lx   n: %lx, p: %lx\n", name,  (unsigned long)queue,
    2.18 -            (unsigned long) queue->next, (unsigned long) queue->prev);
    2.19 -    list_for_each (list, queue) {
    2.20 -        t = list_entry(list, struct ac_timer, timer_list);
    2.21 -        printk ("  %s %d : %lx ex=0x%08X%08X %lu  n: %lx, p: %lx\n",
    2.22 -                name, loop++, 
    2.23 -                (unsigned long)list,
    2.24 -                (u32)(t->expires>>32), (u32)t->expires, t->data,
    2.25 -                (unsigned long)list->next, (unsigned long)list->prev);
    2.26 -    }
    2.27 -    return; 
    2.28 -}
    2.29 -
    2.30 -
    2.31  static void ac_timer_softirq_action(struct softirq_action *a)
    2.32  {
    2.33      int           cpu = smp_processor_id();
    2.34 @@ -312,6 +288,28 @@ static void ac_timer_softirq_action(stru
    2.35      }
    2.36  }
    2.37  
    2.38 +/*****************************************************************************
    2.39 + * debug dump_queue
    2.40 + * arguments: queue head, name of queue
    2.41 + *****************************************************************************/
    2.42 +static void dump_tqueue(struct list_head *queue, char *name)
    2.43 +{
    2.44 +    struct list_head *list;
    2.45 +    int loop = 0;
    2.46 +    struct ac_timer  *t;
    2.47 +
    2.48 +    printk ("QUEUE %s %lx   n: %lx, p: %lx\n", name,  (unsigned long)queue,
    2.49 +            (unsigned long) queue->next, (unsigned long) queue->prev);
    2.50 +    list_for_each (list, queue) {
    2.51 +        t = list_entry(list, struct ac_timer, timer_list);
    2.52 +        printk ("  %s %d : %lx ex=0x%08X%08X %lu  n: %lx, p: %lx\n",
    2.53 +                name, loop++, 
    2.54 +                (unsigned long)list,
    2.55 +                (u32)(t->expires>>32), (u32)t->expires, t->data,
    2.56 +                (unsigned long)list->next, (unsigned long)list->prev);
    2.57 +    }
    2.58 +    return; 
    2.59 +}
    2.60  
    2.61  void dump_timerq(u_char key, void *dev_id, struct pt_regs *regs)
    2.62  {
     3.1 --- a/xen/common/keyhandler.c	Wed Mar 19 18:09:57 2003 +0000
     3.2 +++ b/xen/common/keyhandler.c	Wed Mar 19 19:05:13 2003 +0000
     3.3 @@ -118,6 +118,8 @@ extern void perfc_printall (u_char key, 
     3.4  extern void perfc_reset (u_char key, void *dev_id, struct pt_regs *regs);
     3.5  extern void dump_timerq(u_char key, void *dev_id, struct pt_regs *regs);
     3.6  extern void dump_runq(u_char key, void *dev_id, struct pt_regs *regs);
     3.7 +extern void print_sched_histo(u_char key, void *dev_id, struct pt_regs *regs);
     3.8 +extern void reset_sched_histo(u_char key, void *dev_id, struct pt_regs *regs);
     3.9  
    3.10  
    3.11  void initialize_keytable() 
    3.12 @@ -132,6 +134,8 @@ void initialize_keytable()
    3.13      add_key_handler('a', dump_timerq,    "dump ac_timer queues");
    3.14      add_key_handler('d', dump_registers, "dump registers"); 
    3.15      add_key_handler('h', show_handlers, "show this message");
    3.16 +    add_key_handler('l', print_sched_histo, "print sched latency histogram");
    3.17 +    add_key_handler('L', reset_sched_histo, "reset sched latency histogram");
    3.18      add_key_handler('p', perfc_printall, "print performance counters"); 
    3.19      add_key_handler('P', perfc_reset,    "reset performance counters"); 
    3.20      add_key_handler('q', do_task_queues, "dump task queues + guest state");
     4.1 --- a/xen/common/schedule.c	Wed Mar 19 18:09:57 2003 +0000
     4.2 +++ b/xen/common/schedule.c	Wed Mar 19 19:05:13 2003 +0000
     4.3 @@ -39,9 +39,15 @@
     4.4  #define TRC(_x)
     4.5  #endif
     4.6  
     4.7 +#define SCHED_HISTO
     4.8 +#ifdef SCHED_HISTO
     4.9 +#define BUCKETS 31
    4.10 +#endif
    4.11  
    4.12 -#define MCU         (s32)MICROSECS(100)     /* Minimum unit */
    4.13 -static s32 ctx_allow=(s32)MILLISECS(10);    /* context switch allowance */
    4.14 +
    4.15 +#define MCU          (s32)MICROSECS(100)    /* Minimum unit */
    4.16 +#define TIME_SLOP    (s32)MICROSECS(50)     /* allow time to slip a bit */
    4.17 +static s32 ctx_allow=(s32)MILLISECS(5);     /* context switch allowance */
    4.18  
    4.19  /*****************************************************************************
    4.20   * per CPU data for the scheduler.
    4.21 @@ -54,6 +60,9 @@ typedef struct schedule_data_st
    4.22      struct task_struct *idle;           /* idle task for this cpu */
    4.23      u32                 svt;            /* system virtual time. per CPU??? */
    4.24      struct ac_timer     s_timer;        /* scheduling timer  */
    4.25 +#ifdef SCHED_HISTO
    4.26 +    u32                 hist[BUCKETS];  /* for scheduler latency histogram */
    4.27 +#endif
    4.28  
    4.29  } __cacheline_aligned schedule_data_t;
    4.30  schedule_data_t schedule_data[NR_CPUS];
    4.31 @@ -140,8 +149,11 @@ int wake_up(struct task_struct *p)
    4.32  
    4.33      p->evt = p->avt; /* RN: XXX BVT deal with warping here */
    4.34  
    4.35 +#ifdef SCHED_HISTO
    4.36 +    p->wokenup = NOW();
    4.37 +#endif
    4.38 +
    4.39      ret = 1;
    4.40 -
    4.41   out:
    4.42      spin_unlock_irqrestore(&schedule_data[p->processor].lock, flags);
    4.43      return ret;
    4.44 @@ -194,36 +206,46 @@ long sched_adjdom(int dom, unsigned long
    4.45   * cause a run through the scheduler when appropriate
    4.46   * Appropriate is:
    4.47   * - current task is idle task
    4.48 - * - new processes evt is lower than current one
    4.49   * - the current task already ran for it's context switch allowance
    4.50 - * XXX RN: not quite sure about the last two. Strictly, if p->evt < curr->evt
    4.51 - * should still let curr run for at least ctx_allow. But that gets quite messy.
    4.52 + * Otherwise we do a run through the scheduler after the current tasks 
    4.53 + * context switch allowance is over.
    4.54   ****************************************************************************/
    4.55  void reschedule(struct task_struct *p)
    4.56  {
    4.57 -    int cpu = p->processor;
    4.58 +    int cpu = p->processor;;
    4.59      struct task_struct *curr;
    4.60      unsigned long flags;
    4.61 +    s_time_t now, min_time;
    4.62  
    4.63      if (p->has_cpu)
    4.64          return;
    4.65  
    4.66      spin_lock_irqsave(&schedule_data[cpu].lock, flags);
    4.67 +    
    4.68 +    now = NOW();
    4.69      curr = schedule_data[cpu].curr;
    4.70 +    /* domain should run at least for ctx_allow */
    4.71 +    min_time = curr->lastschd + ctx_allow;
    4.72  
    4.73 -    if ( is_idle_task(curr) ||
    4.74 -         (p->evt < curr->evt) ||
    4.75 -         (curr->lastschd + ctx_allow >= NOW()) ) {
    4.76 +    if ( is_idle_task(curr) || (min_time <= now) ) {
    4.77          /* reschedule */
    4.78          set_bit(_HYP_EVENT_NEED_RESCHED, &curr->hyp_events);
    4.79 +
    4.80          spin_unlock_irqrestore(&schedule_data[cpu].lock, flags);
    4.81 -#ifdef CONFIG_SMP
    4.82 +
    4.83          if (cpu != smp_processor_id())
    4.84              smp_send_event_check_cpu(cpu);
    4.85 -#endif
    4.86 -    } else {
    4.87 -        spin_unlock_irqrestore(&schedule_data[cpu].lock, flags);
    4.88 +        return;
    4.89      }
    4.90 +
    4.91 +    /* current hasn't been running for long enough -> reprogram timer.
    4.92 +     * but don't bother if timer would go off soon anyway */
    4.93 +    if (schedule_data[cpu].s_timer.expires > min_time + TIME_SLOP) {
    4.94 +        mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
    4.95 +    }
    4.96 +    
    4.97 +    spin_unlock_irqrestore(&schedule_data[cpu].lock, flags);
    4.98 +    return;
    4.99  }
   4.100  
   4.101  
   4.102 @@ -258,7 +280,8 @@ asmlinkage void schedule(void)
   4.103  
   4.104      now = NOW();
   4.105  
   4.106 -    /* remove timer  */
   4.107 +    /* remove timer, if till on list  */
   4.108 +    //if (active_ac_timer(&schedule_data[this_cpu].s_timer))
   4.109      rem_ac_timer(&schedule_data[this_cpu].s_timer);
   4.110  
   4.111      /* deschedule the current domain */
   4.112 @@ -369,6 +392,13 @@ asmlinkage void schedule(void)
   4.113   sched_done:
   4.114      ASSERT(r_time >= ctx_allow);
   4.115  
   4.116 +#ifndef NDEBUG
   4.117 +    if (r_time < ctx_allow) {
   4.118 +        printk("[%02d]: %lx\n", this_cpu, r_time);
   4.119 +        dump_rqueue(&schedule_data[this_cpu].runqueue, "foo");
   4.120 +    }
   4.121 +#endif
   4.122 +
   4.123      prev->has_cpu = 0;
   4.124      next->has_cpu = 1;
   4.125  
   4.126 @@ -391,6 +421,19 @@ asmlinkage void schedule(void)
   4.127      }
   4.128  
   4.129      perfc_incrc(sched_ctx);
   4.130 +#ifdef SCHED_HISTO
   4.131 +    {
   4.132 +        ulong diff; /* should fit in 32bits */
   4.133 +        if (!is_idle_task(next) && next->wokenup) {
   4.134 +            diff = (ulong)(now - next->wokenup);
   4.135 +            diff /= (ulong)MILLISECS(1);
   4.136 +            if (diff <= BUCKETS-2)  schedule_data[this_cpu].hist[diff]++;
   4.137 +            else                    schedule_data[this_cpu].hist[BUCKETS-1]++;
   4.138 +        }
   4.139 +        next->wokenup = (s_time_t)0;
   4.140 +    }
   4.141 +#endif
   4.142 +
   4.143  
   4.144      prepare_to_switch();
   4.145      switch_to(prev, next);
   4.146 @@ -450,7 +493,7 @@ static void virt_timer(unsigned long foo
   4.147      guest_event_notify(cpu_mask);
   4.148  
   4.149      now = NOW();
   4.150 -    v_timer.expires  = now + MILLISECS(10);
   4.151 +    v_timer.expires  = now + MILLISECS(20);
   4.152      add_ac_timer(&v_timer);
   4.153  }
   4.154  
   4.155 @@ -472,11 +515,13 @@ void __init scheduler_init(void)
   4.156          
   4.157          /* a timer for each CPU  */
   4.158          init_ac_timer(&schedule_data[i].s_timer, i);
   4.159 +        schedule_data[i].s_timer.data = 2;
   4.160          schedule_data[i].s_timer.function = &sched_timer;
   4.161  
   4.162      }
   4.163      schedule_data[0].idle = &idle0_task; /* idle on CPU 0 is special */
   4.164      init_ac_timer(&v_timer, 0);
   4.165 +    v_timer.data = 3;
   4.166      v_timer.function = &virt_timer;
   4.167  }
   4.168  
   4.169 @@ -603,3 +648,39 @@ void dump_runq(u_char key, void *dev_id,
   4.170      return; 
   4.171  }
   4.172  
   4.173 +#ifdef SCHED_HISTO
   4.174 +void print_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
   4.175 +{
   4.176 +    int loop, i, j;
   4.177 +    for (loop = 0; loop < smp_num_cpus; loop++) {
   4.178 +        j = 0;
   4.179 +        printf ("CPU[%02d]: scheduler latency histogram (ms:[count])\n", loop);
   4.180 +        for (i=0; i<BUCKETS; i++) {
   4.181 +            if (schedule_data[loop].hist[i]) {
   4.182 +                if (i < BUCKETS-1)
   4.183 +                    printk("%2d:[%7u]    ", i, schedule_data[loop].hist[i]);
   4.184 +                else
   4.185 +                    printk(" >:[%7u]    ", schedule_data[loop].hist[i]);
   4.186 +                j++;
   4.187 +                if (!(j % 5)) printk("\n");
   4.188 +            }
   4.189 +        }
   4.190 +        printk("\n");
   4.191 +    }
   4.192 +      
   4.193 +}
   4.194 +void reset_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
   4.195 +{
   4.196 +    int loop, i;
   4.197 +    for (loop = 0; loop < smp_num_cpus; loop++)
   4.198 +        for (i=0; i<BUCKETS; i++) 
   4.199 +            schedule_data[loop].hist[i]=0;
   4.200 +}
   4.201 +#else
   4.202 +void print_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
   4.203 +{
   4.204 +}
   4.205 +void reset_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
   4.206 +{
   4.207 +}
   4.208 +#endif
     5.1 --- a/xen/include/xeno/sched.h	Wed Mar 19 18:09:57 2003 +0000
     5.2 +++ b/xen/include/xeno/sched.h	Wed Mar 19 19:05:13 2003 +0000
     5.3 @@ -94,6 +94,7 @@ struct task_struct {
     5.4  
     5.5      s_time_t lastschd;              /* time this domain was last scheduled */
     5.6      s_time_t cpu_time;              /* total CPU time received till now */
     5.7 +    s_time_t wokenup;               /* time domain got woken up */
     5.8  
     5.9      unsigned long mcu_advance;      /* inverse of weight */
    5.10      s32  avt;                       /* actual virtual time */