ia64/xen-unstable

changeset 18908:6401c9533ef5

Avoid accounting negative runstate pieces: a runstate delta is now
computed into a signed s_time_t and applied only when it is positive,
since a clock reading taken against a state_entry_time recorded on
another CPU can come out negative.

Also consolidate the various open-coded readers of per-CPU idle time
into a single get_cpu_idle_time() helper.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Dec 10 14:05:41 2008 +0000 (2008-12-10)
parents 2a349db39496
children 1419a73316e1
files xen/arch/x86/acpi/cpu_idle.c xen/arch/x86/platform_hypercall.c xen/common/schedule.c xen/common/sysctl.c xen/drivers/cpufreq/cpufreq_ondemand.c xen/include/xen/sched.h
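
The heart of the change is visible in schedule.c below: runstate deltas are now computed into a signed s_time_t and applied only when positive, so a clock reading that lags the recorded state_entry_time (as can happen when the reading and the recording occur on different CPUs) is discarded rather than subtracted. Had the arithmetic stayed unsigned, a lagging reading would instead wrap into a huge bogus increment. A minimal standalone sketch of the write-side accounting (the timestamps and names are invented for illustration; this is not the hypervisor code):

/*
 * Standalone sketch of the clamped accounting -- not the Xen code.
 * s_time_t is Xen's signed 64-bit nanosecond type; the timestamps in
 * main() are invented to show a skewed reading followed by a sane one.
 */
#include <stdio.h>
#include <stdint.h>

typedef int64_t s_time_t;

static uint64_t time_in_state;     /* like runstate.time[state] */
static s_time_t state_entry_time;  /* when the current state was entered */

static void account(s_time_t now)
{
    s_time_t delta = now - state_entry_time;

    /* Apply only positive pieces, as the patch does. */
    if ( delta > 0 )
    {
        time_in_state += delta;
        state_entry_time = now;
    }
}

int main(void)
{
    state_entry_time = 1000;

    account(900);    /* lagging clock: delta = -100, discarded    */
    account(1500);   /* sane clock: delta = 500, accumulated      */
    printf("accumulated: %llu ns\n", (unsigned long long)time_in_state);

    /* With unsigned arithmetic the skewed reading would have added
     * 2^64 - 100 instead of being discarded: */
    printf("unsigned 900 - 1000 = %llu\n",
           (unsigned long long)((uint64_t)900 - (uint64_t)1000));
    return 0;
}

Because state_entry_time is left untouched when a piece is discarded, the skipped span is recovered by the next well-ordered reading (the second account() call above banks the full 500 ns).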
line diff
     1.1 --- a/xen/arch/x86/acpi/cpu_idle.c	Wed Dec 10 13:41:34 2008 +0000
     1.2 +++ b/xen/arch/x86/acpi/cpu_idle.c	Wed Dec 10 14:05:41 2008 +0000
     1.3 @@ -749,7 +749,6 @@ uint32_t pmstat_get_cx_nr(uint32_t cpuid
     1.4  int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat)
     1.5  {
     1.6      const struct acpi_processor_power *power = processor_powers[cpuid];
     1.7 -    struct vcpu *v = idle_vcpu[cpuid];
     1.8      uint64_t usage;
     1.9      int i;
    1.10  
    1.11 @@ -763,9 +762,7 @@ int pmstat_get_cx_stat(uint32_t cpuid, s
    1.12  
    1.13      stat->last = power->last_state ? power->last_state->idx : 0;
    1.14      stat->nr = power->count;
    1.15 -    stat->idle_time = v->runstate.time[RUNSTATE_running];
    1.16 -    if ( v->is_running )
    1.17 -        stat->idle_time += NOW() - v->runstate.state_entry_time;
    1.18 +    stat->idle_time = get_cpu_idle_time(cpuid);
    1.19  
    1.20      for ( i = 0; i < power->count; i++ )
    1.21      {
     2.1 --- a/xen/arch/x86/platform_hypercall.c	Wed Dec 10 13:41:34 2008 +0000
     2.2 +++ b/xen/arch/x86/platform_hypercall.c	Wed Dec 10 14:05:41 2008 +0000
     2.3 @@ -337,16 +337,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
     2.4          for_each_cpu_mask ( cpu, cpumap )
     2.5          {
     2.6 -            if ( (v = idle_vcpu[cpu]) != NULL )
     2.7 -            {
     2.8 -                idletime = v->runstate.time[RUNSTATE_running];
     2.9 -                if ( v->is_running )
    2.10 -                    idletime += now - v->runstate.state_entry_time;
    2.11 -            }
    2.12 -            else
    2.13 -            {
    2.14 -                idletime = 0;
    2.15 +            if ( idle_vcpu[cpu] == NULL )
    2.16                  cpu_clear(cpu, cpumap);
    2.17 -            }
    2.18 +            idletime = get_cpu_idle_time(cpu);
    2.19 
    2.20              ret = -EFAULT;
    2.21              if ( copy_to_guest_offset(idletimes, cpu, &idletime, 1) )
     3.1 --- a/xen/common/schedule.c	Wed Dec 10 13:41:34 2008 +0000
     3.2 +++ b/xen/common/schedule.c	Wed Dec 10 14:05:41 2008 +0000
     3.3 @@ -84,33 +84,49 @@ static inline void trace_runstate_change
     3.4  static inline void vcpu_runstate_change(
     3.5      struct vcpu *v, int new_state, s_time_t new_entry_time)
     3.6  {
     3.7 +    s_time_t delta;
     3.8 +
     3.9      ASSERT(v->runstate.state != new_state);
    3.10      ASSERT(spin_is_locked(&per_cpu(schedule_data,v->processor).schedule_lock));
    3.11  
    3.12      trace_runstate_change(v, new_state);
    3.13  
    3.14 -    v->runstate.time[v->runstate.state] +=
    3.15 -        new_entry_time - v->runstate.state_entry_time;
    3.16 -    v->runstate.state_entry_time = new_entry_time;
    3.17 +    delta = new_entry_time - v->runstate.state_entry_time;
    3.18 +    if ( delta > 0 )
    3.19 +    {
    3.20 +        v->runstate.time[v->runstate.state] += delta;
    3.21 +        v->runstate.state_entry_time = new_entry_time;
    3.22 +    }
    3.23 +
    3.24      v->runstate.state = new_state;
    3.25  }
    3.26  
    3.27  void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
    3.28  {
    3.29 -    if ( likely(v == current) )
    3.30 -    {
    3.31 -        /* Fast lock-free path. */
    3.32 -        memcpy(runstate, &v->runstate, sizeof(*runstate));
    3.33 -        ASSERT(runstate->state == RUNSTATE_running);
    3.34 -        runstate->time[RUNSTATE_running] += NOW() - runstate->state_entry_time;
    3.35 -    }
    3.36 -    else
    3.37 -    {
    3.38 +    s_time_t delta;
    3.39 +
    3.40 +    if ( unlikely(v != current) )
    3.41          vcpu_schedule_lock_irq(v);
    3.42 -        memcpy(runstate, &v->runstate, sizeof(*runstate));
    3.43 -        runstate->time[runstate->state] += NOW() - runstate->state_entry_time;
    3.44 +
    3.45 +    memcpy(runstate, &v->runstate, sizeof(*runstate));
    3.46 +    delta = NOW() - runstate->state_entry_time;
    3.47 +    if ( delta > 0 )
    3.48 +        runstate->time[runstate->state] += delta;
    3.49 +
    3.50 +    if ( unlikely(v != current) )
    3.51          vcpu_schedule_unlock_irq(v);
    3.52 -    }
    3.53 +}
    3.54 +
    3.55 +uint64_t get_cpu_idle_time(unsigned int cpu)
    3.56 +{
    3.57 +    struct vcpu_runstate_info state = { .state = RUNSTATE_running };
    3.58 +    struct vcpu *v;
    3.59 +
    3.60 +    if ( (v = idle_vcpu[cpu]) == NULL )
    3.61 +        return 0;
    3.62 +
    3.63 +    vcpu_runstate_get(v, &state);
    3.64 +    return state.time[RUNSTATE_running];
    3.65  }
    3.66  
    3.67  int sched_init_vcpu(struct vcpu *v, unsigned int processor) 
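
The read side applies the same clamp: vcpu_runstate_get() folds the still-open interval into its snapshot only when the delta is positive, so a reader whose NOW() lags the vcpu's state_entry_time reports the banked total unchanged instead of shrinking it. A reduced model of just that read path (the struct, values, and function name here are invented; only the clamp mirrors the patch):

#include <stdio.h>
#include <stdint.h>

typedef int64_t s_time_t;

/* Cut-down stand-in for vcpu_runstate_info: one state, one counter. */
struct runstate {
    uint64_t time_running;       /* banked RUNSTATE_running ns  */
    s_time_t state_entry_time;   /* start of the open interval  */
};

/* Mirrors the delta clamp vcpu_runstate_get() applies to its snapshot. */
static uint64_t read_running(const struct runstate *rs, s_time_t now)
{
    uint64_t snap = rs->time_running;
    s_time_t delta = now - rs->state_entry_time;

    if ( delta > 0 )
        snap += delta;           /* extend by the open interval ...      */
    return snap;                 /* ... but never report less than banked */
}

int main(void)
{
    struct runstate rs = { .time_running = 5000, .state_entry_time = 2000 };

    printf("lagging reader: %llu\n",
           (unsigned long long)read_running(&rs, 1900)); /* 5000, not 4900 */
    printf("normal reader:  %llu\n",
           (unsigned long long)read_running(&rs, 2600)); /* 5600 */
    return 0;
}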
     4.1 --- a/xen/common/sysctl.c	Wed Dec 10 13:41:34 2008 +0000
     4.2 +++ b/xen/common/sysctl.c	Wed Dec 10 14:05:41 2008 +0000
     4.3 @@ -167,7 +167,6 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysc
     4.4      {
     4.5          uint32_t i, nr_cpus;
     4.6          struct xen_sysctl_cpuinfo cpuinfo;
     4.7 -        struct vcpu *v;
     4.8  
     4.9          nr_cpus = min_t(uint32_t, op->u.getcpuinfo.max_cpus, NR_CPUS);
    4.10  
    4.11 @@ -177,13 +176,7 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysc
    4.12  
    4.13          for ( i = 0; i < nr_cpus; i++ )
    4.14          {
    4.15 -            /* Assume no holes in idle-vcpu map. */
    4.16 -            if ( (v = idle_vcpu[i]) == NULL )
    4.17 -                break;
    4.18 -
    4.19 -            cpuinfo.idletime = v->runstate.time[RUNSTATE_running];
    4.20 -            if ( v->is_running )
    4.21 -                cpuinfo.idletime += NOW() - v->runstate.state_entry_time;
    4.22 +            cpuinfo.idletime = get_cpu_idle_time(i);
    4.23  
    4.24              ret = -EFAULT;
    4.25              if ( copy_to_guest_offset(op->u.getcpuinfo.info, i, &cpuinfo, 1) )
     5.1 --- a/xen/drivers/cpufreq/cpufreq_ondemand.c	Wed Dec 10 13:41:34 2008 +0000
     5.2 +++ b/xen/drivers/cpufreq/cpufreq_ondemand.c	Wed Dec 10 14:05:41 2008 +0000
     5.3 @@ -95,21 +95,6 @@ int get_cpufreq_ondemand_para(uint32_t *
     5.4      return 0;
     5.5  }
     5.6  
     5.7 -uint64_t get_cpu_idle_time(unsigned int cpu)
     5.8 -{
     5.9 -    uint64_t idle_ns;
    5.10 -    struct vcpu *v;
    5.11 -
    5.12 -    if ((v = idle_vcpu[cpu]) == NULL)
    5.13 -        return 0;
    5.14 -
    5.15 -    idle_ns = v->runstate.time[RUNSTATE_running];
    5.16 -    if (v->is_running)
    5.17 -        idle_ns += NOW() - v->runstate.state_entry_time;
    5.18 -
    5.19 -    return idle_ns;
    5.20 -}
    5.21 -
    5.22  static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
    5.23  {
    5.24      unsigned int load = 0;
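
The ondemand governor, which previously carried its own copy of get_cpu_idle_time(), remains the helper's main consumer: dbs_check_cpu() differences successive idle readings against elapsed wall-clock time to estimate load. A standalone sketch of that calculation under invented numbers (the stub counters and the 75 ms-idle-in-100 ms scenario are not from the patch; only the delta-and-divide shape follows the governor):

#include <stdio.h>
#include <stdint.h>

static uint64_t fake_idle_ns;   /* stands in for get_cpu_idle_time(cpu) */
static uint64_t fake_now_ns;    /* stands in for NOW()                  */

static unsigned int load_pct(uint64_t *prev_idle, uint64_t *prev_stamp)
{
    uint64_t idle_delta = fake_idle_ns - *prev_idle;
    uint64_t wall_delta = fake_now_ns - *prev_stamp;

    *prev_idle = fake_idle_ns;
    *prev_stamp = fake_now_ns;

    if ( wall_delta == 0 || idle_delta > wall_delta )
        return 0;   /* no interval yet, or skew artefact: report idle */

    /* Busy fraction of the sampling window, as a percentage. */
    return (unsigned int)(100 * (wall_delta - idle_delta) / wall_delta);
}

int main(void)
{
    uint64_t prev_idle = 0, prev_stamp = 0;

    /* 100 ms window in which the CPU idled for 75 ms -> 25% load. */
    fake_now_ns += 100000000;
    fake_idle_ns += 75000000;
    printf("load: %u%%\n", load_pct(&prev_idle, &prev_stamp));
    return 0;
}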
     6.1 --- a/xen/include/xen/sched.h	Wed Dec 10 13:41:34 2008 +0000
     6.2 +++ b/xen/include/xen/sched.h	Wed Dec 10 14:05:41 2008 +0000
     6.3 @@ -538,6 +538,7 @@ int vcpu_locked_change_affinity(struct v
     6.4  void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);
     6.5  
     6.6  void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
     6.7 +uint64_t get_cpu_idle_time(unsigned int cpu);
     6.8  
     6.9  #define IS_PRIV(_d) ((_d)->is_privileged)
    6.10  #define IS_PRIV_FOR(_d, _t) (IS_PRIV(_d) || ((_d)->target && (_d)->target == (_t)))