ia64/xen-unstable

changeset 9022:2303fb4682e7

New VCPUOP_get_runstate_info hypercall. Returns information about the current
run state of a VCPU (running, runnable, blocked, etc.) and the total time
spent in each state since the VCPU was created.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Feb 25 17:58:37 2006 +0100 (2006-02-25)
parents a9f3abcc4149
children c375c2109452
files xen/common/dom0_ops.c xen/common/domain.c xen/common/keyhandler.c xen/common/sched_bvt.c xen/common/sched_sedf.c xen/common/schedule.c xen/include/public/vcpu.h xen/include/xen/sched-if.h xen/include/xen/sched.h
line diff
     1.1 --- a/xen/common/dom0_ops.c	Sat Feb 25 12:27:53 2006 +0100
     1.2 +++ b/xen/common/dom0_ops.c	Sat Feb 25 17:58:37 2006 +0100
     1.3 @@ -46,6 +46,7 @@ static void getdomaininfo(struct domain 
     1.4      struct vcpu   *v;
     1.5      u64 cpu_time = 0;
     1.6      int flags = DOMFLAGS_BLOCKED;
     1.7 +    struct vcpu_runstate_info runstate;
     1.8      
     1.9      info->domain = d->domain_id;
    1.10      info->nr_online_vcpus = 0;
    1.11 @@ -55,7 +56,8 @@ static void getdomaininfo(struct domain 
    1.12       * - domain is marked as running if any of its vcpus is running
    1.13       */
    1.14      for_each_vcpu ( d, v ) {
    1.15 -        cpu_time += v->cpu_time;
    1.16 +        vcpu_runstate_get(v, &runstate);
    1.17 +        cpu_time += runstate.time[RUNSTATE_running];
    1.18          info->max_vcpu_id = v->vcpu_id;
    1.19          if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
    1.20          {
    1.21 @@ -497,6 +499,7 @@ long do_dom0_op(struct dom0_op *u_dom0_o
    1.22      { 
    1.23          struct domain *d;
    1.24          struct vcpu   *v;
    1.25 +        struct vcpu_runstate_info runstate;
    1.26  
    1.27          ret = -ESRCH;
    1.28          if ( (d = find_domain_by_id(op->u.getvcpuinfo.domain)) == NULL )
    1.29 @@ -510,10 +513,12 @@ long do_dom0_op(struct dom0_op *u_dom0_o
    1.30          if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
    1.31              goto getvcpuinfo_out;
    1.32  
    1.33 +        vcpu_runstate_get(v, &runstate);
    1.34 +
    1.35          op->u.getvcpuinfo.online   = !test_bit(_VCPUF_down, &v->vcpu_flags);
    1.36          op->u.getvcpuinfo.blocked  = test_bit(_VCPUF_blocked, &v->vcpu_flags);
    1.37          op->u.getvcpuinfo.running  = test_bit(_VCPUF_running, &v->vcpu_flags);
    1.38 -        op->u.getvcpuinfo.cpu_time = v->cpu_time;
    1.39 +        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
    1.40          op->u.getvcpuinfo.cpu      = v->processor;
    1.41          op->u.getvcpuinfo.cpumap   = 0;
    1.42          memcpy(&op->u.getvcpuinfo.cpumap,
     2.1 --- a/xen/common/domain.c	Sat Feb 25 12:27:53 2006 +0100
     2.2 +++ b/xen/common/domain.c	Sat Feb 25 17:58:37 2006 +0100
     2.3 @@ -451,6 +451,19 @@ long do_vcpu_op(int cmd, int vcpuid, voi
     2.4      case VCPUOP_is_up:
     2.5          rc = !test_bit(_VCPUF_down, &v->vcpu_flags);
     2.6          break;
     2.7 +
     2.8 +    case VCPUOP_get_runstate_info:
     2.9 +    {
    2.10 +        struct vcpu_runstate_info runstate;
    2.11 +        vcpu_runstate_get(v, &runstate);
    2.12 +        if ( copy_to_user(arg, &runstate, sizeof(runstate)) )
    2.13 +            rc = -EFAULT;
    2.14 +        break;
    2.15 +    }
    2.16 +
    2.17 +    default:
    2.18 +        rc = -ENOSYS;
    2.19 +        break;
    2.20      }
    2.21  
    2.22      return rc;
     3.1 --- a/xen/common/keyhandler.c	Sat Feb 25 12:27:53 2006 +0100
     3.2 +++ b/xen/common/keyhandler.c	Sat Feb 25 17:58:37 2006 +0100
     3.3 @@ -169,8 +169,6 @@ static void dump_domains(unsigned char k
     3.4  }
     3.5  
     3.6  extern void dump_runq(unsigned char key);
     3.7 -extern void print_sched_histo(unsigned char key);
     3.8 -extern void reset_sched_histo(unsigned char key);
     3.9  #ifndef NDEBUG
    3.10  extern void audit_domains_key(unsigned char key);
    3.11  #endif
    3.12 @@ -207,10 +205,6 @@ void initialize_keytable(void)
    3.13      register_keyhandler(
    3.14          'h', show_handlers, "show this message");
    3.15      register_keyhandler(
    3.16 -        'l', print_sched_histo, "print sched latency histogram");
    3.17 -    register_keyhandler(
    3.18 -        'L', reset_sched_histo, "reset sched latency histogram");
    3.19 -    register_keyhandler(
    3.20          'q', dump_domains, "dump domain (and guest debug) info");
    3.21      register_keyhandler(
    3.22          'r', dump_runq,      "dump run queues");
     4.1 --- a/xen/common/sched_bvt.c	Sat Feb 25 12:27:53 2006 +0100
     4.2 +++ b/xen/common/sched_bvt.c	Sat Feb 25 17:58:37 2006 +0100
     4.3 @@ -132,13 +132,13 @@ static void unwarp_timer_fn(void *data)
     4.4      vcpu_schedule_unlock_irq(v);
     4.5  }
     4.6  
     4.7 -static inline u32 calc_avt(struct vcpu *d, s_time_t now)
     4.8 +static inline u32 calc_avt(struct vcpu *v, s_time_t now)
     4.9  {
    4.10      u32 ranfor, mcus;
    4.11 -    struct bvt_dom_info *inf = BVT_INFO(d->domain);
    4.12 -    struct bvt_vcpu_info *einf = EBVT_INFO(d);
    4.13 +    struct bvt_dom_info *inf = BVT_INFO(v->domain);
    4.14 +    struct bvt_vcpu_info *einf = EBVT_INFO(v);
    4.15      
    4.16 -    ranfor = (u32)(now - d->lastschd);
    4.17 +    ranfor = (u32)(now - v->runstate.state_entry_time);
    4.18      mcus = (ranfor + MCU - 1)/MCU;
    4.19  
    4.20      return einf->avt + mcus * inf->mcu_advance;
    4.21 @@ -262,7 +262,7 @@ static void bvt_wake(struct vcpu *v)
    4.22      curr_evt = calc_evt(curr, calc_avt(curr, now));
    4.23      /* Calculate the time the current domain would run assuming
    4.24         the second smallest evt is of the newly woken domain */
    4.25 -    r_time = curr->lastschd +
    4.26 +    r_time = curr->runstate.state_entry_time +
    4.27          ((einf->evt - curr_evt) / BVT_INFO(curr->domain)->mcu_advance) +
    4.28          ctx_allow;
    4.29  
    4.30 @@ -558,7 +558,6 @@ static void bvt_dump_cpu_state(int i)
    4.31          printk("%3d: %u has=%c ", loop++, v->domain->domain_id,
    4.32                 test_bit(_VCPUF_running, &v->vcpu_flags) ? 'T':'F');
    4.33          bvt_dump_runq_el(v);
    4.34 -        printk("c=0x%X%08X\n", (u32)(v->cpu_time>>32), (u32)v->cpu_time);
    4.35          printk("         l: %p n: %p  p: %p\n",
    4.36                 &vcpu_inf->run_list, vcpu_inf->run_list.next,
    4.37                 vcpu_inf->run_list.prev);
     5.1 --- a/xen/common/sched_sedf.c	Sat Feb 25 12:27:53 2006 +0100
     5.2 +++ b/xen/common/sched_sedf.c	Sat Feb 25 17:58:37 2006 +0100
     5.3 @@ -1408,18 +1408,14 @@ static void sedf_dump_domain(struct vcpu
     5.4  {
     5.5      printk("%i.%i has=%c ", d->domain->domain_id, d->vcpu_id,
     5.6             test_bit(_VCPUF_running, &d->vcpu_flags) ? 'T':'F');
     5.7 -    printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64" w=%hu c=%"PRIu64
     5.8 +    printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64" w=%hu"
     5.9             " sc=%i xtr(%s)=%"PRIu64" ew=%hu",
    5.10             EDOM_INFO(d)->period, EDOM_INFO(d)->slice, EDOM_INFO(d)->deadl_abs,
    5.11 -           EDOM_INFO(d)->weight, d->cpu_time,
    5.12 +           EDOM_INFO(d)->weight,
    5.13             EDOM_INFO(d)->score[EXTRA_UTIL_Q],
    5.14             (EDOM_INFO(d)->status & EXTRA_AWARE) ? "yes" : "no",
    5.15             EDOM_INFO(d)->extra_time_tot, EDOM_INFO(d)->extraweight);
    5.16      
    5.17 -    if ( d->cpu_time != 0 )
    5.18 -        printf(" (%"PRIu64"%%)", (EDOM_INFO(d)->extra_time_tot * 100)
    5.19 -               / d->cpu_time);
    5.20 -
    5.21  #ifdef SEDF_STATS
    5.22      if ( EDOM_INFO(d)->block_time_tot != 0 )
    5.23          printf(" pen=%"PRIu64"%%", (EDOM_INFO(d)->penalty_time_tot * 100) /
     6.1 --- a/xen/common/schedule.c	Sat Feb 25 12:27:53 2006 +0100
     6.2 +++ b/xen/common/schedule.c	Sat Feb 25 17:58:37 2006 +0100
     6.3 @@ -36,14 +36,6 @@ extern void arch_getdomaininfo_ctxt(stru
     6.4  static char opt_sched[10] = "sedf";
     6.5  string_param("sched", opt_sched);
     6.6  
     6.7 -/*#define WAKE_HISTO*/
     6.8 -/*#define BLOCKTIME_HISTO*/
     6.9 -#if defined(WAKE_HISTO)
    6.10 -#define BUCKETS 31
    6.11 -#elif defined(BLOCKTIME_HISTO)
    6.12 -#define BUCKETS 200
    6.13 -#endif
    6.14 -
    6.15  #define TIME_SLOP      (s32)MICROSECS(50)     /* allow time to slip a bit */
    6.16  
    6.17  /* Various timer handlers. */
    6.18 @@ -73,6 +65,36 @@ static struct scheduler ops;
    6.19  /* Per-CPU periodic timer sends an event to the currently-executing domain. */
    6.20  static struct timer t_timer[NR_CPUS]; 
    6.21  
    6.22 +static inline void vcpu_runstate_change(
    6.23 +    struct vcpu *v, int new_state, s_time_t new_entry_time)
    6.24 +{
    6.25 +    ASSERT(v->runstate.state != new_state);
    6.26 +    ASSERT(spin_is_locked(&schedule_data[v->processor].schedule_lock));
    6.27 +
    6.28 +    v->runstate.time[v->runstate.state] +=
    6.29 +        new_entry_time - v->runstate.state_entry_time;
    6.30 +    v->runstate.state_entry_time = new_entry_time;
    6.31 +    v->runstate.state = new_state;
    6.32 +}
    6.33 +
    6.34 +void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
    6.35 +{
    6.36 +    if ( likely(v == current) )
    6.37 +    {
    6.38 +        /* Fast lock-free path. */
    6.39 +        memcpy(runstate, &v->runstate, sizeof(*runstate));
     6.40 +        ASSERT(runstate->state == RUNSTATE_running);
    6.41 +        runstate->time[RUNSTATE_running] += NOW() - runstate->state_entry_time;
    6.42 +    }
    6.43 +    else
    6.44 +    {
    6.45 +        vcpu_schedule_lock_irq(v);
    6.46 +        memcpy(runstate, &v->runstate, sizeof(*runstate));
    6.47 +        runstate->time[runstate->state] += NOW() - runstate->state_entry_time;
    6.48 +        vcpu_schedule_unlock_irq(v);
    6.49 +    }
    6.50 +}
    6.51 +
    6.52  struct domain *alloc_domain(void)
    6.53  {
    6.54      struct domain *d;
    6.55 @@ -119,6 +141,9 @@ struct vcpu *alloc_vcpu(
    6.56      v->cpu_affinity = is_idle_domain(d) ?
    6.57          cpumask_of_cpu(cpu_id) : CPU_MASK_ALL;
    6.58  
    6.59 +    v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
    6.60 +    v->runstate.state_entry_time = NOW();
    6.61 +
    6.62      if ( (vcpu_id != 0) && !is_idle_domain(d) )
    6.63          set_bit(_VCPUF_down, &v->vcpu_flags);
    6.64  
    6.65 @@ -165,8 +190,15 @@ void vcpu_sleep_nosync(struct vcpu *v)
    6.66      unsigned long flags;
    6.67  
    6.68      vcpu_schedule_lock_irqsave(v, flags);
    6.69 +
    6.70      if ( likely(!vcpu_runnable(v)) )
    6.71 +    {
    6.72 +        if ( v->runstate.state == RUNSTATE_runnable )
    6.73 +            vcpu_runstate_change(v, RUNSTATE_offline, NOW());
    6.74 +
    6.75          SCHED_OP(sleep, v);
    6.76 +    }
    6.77 +
    6.78      vcpu_schedule_unlock_irqrestore(v, flags);
    6.79  
    6.80      TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
    6.81 @@ -187,11 +219,19 @@ void vcpu_wake(struct vcpu *v)
    6.82      unsigned long flags;
    6.83  
    6.84      vcpu_schedule_lock_irqsave(v, flags);
    6.85 +
    6.86      if ( likely(vcpu_runnable(v)) )
    6.87      {
    6.88 +        if ( v->runstate.state >= RUNSTATE_blocked )
    6.89 +            vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
    6.90          SCHED_OP(wake, v);
    6.91 -        v->wokenup = NOW();
    6.92      }
    6.93 +    else if ( !test_bit(_VCPUF_blocked, &v->vcpu_flags) )
    6.94 +    {
    6.95 +        if ( v->runstate.state == RUNSTATE_blocked )
    6.96 +            vcpu_runstate_change(v, RUNSTATE_offline, NOW());
    6.97 +    }
    6.98 +
    6.99      vcpu_schedule_unlock_irqrestore(v, flags);
   6.100  
   6.101      TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
   6.102 @@ -376,8 +416,6 @@ static void __enter_scheduler(void)
   6.103  
   6.104      stop_timer(&schedule_data[cpu].s_timer);
   6.105      
   6.106 -    prev->cpu_time += now - prev->lastschd;
   6.107 -
   6.108      /* get policy-specific decision on scheduling... */
   6.109      next_slice = ops.do_schedule(now);
   6.110  
   6.111 @@ -386,8 +424,6 @@ static void __enter_scheduler(void)
   6.112  
   6.113      schedule_data[cpu].curr = next;
   6.114      
   6.115 -    next->lastschd = now;
   6.116 -
   6.117      set_timer(&schedule_data[cpu].s_timer, now + r_time);
   6.118  
   6.119      if ( unlikely(prev == next) )
   6.120 @@ -397,38 +433,23 @@ static void __enter_scheduler(void)
   6.121      }
   6.122  
   6.123      TRACE_2D(TRC_SCHED_SWITCH_INFPREV,
   6.124 -             prev->domain->domain_id, now - prev->lastschd);
   6.125 +             prev->domain->domain_id,
   6.126 +             now - prev->runstate.state_entry_time);
   6.127      TRACE_3D(TRC_SCHED_SWITCH_INFNEXT,
   6.128 -             next->domain->domain_id, now - next->wokenup, r_time);
   6.129 -
   6.130 -    /*
   6.131 -     * Logic of wokenup field in domain struct:
   6.132 -     * Used to calculate "waiting time", which is the time that a domain
   6.133 -     * spends being "runnable", but not actually running. wokenup is set
   6.134 -     * set whenever a domain wakes from sleeping. However, if wokenup is not
   6.135 -     * also set here then a preempted runnable domain will get a screwed up
   6.136 -     * "waiting time" value next time it is scheduled.
   6.137 -     */
   6.138 -    prev->wokenup = now;
   6.139 +             next->domain->domain_id,
   6.140 +             (next->runstate.state == RUNSTATE_runnable) ?
   6.141 +             (now - next->runstate.state_entry_time) : 0,
   6.142 +             r_time);
   6.143  
   6.144 -#if defined(WAKE_HISTO)
   6.145 -    if ( !is_idle_vcpu(next) && next->wokenup )
   6.146 -    {
   6.147 -        ulong diff = (ulong)(now - next->wokenup);
   6.148 -        diff /= (ulong)MILLISECS(1);
   6.149 -        if (diff <= BUCKETS-2)  schedule_data[cpu].hist[diff]++;
   6.150 -        else                    schedule_data[cpu].hist[BUCKETS-1]++;
   6.151 -    }
   6.152 -    next->wokenup = (s_time_t)0;
   6.153 -#elif defined(BLOCKTIME_HISTO)
   6.154 -    prev->lastdeschd = now;
   6.155 -    if ( !is_idle_vcpu(next) )
   6.156 -    {
   6.157 -        ulong diff = (ulong)((now - next->lastdeschd) / MILLISECS(10));
   6.158 -        if (diff <= BUCKETS-2)  schedule_data[cpu].hist[diff]++;
   6.159 -        else                    schedule_data[cpu].hist[BUCKETS-1]++;
   6.160 -    }
   6.161 -#endif
   6.162 +    ASSERT(prev->runstate.state == RUNSTATE_running);
   6.163 +    vcpu_runstate_change(
   6.164 +        prev,
   6.165 +        (test_bit(_VCPUF_blocked, &prev->vcpu_flags) ? RUNSTATE_blocked :
   6.166 +         (vcpu_runnable(prev) ? RUNSTATE_runnable : RUNSTATE_offline)),
   6.167 +        now);
   6.168 +
   6.169 +    ASSERT(next->runstate.state != RUNSTATE_running);
   6.170 +    vcpu_runstate_change(next, RUNSTATE_running, now);
   6.171  
   6.172      ASSERT(!test_bit(_VCPUF_running, &next->vcpu_flags));
   6.173      set_bit(_VCPUF_running, &next->vcpu_flags);
   6.174 @@ -568,47 +589,6 @@ void dump_runq(unsigned char key)
   6.175      local_irq_restore(flags);
   6.176  }
   6.177  
   6.178 -#if defined(WAKE_HISTO) || defined(BLOCKTIME_HISTO)
   6.179 -
   6.180 -void print_sched_histo(unsigned char key)
   6.181 -{
   6.182 -    int i, j, k;
   6.183 -    for_each_online_cpu ( k )
   6.184 -    {
   6.185 -        j = 0;
   6.186 -        printf ("CPU[%02d]: scheduler latency histogram (ms:[count])\n", k);
   6.187 -        for ( i = 0; i < BUCKETS; i++ )
   6.188 -        {
   6.189 -            if ( schedule_data[k].hist[i] != 0 )
   6.190 -            {
   6.191 -                if ( i < BUCKETS-1 )
   6.192 -                    printk("%2d:[%7u]    ", i, schedule_data[k].hist[i]);
   6.193 -                else
   6.194 -                    printk(" >:[%7u]    ", schedule_data[k].hist[i]);
   6.195 -                if ( !(++j % 5) )
   6.196 -                    printk("\n");
   6.197 -            }
   6.198 -        }
   6.199 -        printk("\n");
   6.200 -    }
   6.201 -      
   6.202 -}
   6.203 -
   6.204 -void reset_sched_histo(unsigned char key)
   6.205 -{
   6.206 -    int i, j;
   6.207 -    for ( j = 0; j < NR_CPUS; j++ )
   6.208 -        for ( i=0; i < BUCKETS; i++ ) 
   6.209 -            schedule_data[j].hist[i] = 0;
   6.210 -}
   6.211 -
   6.212 -#else
   6.213 -
   6.214 -void print_sched_histo(unsigned char key) { }
   6.215 -void reset_sched_histo(unsigned char key) { }
   6.216 -
   6.217 -#endif
   6.218 -
   6.219  /*
   6.220   * Local variables:
   6.221   * mode: C
     7.1 --- a/xen/include/public/vcpu.h	Sat Feb 25 12:27:53 2006 +0100
     7.2 +++ b/xen/include/public/vcpu.h	Sat Feb 25 17:58:37 2006 +0100
     7.3 @@ -51,6 +51,40 @@
     7.4  /* Returns 1 if the given VCPU is up. */
     7.5  #define VCPUOP_is_up                3
     7.6  
     7.7 +/*
     7.8 + * Return information about the state and running time of a VCPU.
     7.9 + * @extra_arg == pointer to xen_vcpu_info structure.
    7.10 + */
    7.11 +#define VCPUOP_get_runstate_info    4
    7.12 +typedef struct vcpu_runstate_info {
    7.13 +    /* VCPU's current state (RUNSTATE_*). */
    7.14 +    int      state;
    7.15 +    /* When was current state entered (system time, ns)? */
    7.16 +    uint64_t state_entry_time;
    7.17 +    /*
    7.18 +     * Time spent in each RUNSTATE_* (ns). The sum of these times is
    7.19 +     * guaranteed not to drift from system time.
    7.20 +     */
    7.21 +    uint64_t time[4];
    7.22 +} vcpu_runstate_info_t;
    7.23 +
    7.24 +/* VCPU is currently running on a physical CPU. */
    7.25 +#define RUNSTATE_running  0
    7.26 +
    7.27 +/* VCPU is runnable, but not currently scheduled on any physical CPU. */
    7.28 +#define RUNSTATE_runnable 1
    7.29 +
    7.30 +/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
    7.31 +#define RUNSTATE_blocked  2
    7.32 +
    7.33 +/*
    7.34 + * VCPU is not runnable, but it is not blocked.
    7.35 + * This is a 'catch all' state for things like hotplug and pauses by the
    7.36 + * system administrator (or for critical sections in the hypervisor).
    7.37 + * RUNSTATE_blocked dominates this state (it is the preferred state).
    7.38 + */
    7.39 +#define RUNSTATE_offline  3
    7.40 +
    7.41  #endif /* __XEN_PUBLIC_VCPU_H__ */
    7.42  
    7.43  /*
     8.1 --- a/xen/include/xen/sched-if.h	Sat Feb 25 12:27:53 2006 +0100
     8.2 +++ b/xen/include/xen/sched-if.h	Sat Feb 25 17:58:37 2006 +0100
     8.3 @@ -8,9 +8,6 @@
     8.4  #ifndef __XEN_SCHED_IF_H__
     8.5  #define __XEN_SCHED_IF_H__
     8.6  
     8.7 -#define BUCKETS  10
     8.8 -/*300*/
     8.9 -
    8.10  struct schedule_data {
    8.11      spinlock_t          schedule_lock;  /* spinlock protecting curr        */
    8.12      struct vcpu        *curr;           /* current task                    */
    8.13 @@ -18,9 +15,6 @@ struct schedule_data {
    8.14      void               *sched_priv;
    8.15      struct timer        s_timer;        /* scheduling timer                */
    8.16      unsigned long       tick;           /* current periodic 'tick'         */
    8.17 -#ifdef BUCKETS
    8.18 -    u32                 hist[BUCKETS];  /* for scheduler latency histogram */
    8.19 -#endif
    8.20  } __cacheline_aligned;
    8.21  
    8.22  extern struct schedule_data schedule_data[];
     9.1 --- a/xen/include/xen/sched.h	Sat Feb 25 12:27:53 2006 +0100
     9.2 +++ b/xen/include/xen/sched.h	Sat Feb 25 17:58:37 2006 +0100
     9.3 @@ -8,6 +8,7 @@
     9.4  #include <xen/smp.h>
     9.5  #include <public/xen.h>
     9.6  #include <public/dom0_ops.h>
     9.7 +#include <public/vcpu.h>
     9.8  #include <xen/time.h>
     9.9  #include <xen/timer.h>
    9.10  #include <xen/grant_table.h>
    9.11 @@ -63,15 +64,13 @@ struct vcpu
    9.12  
    9.13      struct vcpu     *next_in_list;
    9.14  
    9.15 -    struct timer  timer;         /* one-shot timer for timeout values */
    9.16 +    struct timer     timer;         /* one-shot timer for timeout values */
    9.17      unsigned long    sleep_tick;    /* tick at which this vcpu started sleep */
    9.18  
    9.19 -    s_time_t         lastschd;      /* time this domain was last scheduled */
    9.20 -    s_time_t         lastdeschd;    /* time this domain was last descheduled */
    9.21 -    s_time_t         cpu_time;      /* total CPU time received till now */
    9.22 -    s_time_t         wokenup;       /* time domain got woken up */
    9.23      void            *sched_priv;    /* scheduler-specific data */
    9.24  
    9.25 +    struct vcpu_runstate_info runstate;
    9.26 +
    9.27      unsigned long    vcpu_flags;
    9.28  
    9.29      u16              virq_to_evtchn[NR_VIRQS];
    9.30 @@ -397,7 +396,6 @@ extern struct domain *domain_list;
    9.31  #define _DOMF_debugging        4
    9.32  #define DOMF_debugging         (1UL<<_DOMF_debugging)
    9.33  
    9.34 -
    9.35  static inline int vcpu_runnable(struct vcpu *v)
    9.36  {
    9.37      return ( (atomic_read(&v->pausecnt) == 0) &&
    9.38 @@ -415,6 +413,8 @@ void cpu_init(void);
    9.39  
    9.40  int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
    9.41  
    9.42 +void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
    9.43 +
    9.44  static inline void vcpu_unblock(struct vcpu *v)
    9.45  {
    9.46      if ( test_and_clear_bit(_VCPUF_blocked, &v->vcpu_flags) )