From: Juergen Gross
Date: Thu, 20 Feb 2020 10:36:16 +0000 (+0100)
Subject: sched: add some diagnostic info in the run queue keyhandler
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=1bfa59ba3b0647007e34b3cb8f8daad045467fb7;p=people%2Fdwmw2%2Fxen.git

sched: add some diagnostic info in the run queue keyhandler

When dumping the run queue information, add some more data regarding the
current and (if known) previous vcpu for each physical cpu.

With core scheduling activated, the printed data will be e.g.:

(XEN) CPUs info:
(XEN) CPU[00] current=d[IDLE]v0, curr=d[IDLE]v0, prev=NULL
(XEN) CPU[01] current=d[IDLE]v1
(XEN) CPU[02] current=d[IDLE]v2, curr=d[IDLE]v2, prev=NULL
(XEN) CPU[03] current=d[IDLE]v3

Signed-off-by: Juergen Gross
Reviewed-by: Dario Faggioli
---

diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index de5a6b1a57..7e8e7d2c39 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -3243,7 +3243,7 @@ void scheduler_free(struct scheduler *sched)
 
 void schedule_dump(struct cpupool *c)
 {
-    unsigned int      i;
+    unsigned int      i, j;
     struct scheduler *sched;
     cpumask_t        *cpus;
 
@@ -3254,7 +3254,7 @@ void schedule_dump(struct cpupool *c)
     if ( c != NULL )
     {
         sched = c->sched;
-        cpus = c->cpu_valid;
+        cpus = c->res_valid;
         printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
         sched_dump_settings(sched);
     }
@@ -3264,11 +3264,25 @@ void schedule_dump(struct cpupool *c)
         cpus = &cpupool_free_cpus;
     }
 
-    if ( sched->dump_cpu_state != NULL )
+    printk("CPUs info:\n");
+    for_each_cpu (i, cpus)
     {
-        printk("CPUs info:\n");
-        for_each_cpu (i, cpus)
-            sched_dump_cpu_state(sched, i);
+        struct sched_resource *sr = get_sched_res(i);
+        unsigned long flags;
+        spinlock_t *lock;
+
+        lock = pcpu_schedule_lock_irqsave(i, &flags);
+
+        printk("CPU[%02d] current=%pv, curr=%pv, prev=%pv\n", i,
+               get_cpu_current(i), sr->curr ? sr->curr->vcpu_list : NULL,
+               sr->prev ? sr->prev->vcpu_list : NULL);
+        for_each_cpu (j, sr->cpus)
+            if ( i != j )
+                printk("CPU[%02d] current=%pv\n", j, get_cpu_current(j));
+
+        pcpu_schedule_unlock_irqrestore(lock, flags, i);
+
+        sched_dump_cpu_state(sched, i);
     }
 
     rcu_read_unlock(&sched_res_rculock);
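
For illustration only (not part of the patch): a minimal stand-alone sketch of
the output grouping the new dump loop produces under core scheduling, assuming
two CPUs per scheduling resource. All names below (sketch_res, cpu_current(),
NR_CPUS, GRANULARITY) are hypothetical stand-ins for Xen's sched_resource,
get_cpu_current() and the real topology; the sketch only shows why the first
CPU of each resource prints current/curr/prev while its siblings print
current only, as in the example output of the commit message.

    #include <stdio.h>

    #define NR_CPUS     4   /* hypothetical: four CPUs as in the example         */
    #define GRANULARITY 2   /* hypothetical: two CPUs per scheduling resource    */

    struct sketch_res {
        unsigned int first_cpu;     /* first ("master") CPU owning the resource  */
        const char *curr, *prev;    /* unit currently / previously scheduled     */
    };

    /* Hypothetical stand-in for Xen's get_cpu_current(cpu). */
    static const char *cpu_current(unsigned int cpu)
    {
        static const char *const names[NR_CPUS] = {
            "d[IDLE]v0", "d[IDLE]v1", "d[IDLE]v2", "d[IDLE]v3",
        };
        return names[cpu];
    }

    int main(void)
    {
        struct sketch_res res[] = {
            { .first_cpu = 0, .curr = "d[IDLE]v0", .prev = NULL },
            { .first_cpu = 2, .curr = "d[IDLE]v2", .prev = NULL },
        };
        unsigned int i, j;

        printf("CPUs info:\n");
        for ( i = 0; i < sizeof(res) / sizeof(res[0]); i++ )
        {
            unsigned int cpu = res[i].first_cpu;

            /* The first CPU of each resource reports current, curr and prev. */
            printf("CPU[%02u] current=%s, curr=%s, prev=%s\n", cpu,
                   cpu_current(cpu), res[i].curr ? res[i].curr : "NULL",
                   res[i].prev ? res[i].prev : "NULL");

            /* The sibling CPUs of the same resource report current only. */
            for ( j = cpu + 1; j < cpu + GRANULARITY && j < NR_CPUS; j++ )
                printf("CPU[%02u] current=%s\n", j, cpu_current(j));
        }

        return 0;
    }

Built as a plain user-space program, this prints the same four CPU lines as the
example above, minus the "(XEN) " console prefix.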