};
/* Sched data */
-
-enum {
- SCHED_DOM_ADD=1,
- SCHED_DOM_REM,
- SCHED_SLEEP,
- SCHED_WAKE,
- SCHED_YIELD,
- SCHED_BLOCK,
- SCHED_SHUTDOWN,
- SCHED_CTL,
- SCHED_ADJDOM,
- SCHED_SWITCH,
- SCHED_S_TIMER_FN,
- SCHED_T_TIMER_FN,
- SCHED_DOM_TIMER_FN,
- SCHED_SWITCH_INFPREV,
- SCHED_SWITCH_INFNEXT,
- SCHED_SHUTDOWN_CODE,
- SCHED_MAX
-};
-
enum {
RUNSTATE_RUNNING=0,
RUNSTATE_RUNNABLE,
return;
}
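+/* Dump the prev/next domain/vcpu pair carried by a sched_switch record. */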
+void dump_sched_switch(struct record_info *ri)
+{
+ struct {
+ unsigned int prev_dom, prev_vcpu, next_dom, next_vcpu;
+ } *r = (typeof(r))ri->d;
+
+ printf(" %s sched_switch prev d%uv%u next d%uv%u\n",
+ ri->dump_header, r->prev_dom, r->prev_vcpu,
+ r->next_dom, r->next_vcpu);
+}
+
void sched_switch_process(struct pcpu_info *p)
{
struct vcpu_data *prev, *next;
} * r = (typeof(r))ri->d;
if(opt.dump_all)
- printf("%s sched_switch prev d%uv%u next d%uv%u\n",
- ri->dump_header,
- r->prev_dom, r->prev_vcpu,
- r->next_dom, r->next_vcpu);
+ dump_sched_switch(ri);
if(r->prev_vcpu > MAX_CPUS)
{
}
}
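+/* Dump the domain/vcpu a simple scheduler action (sleep, wake, yield, block) refers to. */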
+void dump_sched_vcpu_action(struct record_info *ri, const char *action)
+{
+ struct {
+ unsigned int domid, vcpuid;
+ } *r = (typeof(r))ri->d;
+
+ printf(" %s %s d%uv%u\n", ri->dump_header, action, r->domid, r->vcpuid);
+}
+
void sched_process(struct pcpu_info *p)
{
default:
process_generic(&p->ri);
}
- } else {
- if(ri->evt.sub == 1)
- sched_runstate_process(p);
- else {
- UPDATE_VOLUME(p, sched_verbose, ri->size);
+ return;
+ }
+
+ if(ri->evt.sub == 1) {
+ /* TRC_SCHED_MIN */
+ sched_runstate_process(p);
+ } else if (ri->evt.sub == 8) {
+ /* TRC_SCHED_VERBOSE */
+ switch(ri->event)
+ {
+ case TRC_SCHED_DOM_ADD:
+ if(opt.dump_all) {
+ struct {
+ unsigned int domid;
+ } *r = (typeof(r))ri->d;
+
+ printf(" %s sched_init_domain d%u\n", ri->dump_header, r->domid);
+ }
+ break;
+ case TRC_SCHED_DOM_REM:
+ if(opt.dump_all) {
+ struct {
+ unsigned int domid, vcpuid;
+ } *r = (typeof(r))ri->d;
+
+ printf(" %s sched_destroy_domain d%u\n", ri->dump_header, r->domid);
+ }
+ break;
+ case TRC_SCHED_SLEEP:
+ if(opt.dump_all)
+ dump_sched_vcpu_action(ri, "vcpu_sleep");
+ break;
+ case TRC_SCHED_WAKE:
+ if(opt.dump_all)
+ dump_sched_vcpu_action(ri, "vcpu_wake");
+ break;
+ case TRC_SCHED_YIELD:
+ if(opt.dump_all)
+ dump_sched_vcpu_action(ri, "vcpu_yield");
+ break;
+ case TRC_SCHED_BLOCK:
+ if(opt.dump_all)
+ dump_sched_vcpu_action(ri, "vcpu_block");
+ break;
+ case TRC_SCHED_SHUTDOWN:
+ case TRC_SCHED_SHUTDOWN_CODE:
+ if(opt.dump_all) {
+ struct {
+ unsigned int domid, vcpuid, reason;
+ } *r = (typeof(r))ri->d;
+
+ printf(" %s %s d%uv%u, reason = %u\n", ri->dump_header,
+ ri->event == TRC_SCHED_SHUTDOWN ? "sched_shutdown" :
+ "sched_shutdown_code", r->domid, r->vcpuid, r->reason);
+ }
+ break;
+ case TRC_SCHED_ADJDOM:
+ if(opt.dump_all) {
+ struct {
+ unsigned int domid;
+ } *r = (typeof(r))ri->d;
+
+ printf(" %s sched_adjust d%u\n", ri->dump_header, r->domid);
+ }
+ break;
+ case TRC_SCHED_SWITCH:
+ dump_sched_switch(ri);
+ break;
+ case TRC_SCHED_SWITCH_INFPREV:
+ if(opt.dump_all) {
+ struct {
+ unsigned int domid, runtime;
+ } *r = (typeof(r))ri->d;
+
+            printf(" %s sched_switch prev d%u, run for %u.%03uus\n",
+                   ri->dump_header, r->domid, r->runtime / 1000,
+                   r->runtime % 1000);
+ }
+ break;
+ case TRC_SCHED_SWITCH_INFNEXT:
+ if(opt.dump_all)
+ {
+ struct {
+ unsigned int domid, rsince;
+ int slice;
+ } *r = (typeof(r))ri->d;
+
+ printf(" %s sched_switch next d%u", ri->dump_header, r->domid);
+            if ( r->rsince != 0 )
+                printf(", was runnable for %u.%03uus, ", r->rsince / 1000,
+                       r->rsince % 1000);
+            if ( r->slice > 0 )
+                printf("next slice %u.%03uus", r->slice / 1000,
+                       r->slice % 1000);
+            printf("\n");
+ }
+ break;
+ case TRC_SCHED_CTL:
+ case TRC_SCHED_S_TIMER_FN:
+ case TRC_SCHED_T_TIMER_FN:
+ case TRC_SCHED_DOM_TIMER_FN:
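+            /* Nothing further to decode for these events. */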
+ break;
+ default:
process_generic(&p->ri);
}
+ } else {
+ UPDATE_VOLUME(p, sched_verbose, ri->size);
+ process_generic(&p->ri);
}
}