* xenalyze.c: Analyzing xentrace output
*
* Written by George Dunlap.
- *
+ *
* Copyright (c) 2006-2007, XenSource Inc.
* Copyright (c) 2007-2008, Citrix Systems R&D Ltd, UK
*
fprintf(warn, ##_x); \
} \
} while(0)
-
+
/* -- Global variables -- */
struct {
int fd;
.progress = { .update_offset = 0 },
};
-/*
+/*
Kinds of errors:
Unexpected values
- RIP with information in high bits (not all 0 or 1)
- domain runstates
- runstate / tsc skew
- vcpu_{prev,next}_update p->current{==,!=}null
- - vcpu start conditions
+ - vcpu start conditions
- lost_cpu count higher than # of seen cpus / < 0
- lost cpu has non-null p->current
Symbol file
int verbosity = 5;
struct {
- unsigned
+ unsigned
scatterplot_interrupt_eip:1,
scatterplot_cpi:1,
scatterplot_unpin_promote:1,
} opt = {
.scatterplot_interrupt_eip=0,
.scatterplot_cpi=0,
- .scatterplot_unpin_promote=0,
+ .scatterplot_unpin_promote=0,
.scatterplot_cr3_switch=0,
.scatterplot_wake_to_halt=0,
.scatterplot_vmexit_eip=0,
error(ERR_ASSERT, NULL);
} else
last_addr = (*p)->symbols[(*p)->count].addr;
-
+
(*p)->count++;
/* If this struct is full, point to the next. It will be allocated
void (*dump)(struct eip_list_struct *);
} eip_list_type[EIP_LIST_TYPE_MAX] = {
[EIP_LIST_TYPE_NONE] = {
- .update=NULL,
+ .update=NULL,
.new=NULL,
.dump=NULL },
};
/* --- HVM class of events --- */
/*
- * -- Algorithms --
+ * -- Algorithms --
*
* Interrupt Wake-to-halt detection
*
*
* The "waking" interrupts we want to sub-classify into
* "wake-only" (when interrupt was the only interrupt from wake to halt) and
- * "wake-all" (whether this was the only interrupt or not).
+ * "wake-all" (whether this was the only interrupt or not).
*/
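/*
 * A minimal, self-contained sketch of the classification above (toy
 * names, kept under #if 0 so it stays out of the build; the real
 * accounting lives in the HVM handlers later in this file): every
 * waking interrupt's wake-to-halt time is charged to "wake-all", and
 * additionally to "wake-only" when it was the sole interrupt between
 * wake and halt.
 */
#if 0
struct w2h_toy {
    unsigned long long wake_all;  /* cycles from all wake-to-halt arcs */
    unsigned long long wake_only; /* cycles from single-interrupt arcs */
};

static void w2h_toy_account(struct w2h_toy *s, int interrupts,
                            unsigned long long cycles)
{
    s->wake_all += cycles;
    if (interrupts == 1)   /* only interrupt from wake to halt */
        s->wake_only += cycles;
}
#endif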
/* VMX data */
"pf_inject",
"inj_exc",
"inj_virq",
- "reinj_virq",
+ "reinj_virq",
"io_read",
"io_write",
"cr_read", /* 8 */
size);
error(ERR_SYSTEM, NULL);
}
-
+
}
for(i=0; i<GUEST_INTERRUPT_MAX+1; i++)
h->summary.guest_interrupt[i].count=0;
struct cr3_value_struct *cr3_value_head;
struct eip_list_struct *emulate_eip_list;
struct eip_list_struct *interrupt_eip_list;
-
+
int guest_interrupt[GUEST_INTERRUPT_MAX+1];
struct hvm_short_summary_struct hvm_short;
struct {
printf(" +-%-7s: %10lld\n",
hvm_vol_name[k], vol->hvm[k]);
}
-
+
break;
}
}
I++; J--;
}
} while (I <= J); /* Keep going until our pointers meet or pass */
-
+
/* Re-adjust L and R, based on which element we're looking for */
if(J<K)
L=I;
} while (I <= J); /* Keep going until our pointers meet or pass */
/* Re-adjust L and R, based on which element we're looking for */
- if(J_weight<K_weight)
+ if(J_weight<K_weight) {
L=I; L_weight = I_weight;
+ }
- if(K_weight<I_weight)
+ if(K_weight<I_weight) {
R=J; R_weight = J_weight;
+ }
}
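/*
 * Self-contained sketch of the selection loops above (kept under #if 0):
 * Hoare-partition quickselect, which narrows [L,R] to whichever side
 * still contains the K-th smallest element instead of fully sorting.
 * The weighted variant additionally tracks the cumulative weight at each
 * boundary, but the control flow is the same.
 */
#if 0
static unsigned long long select_kth_toy(unsigned long long *A, int N, int K)
{
    int L = 0, R = N - 1;

    while (L < R) {
        unsigned long long pivot = A[(L + R) / 2];
        int I = L, J = R;

        do {
            while (A[I] < pivot) I++;
            while (A[J] > pivot) J--;
            if (I <= J) {
                unsigned long long t = A[I];
                A[I] = A[J];
                A[J] = t;
                I++; J--;
            }
        } while (I <= J); /* Keep going until our pointers meet or pass */

        /* Re-adjust L and R, based on which element we're looking for */
        if (J < K)
            L = I;
        if (K < I)
            R = J;
    }
    return A[K];
}
#endif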
static inline void print_cpu_affinity(struct cycle_summary *s, char *p) {
if(s->count) {
long long avg;
-
+
avg = s->cycles / s->count;
if ( opt.sample_size ) {
avg, p5, p50, p95);
} else {
printf("%s: %7d %5.2lfs %5.2lf%% %6lld\n",
- p, s->count,
+ p, s->count,
seconds,
percent,
avg);
long long avg;
avg = s->cycles / s->count;
-
+
if ( opt.sample_size ) {
long long p5, p50, p95;
int data_size = s->count;
struct interval_list *p;
fprintf(warn, "%s: Adding element '%s'\n", __func__, desc);
-
+
if((p=malloc(sizeof(*p)))==NULL) {
- fprintf(stderr, "malloc() failed.\n");
+ fprintf(stderr, "malloc() failed.\n");
error(ERR_SYSTEM, NULL);
}
__func__, cr3->gmfn);
P.interval.array.values[i] = &cr3->total_time.interval;
- }
+ }
}
} else if(opt.interval.mode == INTERVAL_MODE_LIST) {
char desc[32];
struct cr3_value_struct *p = P.interval.cr3;
interval_time_output();
-
+
hss_array = p->hvm.s;
printf(" %.02lf",
__cycles_percent(p->total_time.interval.cycles,
opt.interval.cycles));
- for(i=0; i<HVM_SHORT_SUMMARY_MAX; i++)
+ for(i=0; i<HVM_SHORT_SUMMARY_MAX; i++)
__interval_cycle_percent_output(&hss_array[i].interval,
p->total_time.interval.cycles);
}
P.interval.array.values[i] = &d->total_time.interval;
- }
+ }
}
} else if(opt.interval.mode == INTERVAL_MODE_LIST) {
char desc[32];
interval_cycle_percent_output(&d->total_time.interval);
- for(i=0; i<HVM_SHORT_SUMMARY_MAX; i++)
+ for(i=0; i<HVM_SHORT_SUMMARY_MAX; i++)
interval_cycle_percent_output(&d->hvm_short.s[i].interval);
printf("\n");
printf("\n");
}
-
+
}
void interval_domain_grant_maps_output(void) {
printf("\n");
}
}
-
+
/* General interval gateways */
void interval_callback(void) {
void update_eip(struct eip_list_struct **head, unsigned long long eip,
unsigned long long cycles, int type, void * extra) {
struct eip_list_struct *p, **last=head;
-
+
for(p=*head; p; last = (&p->next), p=p->next)
if(p->eip >= eip)
break;
-
+
if(!p || p->eip != eip) {
p=malloc(sizeof(*p));
if(!p) {
#endif
printf(" Total samples: %d\n", total);
-
+
for(i=0; i<N; i++) {
p = qsort_array[i];
if ( p->summary.cycles )
((double)p->summary.count*100)/total);
}
-
+
if(eip_list_type[p->type].dump) {
eip_list_type[p->type].dump(p);
}
void hvm_update_short_summary(struct hvm_data *h, int element) {
struct vcpu_data *v = h->v;
- if(v->cr3.data)
+ if(v->cr3.data)
update_cycles(&v->cr3.data->hvm.s[element], h->arc_cycles);
update_cycles(&v->d->hvm_short.s[element], h->arc_cycles);
e->corresponding_va = CORR_VA_INVALID;
e->pt_level = 0;
-
+
/* Detect accesses to Windows linear pagetables */
switch(guest_paging_levels)
{
| e->pt_index[1]<<22
| e->pt_index[0]<<12;
}
- }
+ }
break;
case 3:
if(e->pt_index[3]==3 && (e->pt_index[2]>>2==0))
| e->pt_index[1]<<21
| e->pt_index[2]<<30;
}
- }
+ }
break;
case 4:
if(e->pt_index[4] == 0x1ed)
}
}
else
- {
+ {
e->pt_level = 2;
e->corresponding_va = ((1ULL<<21)-1)
| (unsigned long long)e->pt_index[0]<<21
| (unsigned long long)e->pt_index[2]<<30
| (unsigned long long)e->pt_index[3]<<39;
}
-
+
if(HIGH_BIT(e->corresponding_va))
e->corresponding_va |= SIGN_EXTENDED_BITS;
}
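/*
 * Worked example of the sign extension above, assuming 48-bit canonical
 * addresses (i.e. HIGH_BIT tests bit 47 and SIGN_EXTENDED_BITS is
 * 0xffff000000000000): the pt_index[4] == 0x1ed case reconstructs a va
 * whose top field is 0x1ed << 39 = 0x0000f68000000000; bit 47 is set,
 * so it sign-extends to the canonical 0xfffff68000000000, the base of
 * the Windows x64 linear pagetable area this code is detecting.
 */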
h->v->guest_paging_levels);
hvm_pf_xen_preprocess(ri->event, h);
-
+
if(opt.dump_all)
{
if(e->pf_case == PF_XEN_EMULATE)
void hvm_vlapic_vmentry_cleanup(struct vcpu_data *v, tsc_t tsc)
{
int i;
-
+
struct vlapic_struct *vla = &v->vlapic;
for(i=0; i<MAX_VLAPIC_LIST; i++)
return;
}
- if(!o->first_tsc)
+ if(!o->first_tsc)
o->first_tsc = P.now;
if(opt.dump_all && o->count == 0 && o->injected)
if((opt.dump_all)
#if 0
&& (ov->runstate.state != RUNSTATE_RUNNING
- || ov->hvm.vmexit_valid)
+ || ov->hvm.vmexit_valid)
#endif
)
printf(" [vla] d%dv%d vec %d state %s (outstanding ipis %d)\n",
ri->dump_header,
r->vector, r->fake?"fake":"real");
}
-
+
if(opt.summary_info)
{
int vector = r->vector;
h->w2h.vector = vector;
h->summary.guest_interrupt[vector].is_wake = 1;
}
-
+
if( h->summary.guest_interrupt[vector].start_tsc == 0 ) {
/* Note that we want start_tsc set at the next vmentry */
h->summary.guest_interrupt[vector].start_tsc = 1;
/* Keep list in order */
for(p=*list; p && (p->pa != pa) && (p->pa < pa); q=p, p=p->next);
-
+
/* If we didn't find it, make a new element. */
if(!p || (p->pa != pa)) {
if((p=malloc(sizeof(*p)))==NULL) {
fprintf(stderr, "malloc() failed.\n");
error(ERR_SYSTEM, NULL);
}
-
+
bzero(p, sizeof(*p));
p->pa=pa;
p->va=va;
-
+
/* If we stopped in the middle or at the end, add it in */
if(q) {
p->next=q->next;
if(gmfn) {
struct cr3_value_struct *p, **last=&v->d->cr3_value_head;
-
+
/* Always add to tail, so that we get consistent interval
output as the number of cr3s grows */
for(p=*last; p; last = (&p->next), p=p->next)
else
P.cr3.head = p;
P.cr3.tail = &p->gnext;
-
+
P.cr3.id++;
/* Add to the interval list if appropriate */
struct cr3_value_struct *cr3;
/* Look for it in the list */
- for(cr3 = v->d->cr3_value_head; cr3; cr3=cr3->next)
+ for(cr3 = v->d->cr3_value_head; cr3; cr3=cr3->next)
if(cr3->gmfn == gmfn)
break;
if(!cr3)
return;
- if(cr3->prealloc_unpin.now)
+ if(cr3->prealloc_unpin.now)
fprintf(warn, "Strange, gmfn %llx multiple unpins w/o access!\n",
gmfn);
cr3->prealloc_unpin.now = 1;
cr3->prealloc_unpin.count++;
-
+
if(opt.dump_all)
printf(" cr3 %llx unpinned %d times\n",
gmfn, cr3->prealloc_unpin.count);
abs_cycles_to_time(p->first_time, &first);
abs_cycles_to_time(p->last_time, &last);
-
+
snprintf(desc, 30, " %8llx (id %d)", p->gmfn, p->cr3_id);
print_cycle_summary(&p->total_time, desc);
snprintf(desc, 30, " guest");
int flush=0;
if(v->cr3.val) {
- oval = v->cr3.val;
+ oval = v->cr3.val;
if(new_val == oval) {
if(v->cr3.data) {
#define case_cr(_x) \
case (_x): \
hvm_set_summary_handler(h, hvm_cr_write_summary, (void *)(_x)); \
- break
+ break
case_cr(0);
case_cr(1);
case_cr(2);
{
if(cr == 3 && h->v->cr3.val) {
printf("]%s cr_write cr3 val %llx oval %llx %s\n",
- ri->dump_header,
+ ri->dump_header,
val,
h->v->cr3.val,
(h->v->cr3.val == val)?"flush":"switch");
} else {
printf(" %s cr_write cr%d val %llx\n",
- ri->dump_header,
+ ri->dump_header,
cr, val);
}
ri->t.s, ri->t.ns, pcpu_string(ri->cpu),
r->vec, r->ec);
}
-
+
}
void hvm_intr_summary(struct hvm_data *h, void *d)
/* Want absolute tsc to global tsc */
abs_cycles_to_time(h->exit_tsc, &t);
- printf("d%dv%d %u.%09u %lld\n",
+ printf("d%dv%d %u.%09u %lld\n",
h->v->d->did, h->v->vid,
- t.s, t.ns,
+ t.s, t.ns,
rip);
}
/* Truncate to ADDR_SPACE_BITS */
unsigned long long rip = h->rip & ((1ULL << ADDR_SPACE_BITS)-1);
unsigned index = rip / opt.histogram_interrupt_increment;
-
+
h->summary.extint_histogram[index]++;
}
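/*
 * Worked example with hypothetical numbers, assuming the 48-bit address
 * mask: opt.histogram_interrupt_increment = 0x1000000 gives 16MB
 * buckets; an external interrupt at rip 0xffffffff81234567 masks down
 * to 0xffff81234567 and increments bucket 0xffff81234567 / 0x1000000 =
 * 0xffff81, so nearby interrupt rips aggregate into one counter.
 */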
fprintf(warn, "%s: Strange, h->postprocess set!\n",
__func__);
h->inflight.generic.event = ri->event;
- bcopy(h->d, h->inflight.generic.d, sizeof(unsigned int) * 4);
+ bcopy(h->d, h->inflight.generic.d, sizeof(unsigned int) * 4);
}
void hvm_generic_postprocess(struct hvm_data *h)
ri->dump_header,
evt_string,
is_64?"64":"");
-
+
for(i=0; i<ri->extra_words; i++) {
printf(" %x", ri->d[i]);
}
return;
-needs_vmexit:
+needs_vmexit:
/* Wait for the next vmexit */
if(!h->vmexit_valid)
{
int runstates[RUNSTATE_MAX];
int ret=-1;
int max_vcpus = 0;
-
+
if(d->did == DEFAULT_DOMAIN)
return 0;
fprintf(warn, " %s: %d\n",
runstate_name[i], runstates[i]);
}
-
+
if(ret >= 0)
return ret;
t.s, t.ns,
runstate_graph[new_runstate]);
}
-
+
if(v->runstate.tsc > 0 && v->runstate.tsc < tsc) {
update_cycles(v->runstates + v->runstate.state, tsc - v->runstate.tsc);
if ( opt.scatterplot_runstate_time )
{
struct time_struct t, dt;
-
+
abs_cycles_to_time(tsc, &t);
cycles_to_time(tsc - v->runstate.tsc, &dt);
-
+
printf("%dv%d %u.%09u %u.%09u\n",
d->did, v->vid,
t.s, t.ns,
v->runstate.tsc = tsc;
/* Determine the domain runstate */
- if(d->runstate_tsc > 0 && d->runstate_tsc < tsc)
+ if(d->runstate_tsc > 0 && d->runstate_tsc < tsc)
update_cycles(d->runstates + d->runstate, tsc - d->runstate_tsc);
d->runstate = domain_runstate(d);
if(opt.dump_all) {
if ( h->exit_reason < h->exit_reason_max
- && h->exit_reason_name[h->exit_reason] != NULL)
+ && h->exit_reason_name[h->exit_reason] != NULL)
printf("]%s vmexit exit_reason %s eip %llx%s\n",
ri->dump_header,
h->exit_reason_name[h->exit_reason],
break;
}
}
-
+
if(h->v->cr3.data) {
h->v->cr3.data->run_time += h->arc_cycles;
- if(opt.summary_info)
+ if(opt.summary_info)
update_cycles(&h->v->cr3.data->hv_time,
h->arc_cycles);
}
return;
}
- /* Vista bug
+ /* Vista bug
* This has to be done here because irqs are injected on the path out
* to vmexit. */
hvm_vlapic_vmentry_cleanup(h->v, ri->tsc);
- if(h->w2h.waking && opt.dump_all)
+ if(h->w2h.waking && opt.dump_all)
printf(" [w2h] d%dv%d Finishing waking\n",
h->v->d->did, h->v->vid);
}
/* ---- Shadow records ---- */
-union shadow_event
+union shadow_event
{
unsigned event;
struct {
flagstring[i]='-';
i=0;
-
+
if(e->flag_set_ad)
flagstring[i]='d';
else if(e->flag_set_a)
unsigned flags:29, emulation_count:3;
} gpl4;
} *r = (typeof(r))ri->d;
-
+
union shadow_event sevt = { .event = ri->event };
int rec_gpl = sevt.paging_levels + 2;
fprintf(warn, "%s: expected %zd bytes for %d-level guest, got %d!\n",
__func__, sizeof(r->gpl2), h->v->guest_paging_levels,
ri->extra_words * 4);
-
+
error(ERR_RECORD, ri);
return;
}
void shadow_unsync_postprocess(struct hvm_data *h)
{
struct pf_xen_extra *e = &h->inflight.pf_xen;
-
+
if(h->resyncs > 1)
fprintf(warn, "Strange, %d resyncs for an unsync!\n",
h->resyncs);
}
- if(!(e->flag_promote || h->prealloc_unpin || e->flag_unsync))
+ if(!(e->flag_promote || h->prealloc_unpin || e->flag_unsync))
update_summary(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_UPDATE_ONLY], h->arc_cycles);
/* more summary info */
dump_header,
evt_string,
sevt.paging_levels);
-
- for(i=0; i<4; i++)
+
+ for(i=0; i<4; i++)
{
printf(" %x", d[i]);
}
/* pf-case traces, vs others */
h->inflight.generic.event = ri->event;
- bcopy(ri->d, h->inflight.generic.d, sizeof(unsigned int) * 4);
+ bcopy(ri->d, h->inflight.generic.d, sizeof(unsigned int) * 4);
if(opt.dump_all)
shadow_fault_generic_dump(h->inflight.generic.event,
return;
h = &p->current->hvm;
-
+
if(!h->init || !h->vmexit_valid)
{
if(opt.dump_all)
}
if(opt.summary_info) {
- if(eax < PV_HYPERCALL_MAX)
+ if(eax < PV_HYPERCALL_MAX)
pv->hypercall_count[eax]++;
}
}
if(opt.summary_info) {
- if(trapnr < PV_TRAP_MAX)
+ if(trapnr < PV_TRAP_MAX)
pv->trap_count[trapnr]++;
}
struct {
unsigned long long pte, addr, eip;
} e;
-
+
switch ( pevt.minor ) {
case PV_PTWR_EMULATION_PAE:
if ( pevt.x64 )
printf("%x ", (unsigned)ri->d[i]);
}
printf("]");
-
+
}
printf("\n");
}
};
static const char *sched_op_str[] = {
- "yield", "block", "shutdown", "poll", "remote_shutdown", "shutdown_code",
+ "yield", "block", "shutdown", "poll", "remote_shutdown", "shutdown_code",
"watchdog",
};
for (i = 0, word = 1; i < 6 && word < ri->extra_words; i++) {
int present = pv_hypercall_arg_present(ri, i);
-
+
switch (present) {
case ARG_32BIT:
args[i] = ri->d[word];
int op = pv_hypercall_op(ri);
if(opt.summary_info) {
- if(op < PV_HYPERCALL_MAX)
+ if(op < PV_HYPERCALL_MAX)
pv->hypercall_count[op]++;
}
struct record_info *ri = &p->ri;
struct vcpu_data *v = p->current;
struct pv_data *pv = &v->pv;
-
+
union pv_event pevt = { .event = ri->event };
if(vcpu_set_data_type(p->current, VCPU_DATA_PV))
return;
-
+
if(opt.summary_info) {
pv->summary_info=1;
struct domain_data * domain_create(int did)
{
struct domain_data *d;
-
+
fprintf(warn, "Creating domain %d\n", did);
if((d=malloc(sizeof(*d)))==NULL)
/* Make a new domain */
n = domain_create(did);
-
+
/* Insert it into the list */
n->next = d;
*q = n;
prev->runstate.tsc = 0;
goto set;
}
-
+
if(prev->runstate.state != RUNSTATE_RUNNING)
{
fprintf(warn, "Strange, prev d%dv%d not running!\n",
else
{
fprintf(warn, "%s: FATAL: p->current not NULL! (d%dv%d, runstate %s)\n",
- __func__,
+ __func__,
p->current->d->did,
p->current->vid,
runstate_name[p->current->runstate.state]);
{
update_cycles(&next->cpu_affinity_all, tsc - next->pcpu_tsc);
update_cycles(&next->cpu_affinity_pcpu[p->pid], tsc - next->pcpu_tsc);
- }
+ }
next->pcpu_tsc = tsc;
- }
+ }
}
else
{
/* Change default domain to 'queued' */
runstate_update(p->current, RUNSTATE_QUEUED, p->first_tsc);
-
+
/* FIXME: Copy over data from the default domain this interval */
fprintf(warn, "Using first_tsc for d%dv%d (%lld cycles)\n",
v->d->did, v->vid, p->last_tsc - p->first_tsc);
runstate_name[sevt.new_runstate]);
}
}
-
+
if(r->vcpu > MAX_CPUS)
{
fprintf(warn, "%s: vcpu %u > MAX_VCPUS %d!\n",
&& v->hvm.vmexit_valid) {
hvm_close_vmexit(&v->hvm, ri->tsc);
}
-
+
/* Track waking state */
if ( v->data_type == VCPU_DATA_HVM && v->runstate.state != RUNSTATE_LOST ) {
if ( sevt.new_runstate == RUNSTATE_RUNNABLE
i,
g->is_wake,
t);
-
+
if(opt.scatterplot_wake_to_halt
&& t
&& g->is_wake)
scatterplot_vs_time(ri->tsc, t);
-
+
if(opt.summary && t) {
if(g->is_wake) {
if(v->hvm.w2h.interrupts==1)
&& last_oldstate.actual == sevt.old_runstate) {
tsc_t lag, old_offset;
struct pcpu_info *p2;
-
+
if(ri->tsc < last_oldstate.tsc) {
fprintf(warn, "WARNING: new tsc %lld < detected runstate tsc %lld! Not updating\n",
ri->tsc, last_oldstate.tsc);
goto no_update;
}
-
+
p2 = P.pcpu + last_oldstate.pid;
-
+
lag = ri->tsc
- last_oldstate.tsc;
-
+
old_offset = p2->tsc_skew.offset;
-
+
cpumask_union(&p2->tsc_skew.downstream, &p->tsc_skew.downstream);
cpumask_set(&p2->tsc_skew.downstream, p->pid);
-
+
if(cpumask_isset(&p2->tsc_skew.downstream, p2->pid)) {
if ( opt.tsc_loop_fatal )
{
goto no_update;
}
}
-
+
p2->tsc_skew.offset += lag * 2;
-
+
fprintf(warn, "TSC skew detected p%d->p%d, %lld cycles. Changing p%d offset from %lld to %lld\n",
p->pid, p2->pid, lag,
p2->pid,
old_offset,
p2->tsc_skew.offset);
-
+
goto no_update;
} else {
fprintf(warn, "runstate_change old_runstate %s, d%dv%d runstate %s. Possible tsc skew.\n",
runstate_name[sevt.old_runstate],
v->d->did, v->vid,
runstate_name[v->runstate.state]);
-
+
v->runstate.last_oldstate.wrong = sevt.old_runstate;
v->runstate.last_oldstate.actual = v->runstate.state;
v->runstate.last_oldstate.tsc = ri->tsc;
fprintf(stderr, "FATAL: Logic hole in %s\n", __func__);
error(ERR_ASSERT, NULL);
}
-
+
update:
/* Actually update the runstate. Special things to do if we're starting
* or stopping actually running on a physical cpu. */
v->p ? v->p->pid : -1);
error(ERR_FILE, NULL);
}
-
+
runstate_update(v, RUNSTATE_RUNNING, ri->tsc);
}
}
if(perfctrs && v->runstate.tsc) {
unsigned long long run_cycles, run_instr;
double cpi;
-
+
//run_cycles = r->p1 - v->runstate_p1_start;
run_cycles = ri->tsc - v->runstate.tsc;
- run_instr = r->p2 - v->runstate.p2_start;
+ run_instr = r->p2 - v->runstate.p2_start;
cpi = ((double)run_cycles) / run_instr;
printf(" cpi: %2.2lf ( %lld / %lld )\n",
cpi, run_cycles, run_instr);
}
-
+
if(opt.scatterplot_cpi && v->d->did == 1)
printf("%lld,%2.2lf\n",
ri->tsc, cpi);
- if(opt.summary_info)
+ if(opt.summary_info)
update_cpi(&v->cpi, run_instr, run_cycles);
}
#endif
} else {
vcpu_prev_update(v->p, v, ri->tsc, sevt.new_runstate);
}
-
+
if(P.lost_cpus && v->d->did != IDLE_DOMAIN) {
if(opt.dump_all)
fprintf(warn, "%s: %d lost cpus, setting d%dv%d runstate to RUNSTATE_LOST\n",
printf(" Runstates:\n");
for(i=0; i<RUNSTATE_MAX; i++) {
- snprintf(desc,30, " %8s", runstate_name[i]);
+ snprintf(desc,30, " %8s", runstate_name[i]);
print_cycle_summary(v->runstates+i, desc);
if ( i==RUNSTATE_RUNNABLE )
{
for(j=0; j<RUNNABLE_STATE_MAX; j++) {
if ( j == RUNNABLE_STATE_INVALID )
continue;
- snprintf(desc,30, " %8s", runnable_state_name[j]);
+ snprintf(desc,30, " %8s", runnable_state_name[j]);
print_cycle_summary(v->runnable_states+j, desc);
}
}
printf(" Runstates:\n");
for(i=0; i<DOMAIN_RUNSTATE_MAX; i++) {
- snprintf(desc,30, " %8s", domain_runstate_name[i]);
+ snprintf(desc,30, " %8s", domain_runstate_name[i]);
print_cycle_summary(d->runstates+i, desc);
}
}
} else {
order /= 9;
}
- return order;
+ return order;
}
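/*
 * Worked example: x86 page orders of interest come in multiples of 9
 * (each pagetable level covers 512 = 2^9 entries), so the division
 * above packs them into dense indices: order 0 (4KiB page) -> 0,
 * order 9 (2MiB superpage) -> 1, order 18 (1GiB superpage) -> 2.
 */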
void mem_pod_zero_reclaim_process(struct pcpu_info *p)
order = p2m_canonical_order(r->order);
d->pod.populate_order[order]++;
- }
+ }
}
}
printf(" %s set_p2m_entry d%d o%d t %d g %llx m %llx\n",
ri->dump_header,
r->d, r->order,
- r->p2mt,
+ r->p2mt,
(unsigned long long)r->gfn, (unsigned long long)r->mfn);
}
}
}
break;
}
-
+
}
/* ---- PM ---- */
}
break;
}
-
+
}
/*
n->bus=bus;
n->devfn=devfn;
-
+
/* Insert it into the list */
n->next = d;
*q = n;
if ( opt.scatterplot_irq )
{
struct time_struct t;
-
+
abs_cycles_to_time(ri->tsc, &t);
-
+
printf("i%x %u.%09u %d\n",
(unsigned)r->irq,
t.s, t.ns,
if( irq_table[r->irq].dev )
{
struct pci_dev * pdev=irq_table[r->irq].dev;
-
+
if(pdev->vector_used[r->vec])
fprintf(warn, " Vector collision on %02x.%02x!\n",
pdev->bus, pdev->devfn);
ri->dump_header,
r->irq, r->vec, r->cpu);
}
- if ( r->irq < MAX_IRQ
+ if ( r->irq < MAX_IRQ
&& r->vec < MAX_VECTOR )
{
if ( irq_table[r->irq].type == IRQ_MSI )
if ( irq_table[r->irq].dev )
{
struct pci_dev * pdev=irq_table[r->irq].dev;
-
+
if(!pdev->vector_used[r->vec])
fprintf(warn," Strange, cleanup on non-used vector\n");
pdev->vector_used[r->vec]=0;
ri->event,
ri->evt.main,
ri->evt.sub,
- ri->evt.minor,
+ ri->evt.minor,
ri->extra_words);
for(i=0; i<ri->extra_words; i++) {
else
printf(" ");
}
-
+
printf(" ] | ");
for (i=0; i<8; i++) {
{
static int off_by_one = 0;
int expected_extra = expected_size / sizeof(unsigned int);
-
+
if(ri->extra_words != expected_extra
&& !(off_by_one && ri->extra_words == expected_extra + 1) )
{
error(ERR_RECORD, ri);
return 1;
}
- }
+ }
return 0;
}
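/*
 * Sketch of a typical caller (the record name and layout here are
 * hypothetical): handlers validate the payload size before interpreting
 * ri->d as a record structure, bailing out once check_extra_words has
 * flagged ERR_RECORD:
 *
 *     if ( check_extra_words(ri, sizeof(*r), "vmexit") )
 *         return;
 */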
#endif
if ( p->current ) {
-
+
hvm_vlapic_clear(&p->current->vlapic);
if(p->current->data_type == VCPU_DATA_HVM) {
p->current->hvm.vmexit_valid=0;
p->lost_record.active = 1;
p->lost_record.tsc = first_tsc;
pcpu_string_draw(p);
-
+
{
/* Any vcpu which is not actively running may be scheduled on the
* lost cpu. To avoid mis-accounting, we need to reset */
printf(" %s lost_records end (domain invalid)---\n",
pcpu_string(p->pid));
}
-
-
+
+
p->lost_record.active = 0;
pcpu_string_draw(p);
P.lost_cpus--;
ssize_t r;
struct trace_record rec;
struct cpu_change_data *cd;
-
+
r=__read_record(&rec, offset);
if(r==0)
/* FIXME: Figure out if we could handle this more gracefully */
error(ERR_ASSERT, NULL);
}
-
+
if(cd->cpu > P.max_active_pcpu || !P.pcpu[cd->cpu].active) {
struct pcpu_info *p = P.pcpu + cd->cpu;
}
}
-/*
+/*
* Conceptually, when we reach a cpu_change record that's not for our pcpu,
* we want to scan forward through the file until we reach one that's for us.
* However, looping through involves reading the file, which we'd rather
fprintf(warn, "%s: Setting max_active_pcpu to %d\n",
__func__, max_active_pcpu);
}
-
+
}
/* Helper function to process tsc-related record info */
c+=r;
len-=r;
}
-
+
r = snprintf(c, len, " %s", pcpu_string(ri->cpu));
c+=r;
len-=r;
-
+
if ( p->current )
{
r = snprintf(c, len, " d%dv%d", p->current->d->did, p->current->vid);
return toplevel;
}
-
+
void process_cpu_change(struct pcpu_info *p) {
struct record_info *ri = &p->ri;
fprintf(warn, "%s: Activating pcpu %d at offset %lld\n",
__func__, r->cpu, (unsigned long long)p->file_offset);
-
+
record_order_insert(p2);
sched_default_vcpu_activate(p2);
__func__, p->pid, (unsigned long long)P.last_epoch_offset);
deactivate_pcpu(p);
}
- }
+ }
else
{
/* Track information about dom0 scheduling and records */
p->volume.buffer_dom0_runstate_tsc;
p->volume.buffer_dom0_runstate_cycles[p->volume.buffer_dom0_runstate]
+= ri->tsc - cycles;
-
+
printf(" - updated p%d dom0_runstate %s to %lld cycles (+%lld)\n",
p->pid, runstate_name[p->volume.buffer_dom0_runstate],
p->volume.buffer_dom0_runstate_cycles[p->volume.buffer_dom0_runstate],
process_record_tsc(p->order_tsc, ri);
- if(opt.dump_all)
+ if(opt.dump_all)
create_dump_header(ri, p);
static inline ssize_t get_rec_size(struct trace_record *rec) {
ssize_t s;
-
+
s = sizeof(uint32_t);
-
+
if(rec->cycle_flag)
s += sizeof(tsc_t);
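/*
 * Wire format implied by this size computation: a 4-byte header word
 * (event id, extra-word count, cycle_flag), an 8-byte tsc present only
 * when cycle_flag is set, and then the record's 32-bit extra words.
 */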
if(!(pid = fork())) {
progress_child_exec();
-
+
fprintf(stderr, "%s: exec failed (%s), disabling progress bar\n",
__func__, strerror(errno));
opt.progress = 0;
* - Updating the current one
*
* FIXME: Need to deal with pcpu states changing...
- *
+ *
* WARNING not thread-safe
*/
{
char *s = __pcpu_string;
static int max_active_pcpu=-1, last_pcpu=-1;
-
+
assert(P.max_active_pcpu < MAX_CPUS);
assert(pcpu <= P.max_active_pcpu);
if(P.max_active_pcpu > max_active_pcpu)
{
int i;
- for(i=max_active_pcpu + 1; i<= P.max_active_pcpu; i++)
+ for(i=max_active_pcpu + 1; i<= P.max_active_pcpu; i++)
pcpu_string_draw(P.pcpu+i);
max_active_pcpu=P.max_active_pcpu;
}
assert(record_order[i]);
/* And move everyone forward */
- for(; (record_order[i]=record_order[i+1]); i++)
+ for(; (record_order[i]=record_order[i+1]); i++)
;
}
if(opt.show_default_domain_summary) {
d = &default_domain;
printf("|-- Default domain --|\n");
-
+
for( i = 0; i < MAX_CPUS ; i++ )
{
if(d->vcpu[i])
if(!p->summary)
continue;
printf("pcpu %d\n", i);
-
+
print_cycle_summary(&p->time.running, " running");
print_cycle_summary(&p->time.idle, " idle");
print_cycle_summary(&p->time.lost, " lost");
active++;
}
printf("Total active cpus: %d\n", active);
-
+
}
void init_pcpus(void) {
void parse_cpu_hz(char * arg) {
float hz_base;
char * next_ptr;
-
+
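/* Example inputs (hypothetical values): "2400000000", "2400M", or
 * "2.4G". strtof() leaves next_ptr at any suffix, which the code below
 * can then presumably use to scale hz_base for G/M/K. */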
hz_base=strtof(arg, &next_ptr);
if(next_ptr == arg) {
fprintf(stderr, "Invalid cpu_hz %s\n", arg);
opt.with_cr3_enumeration = 1;
G.output_defined = 1;
break;
-
+
case OPT_INTERVAL_CR3_SCHEDULE_ORDERED:
opt.interval.output = INTERVAL_CR3_SCHEDULE_ORDERED;
opt.interval.check = INTERVAL_CHECK_CR3;
opt.summary_info = 1;
G.output_defined = 1;
break;
-
+
case OPT_INTERVAL_DOMAIN_SHORT_SUMMARY:
{
if((parse_array(arg, &opt.interval.array) < 0)
char *inval;
opt.default_guest_paging_levels = (int)strtol(arg, &inval, 0);
if ( inval == arg )
- argp_usage(state);
+ argp_usage(state);
}
break;
case OPT_SYMBOL_FILE:
.key = OPT_SUMMARY,
.group = OPT_GROUP_SUMMARY,
.doc = "Output a summary", },
-
+
{ .name = "report-pcpu",
.key = OPT_REPORT_PCPU,
.group = OPT_GROUP_SUMMARY,
.doc = "Report utilization for pcpus", },
-
+
/* Guest info */
{ .name = "default-guest-paging-levels",
.key = OPT_DEFAULT_GUEST_PAGING_LEVELS,
.group = OPT_GROUP_GUEST,
.arg = "L",
.doc = "Default guest paging levels. Mainly necessary for Rio, as Miami traces include guest paging levels where appropriate.", },
-
+
{ .name = "symbol-file",
.key = OPT_SYMBOL_FILE,
.group = OPT_GROUP_GUEST,
if(opt.dump_all)
warn = stdout;
-
+
init_pcpus();
if(opt.progress)
if(opt.progress)
progress_finish();
-
+
return 0;
}
/*