PF_XEN_MAX,
};
+#define SHADOW_WRMAP_BF 12
#define SHADOW_PREALLOC_UNPIN 13
-#define SHADOW_RESYNC_FULL 24
-#define SHADOW_RESYNC_ONLY 25
+#define SHADOW_RESYNC_FULL 14
+#define SHADOW_RESYNC_ONLY 15
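+/* NB: these SHADOW_* minor numbers must stay in sync with the
+ * corresponding TRC_SHADOW_* event codes (TRC_SHADOW + 12..15). */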
char * pf_xen_name[PF_XEN_MAX] = {
[PF_XEN_NOT_SHADOW]="propagate",
unsigned long long rip;
unsigned exit_reason, event_handler;
char dump_header[256];
- int short_summary_done:1, prealloc_unpin:1;
+ int short_summary_done:1, prealloc_unpin:1, wrmap_bf:1;
/* Immediate processing */
void *d;
unsigned long addr;
} *r = (typeof(r))h->d;
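+ /* The -unstable record carries one more word than the fields decoded
+  * in *r; the extra word is not examined here. */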
- if(ri->extra_words != (sizeof(*r)/sizeof(unsigned long)))
+ if(ri->extra_words != (sizeof(*r)/sizeof(unsigned long) + 1))
{
fprintf(warn, "FATAL: msr_write extra_words %d, expected %d!\n",
ri->extra_words, sizeof(*r)/sizeof(unsigned long));
unsigned long addr;
} *r = (typeof(r))h->d;
- if(ri->extra_words != (sizeof(*r)/sizeof(unsigned long)))
+ if(ri->extra_words != (sizeof(*r)/sizeof(unsigned long) + 1))
{
fprintf(warn, "FATAL: msr_read extra_words %d, expected %d!\n",
ri->extra_words, sizeof(*r)/sizeof(unsigned long));
struct {
union {
struct {
- unsigned long long rip;
unsigned long exit_reason;
+ unsigned long long rip;
} x64;
+#if 0
struct {
unsigned long eip;
unsigned long exit_reason;
} x32;
+#endif
};
} *r;
- if(ri->extra_words != 2 && ri->extra_words != 3)
+ if(ri->extra_words != 4)
{
fprintf(warn, "FATAL: vmexit has unexpected extra words %d!\n",
ri->extra_words);
h->vmexit_valid=1;
if(ri->event == TRC_HVM_VMEXIT64) {
+#if 0
if(v->guest_paging_levels != 4)
{
fprintf(warn, "%s: VMEXIT64, but guest_paging_levels %d. Switching to 4.\n",
__func__, v->guest_paging_levels);
v->guest_paging_levels = 4;
}
+#endif
if(!is_valid_addr64(r->x64.rip))
fprintf(warn, "%s: invalid va %llx",
__func__, r->x64.rip);
h->rip = r->x64.rip;
h->exit_reason = r->x64.exit_reason;
} else {
+#if 0
if(v->guest_paging_levels == 4)
{
int new_paging_levels = opt.default_guest_paging_levels;
}
h->rip = r->x32.eip;
h->exit_reason = r->x32.exit_reason;
+#endif
+ fprintf(stderr, "FATAL: Expected 64-bit-only traces from -unstable!\n");
+ exit(1);
}
if(h->exit_reason > h->exit_reason_max)
h->entry_tsc = 0;
h->resyncs = 0;
h->prealloc_unpin = 0;
+ h->wrmap_bf = 0;
h->short_summary_done = 0;
if(!opt.svm_mode && h->exit_reason == EXIT_REASON_EXCEPTION_NMI)
update_summary(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_OOS_ADD], h->arc_cycles);
if(e->flag_oos_fixup_evict)
update_summary(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_OOS_EVICT], h->arc_cycles);
- if(e->flag_promote) {
+ if(e->flag_promote)
update_summary(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_PROMOTE], h->arc_cycles);
- if(e->flag_wrmap) {
- update_summary(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_WRMAP], h->arc_cycles);
- if(e->flag_wrmap_brute_force)
- update_summary(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_BRUTE_FORCE], h->arc_cycles);
- } else if(e->flag_wrmap_brute_force) {
- fprintf(warn, "Strange: wrmap_bf but not wrmap!\n");
-
- }
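+ /* h->wrmap_bf is set if a standalone SHADOW_WRMAP_BF record was seen
+  * during this vmexit (see shadow_wrmap_bf_process); treat it like the
+  * in-record flag. */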
+ if(e->flag_wrmap) {
+ update_summary(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_WRMAP], h->arc_cycles);
+ if(e->flag_wrmap_brute_force || h->wrmap_bf)
+ update_summary(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_BRUTE_FORCE], h->arc_cycles);
+ } else if(e->flag_wrmap_brute_force || h->wrmap_bf) {
+ fprintf(warn, "Strange: wrmap_bf but not wrmap!\n");
+ }
- } else if(e->flag_wrmap)
- fprintf(warn, "Strange, wrmap but not promote!\n");
if(!(e->flag_promote || h->prealloc_unpin || e->flag_unsync))
update_summary(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_UPDATE_ONLY], h->arc_cycles);
union shadow_event sevt = { .event = event };
int i;
- if(sevt.minor < PF_XEN_MAX)
+ if(sevt.minor < PF_XEN_MAX && pf_xen_name[sevt.minor])
{
evt_string = pf_xen_name[sevt.minor];
}
cr3_prealloc_unpin(h->v, r->gfn);
}
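+/* Handle a standalone SHADOW_WRMAP_BF record: Xen fell back to a brute-force
+ * search for writable mappings of this gfn.  Remember it in h->wrmap_bf so
+ * the fault-fixup summary can credit PF_XEN_FIXUP_BRUTE_FORCE. */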
+void shadow_wrmap_bf_process(struct record_info *ri, struct hvm_data *h)
+{
+ struct {
+ unsigned long long gfn;
+ } *r = (typeof(r))ri->d;
+
+ if(opt.dump_all || opt.dump_cooked)
+ printf(" %s wrmap-bf gfn %llx\n",
+ ri->dump_header, r->gfn);
+
+ h->wrmap_bf = 1;
+}
+
void shadow_process(struct pcpu_info *p)
{
struct record_info *ri = &p->ri;
case SHADOW_PREALLOC_UNPIN:
shadow_prealloc_unpin_process(ri, h);
break;
+ case SHADOW_WRMAP_BF:
+ shadow_wrmap_bf_process(ri, h);
+ break;
default:
if(sevt.minor <= PF_XEN_LAST_FAULT) {
shadow_fault_generic_process(ri, h);
case TRC_LOST_RECORDS_END:
process_lost_records_end(p);
break;
- case TRC_TRACE_VIRQ:
- if(opt.dump_all || opt.dump_cooked) {
- printf(" %s trace_virq\n", ri->dump_header);
- }
- P.buffer_trace_virq_tsc = ri->tsc;
- break;
default:
process_generic(ri);
}
.doc = "",
};
-const char *argp_program_version = "xenalyze - XenServer Trunk";
+const char *argp_program_version = "xenalyze - Open-source xen-unstable (3.4)";
const char *argp_program_bug_address = "George Dunlap <george.dunlap@eu.citrix.com>";
#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
+#define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */
+#define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */
+
/* Trace events per class */
#define TRC_LOST_RECORDS (TRC_GEN + 1)
#define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2)
#define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3)
-#define TRC_TRACE_VIRQ (TRC_GEN + 4)
-
-#define TRC_SCHED_MIN 0x0081000
-
-#define TRC_SCHED_DOM_ADD (TRC_SCHED + 1)
-#define TRC_SCHED_DOM_REM (TRC_SCHED + 2)
-#define TRC_SCHED_SLEEP (TRC_SCHED + 3)
-#define TRC_SCHED_WAKE (TRC_SCHED + 4)
-#define TRC_SCHED_YIELD (TRC_SCHED + 5)
-#define TRC_SCHED_BLOCK (TRC_SCHED + 6)
-#define TRC_SCHED_SHUTDOWN (TRC_SCHED + 7)
-#define TRC_SCHED_CTL (TRC_SCHED + 8)
-#define TRC_SCHED_ADJDOM (TRC_SCHED + 9)
-#define TRC_SCHED_SWITCH (TRC_SCHED + 10)
-#define TRC_SCHED_S_TIMER_FN (TRC_SCHED + 11)
-#define TRC_SCHED_T_TIMER_FN (TRC_SCHED + 12)
-#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED + 13)
-#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED + 14)
-#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED + 15)
-#define TRC_SCHED_SHUTDOWN_CODE (TRC_SCHED + 16)
+
+#define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1)
+#define TRC_SCHED_DOM_ADD (TRC_SCHED_VERBOSE + 1)
+#define TRC_SCHED_DOM_REM (TRC_SCHED_VERBOSE + 2)
+#define TRC_SCHED_SLEEP (TRC_SCHED_VERBOSE + 3)
+#define TRC_SCHED_WAKE (TRC_SCHED_VERBOSE + 4)
+#define TRC_SCHED_YIELD (TRC_SCHED_VERBOSE + 5)
+#define TRC_SCHED_BLOCK (TRC_SCHED_VERBOSE + 6)
+#define TRC_SCHED_SHUTDOWN (TRC_SCHED_VERBOSE + 7)
+#define TRC_SCHED_CTL (TRC_SCHED_VERBOSE + 8)
+#define TRC_SCHED_ADJDOM (TRC_SCHED_VERBOSE + 9)
+#define TRC_SCHED_SWITCH (TRC_SCHED_VERBOSE + 10)
+#define TRC_SCHED_S_TIMER_FN (TRC_SCHED_VERBOSE + 11)
+#define TRC_SCHED_T_TIMER_FN (TRC_SCHED_VERBOSE + 12)
+#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED_VERBOSE + 13)
+#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14)
+#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15)
#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
#define TRC_PV_PTWR_EMULATION (TRC_PV + 11)
#define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12)
/* Indicates that addresses in trace record are 64 bits */
-#define TRC_PV_64_FLAG (0x100)
+#define TRC_64_FLAG (0x100)
#define TRC_SHADOW_NOT_SHADOW (TRC_SHADOW + 1)
#define TRC_SHADOW_FAST_PROPAGATE (TRC_SHADOW + 2)
#define TRC_SHADOW_EMULATE_UNSHADOW_USER (TRC_SHADOW + 9)
#define TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ (TRC_SHADOW + 10)
#define TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED (TRC_SHADOW + 11)
-#define TRC_SHADOW_EMULATE_UNSHADOW_HEURISTIC (TRC_SHADOW + 12)
+#define TRC_SHADOW_WRMAP_BF (TRC_SHADOW + 12)
#define TRC_SHADOW_PREALLOC_UNPIN (TRC_SHADOW + 13)
-#define TRC_SHADOW_RESYNC_FULL (TRC_SHADOW + 24)
-#define TRC_SHADOW_RESYNC_ONLY (TRC_SHADOW + 25)
+#define TRC_SHADOW_RESYNC_FULL (TRC_SHADOW + 14)
+#define TRC_SHADOW_RESYNC_ONLY (TRC_SHADOW + 15)
/* trace events per subclass */
#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
-#define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + 0x03)
+#define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02)
#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01)
+#define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01)
#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02)
+#define TRC_HVM_PF_INJECT64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02)
#define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03)
#define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04)
#define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05)
#define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06)
#define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07)
#define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08)
+#define TRC_HVM_CR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08)
#define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09)
+#define TRC_HVM_CR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09)
#define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A)
#define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B)
#define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C)
#define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12)
#define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13)
#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14)
+#define TRC_HVM_INVLPG64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14)
#define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15)
#define TRC_HVM_IO_ASSIST (TRC_HVM_HANDLER + 0x16)
#define TRC_HVM_MMIO_ASSIST (TRC_HVM_HANDLER + 0x17)
#define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18)
#define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19)
-#define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + 0x20)
-#define TRC_HVM_OP_DESTROY_PROC (TRC_HVM_HANDLER + 0x100)
-
+#define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
/* This structure represents a single trace buffer record. */
struct t_rec {
* field, indexes into an array of struct t_rec's.
*/
struct t_buf {
+ /* Assume the data buffer size is X. X is generally not a power of 2.
+ * CONS and PROD are incremented modulo (2*X):
+ * 0 <= cons < 2*X
+ * 0 <= prod < 2*X
+ * This is done because addition modulo X breaks at 2^32 when X is not a
+ * power of 2:
+ * (((2^32 - 1) % X) + 1) % X != (2^32) % X
+ */
uint32_t cons; /* Offset of next item to be consumed by control tools. */
uint32_t prod; /* Offset of next item to be produced by Xen. */
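+ /* Illustrative note (added comment, not from the Xen header): with cons and
+  * prod kept in [0, 2*X), the offset of the next record to read is simply
+  * cons % X, the buffer is empty exactly when cons == prod, and the count of
+  * unconsumed bytes is prod - cons, plus 2*X if that subtraction is negative. */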
/* Records follow immediately after the meta-data header. */