ia64/xen-unstable: xen/common/trace.c @ 6316:f7dfaa2af90c

author:   cl349@firebug.cl.cam.ac.uk
date:     Sun Aug 21 11:02:00 2005 +0000 (2005-08-21)
parents:  1872e09bfba3
children: 6721abf6b16d
/******************************************************************************
 * common/trace.c
 *
 * Xen Trace Buffer
 *
 * Copyright (C) 2004 by Intel Research Cambridge
 *
 * Author: Mark Williamson, mark.a.williamson@intel.com
 * Date:   January 2004
 *
 * Copyright (C) 2005 Bin Ren
 *
 * The trace buffer code is designed to allow debugging traces of Xen to be
 * generated on UP / SMP machines. Each trace entry is timestamped so that
 * it's possible to reconstruct a chronological record of trace events.
 *
 * See also include/xen/trace.h and the dom0 op in
 * include/public/dom0_ops.h
 */
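
/*
 * Usage sketch (informational addition, not from the original changeset):
 * hypervisor code emits records into these buffers via the TRACE_xD macros
 * declared in <xen/trace.h>, and a dom0 tool such as xentrace maps the
 * shared pages to read them out. The event class and arguments below are
 * illustrative only:
 *
 *     TRACE_2D(TRC_SCHED_YIELD, d->domain_id, 0);
 *
 * Such a call has no effect until init_trace_bufs() has set tb_init_done,
 * and is further filtered by tb_cpu_mask and tb_event_mask below.
 */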

#include <xen/config.h>
#include <asm/types.h>
#include <asm/io.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/trace.h>
#include <xen/errno.h>
#include <xen/init.h>
#include <asm/atomic.h>
#include <public/dom0_ops.h>

/* opt_tbuf_size: trace buffer size (in pages) */
static unsigned int opt_tbuf_size = 10;
integer_param("tbuf_size", opt_tbuf_size);

/* Pointers to the meta-data objects for all system trace buffers */
struct t_buf *t_bufs[NR_CPUS];

/* a flag recording whether initialisation has been done */
int tb_init_done = 0;

/* which CPUs tracing is enabled on */
unsigned long tb_cpu_mask = (~0UL);

/* which tracing events are enabled */
u32 tb_event_mask = TRC_ALL;

/**
 * init_trace_bufs - performs initialisation of the per-cpu trace buffers.
 *
 * This function is called at start of day in order to initialise the per-cpu
 * trace buffers. The trace buffers are then available for debugging use, via
 * the %TRACE_xD macros exported in <xen/trace.h>.
 */
void init_trace_bufs(void)
{
    int i, order;
    unsigned long nr_pages;
    char *rawbuf;
    struct t_buf *buf;

    if ( opt_tbuf_size == 0 )
    {
        printk("Xen trace buffers: disabled\n");
        return;
    }

    nr_pages = num_online_cpus() * opt_tbuf_size;
    order = get_order(nr_pages * PAGE_SIZE);

    if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
    {
        printk("Xen trace buffers: memory allocation failed\n");
        return;
    }

    /* Share pages so that xentrace can map them. */
    for ( i = 0; i < nr_pages; i++ )
        SHARE_PFN_WITH_DOMAIN(virt_to_page(rawbuf + i * PAGE_SIZE), dom0);

    for_each_online_cpu ( i )
    {
        buf = t_bufs[i] = (struct t_buf *)&rawbuf[i*opt_tbuf_size*PAGE_SIZE];

        _atomic_set(buf->rec_idx, 0);
        buf->rec_num = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf))
                       / sizeof(struct t_rec);
        buf->rec = (struct t_rec *)(buf + 1);
        buf->rec_addr = __pa(buf->rec);
    }

    printk("Xen trace buffers: initialised\n");

    wmb(); /* above must be visible before tb_init_done flag set */

    tb_init_done = 1;
}
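
/*
 * Layout sketch (informational addition): each per-CPU region initialised
 * above is opt_tbuf_size pages long and consists of a struct t_buf header
 * followed immediately by rec_num fixed-size struct t_rec slots:
 *
 *     +--------------+----------+----------+-  ...  -+------------------+
 *     | struct t_buf | t_rec[0] | t_rec[1] |   ...   | t_rec[rec_num-1] |
 *     +--------------+----------+----------+-  ...  -+------------------+
 *
 * rec_idx is the cursor into that array, and rec_addr records the physical
 * address of the first record so that dom0 can locate it after mapping the
 * shared pages.
 */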

/**
 * tb_control - DOM0 operations on trace buffers.
 * @tbc: a pointer to a dom0_tbufcontrol_t describing the operation; it is
 *       filled out by DOM0_TBUF_GET_INFO and read by the SET operations.
 */
int tb_control(dom0_tbufcontrol_t *tbc)
{
    static spinlock_t lock = SPIN_LOCK_UNLOCKED;
    int rc = 0;

    if ( !tb_init_done )
        return -EINVAL;

    spin_lock(&lock);

    switch ( tbc->op )
    {
    case DOM0_TBUF_GET_INFO:
        tbc->cpu_mask   = tb_cpu_mask;
        tbc->evt_mask   = tb_event_mask;
        tbc->buffer_mfn = __pa(t_bufs[0]) >> PAGE_SHIFT;
        tbc->size       = opt_tbuf_size * PAGE_SIZE;
        break;
    case DOM0_TBUF_SET_CPU_MASK:
        tb_cpu_mask = tbc->cpu_mask;
        break;
    case DOM0_TBUF_SET_EVT_MASK:
        tb_event_mask = tbc->evt_mask;
        break;
    default:
        rc = -EINVAL;
    }

    spin_unlock(&lock);

    return rc;
}
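
/*
 * Caller sketch (informational addition): tb_control() is reached from the
 * trace-buffer dom0 op declared in include/public/dom0_ops.h. Roughly, a
 * dom0 tool such as xentrace fills in a dom0_tbufcontrol_t with
 * op = DOM0_TBUF_GET_INFO, issues the dom0_op hypercall (e.g. through a
 * libxc wrapper), and then uses the returned buffer_mfn and size to map and
 * read each CPU's trace records. The surrounding hypercall plumbing is not
 * shown here and the exact userspace interface may differ from this sketch.
 */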

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */