ia64/xen-unstable

view xen/common/trace.c @ 9706:3c05406f5e0a

In some cases, for instance if for some bizarre reason
the tree was checked out of CVS, which doesn't necessarily
store file permissions, mkbuildtree may not be executable.
So run them explicitly via bash.

Signed-Off-By: Horms <horms@verge.net.au>
author kaf24@firebug.cl.cam.ac.uk
date Thu Apr 13 11:24:00 2006 +0100 (2006-04-13)
parents 12621916d820
children 0f162a135140
line source
/******************************************************************************
 * common/trace.c
 *
 * Xen Trace Buffer
 *
 * Copyright (C) 2004 by Intel Research Cambridge
 *
 * Authors: Mark Williamson, mark.a.williamson@intel.com
 *          Rob Gardner, rob.gardner@hp.com
 * Date:    October 2005
 *
 * Copyright (C) 2005 Bin Ren
 *
 * The trace buffer code is designed to allow debugging traces of Xen to be
 * generated on UP / SMP machines. Each trace entry is timestamped so that
 * it's possible to reconstruct a chronological record of trace events.
 *
 * See also include/xen/trace.h and the dom0 op in
 * include/public/dom0_ops.h
 */

#include <xen/config.h>
#include <asm/types.h>
#include <asm/io.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/trace.h>
#include <xen/errno.h>
#include <xen/event.h>
#include <xen/softirq.h>
#include <xen/init.h>
#include <asm/atomic.h>
#include <public/dom0_ops.h>

/* opt_tbuf_size: trace buffer size (in pages) */
static unsigned int opt_tbuf_size = 0;
integer_param("tbuf_size", opt_tbuf_size);

/* Pointers to the meta-data objects for all system trace buffers */
static struct t_buf *t_bufs[NR_CPUS];
static struct t_rec *t_recs[NR_CPUS];
static int nr_recs;

/* High water mark for trace buffers; */
/* Send virtual interrupt when buffer level reaches this point */
static int t_buf_highwater;

/* a flag recording whether initialization has been done */
/* or more properly, if the tbuf subsystem is enabled right now */
int tb_init_done;

/* which CPUs tracing is enabled on */
static unsigned long tb_cpu_mask = (~0UL);

/* which tracing events are enabled */
static u32 tb_event_mask = TRC_ALL;
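/* TRACE_SOFTIRQ handler: notify dom0, via VIRQ_TBUF, that trace data is waiting. */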
static void trace_notify_guest(void)
{
    send_guest_global_virq(dom0, VIRQ_TBUF);
}


/**
 * alloc_trace_bufs - performs initialization of the per-cpu trace buffers.
 *
 * This function is called at start of day in order to initialize the per-cpu
 * trace buffers. The trace buffers are then available for debugging use, via
 * the %TRACE_xD macros exported in <xen/trace.h>.
 *
 * This function may also be called later when enabling trace buffers
 * via the SET_SIZE hypercall.
 */
static int alloc_trace_bufs(void)
{
    int i, order;
    unsigned long nr_pages;
    char *rawbuf;
    struct t_buf *buf;

    if ( opt_tbuf_size == 0 )
        return -EINVAL;

    nr_pages = num_online_cpus() * opt_tbuf_size;
    order = get_order_from_pages(nr_pages);
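    /* Each per-CPU buffer holds as many records as fit after its t_buf header. */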
    nr_recs = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf)) /
        sizeof(struct t_rec);

    if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
    {
        printk("Xen trace buffers: memory allocation failed\n");
        return -EINVAL;
    }

    /* Share pages so that xentrace can map them. */
    for ( i = 0; i < nr_pages; i++ )
        share_xen_page_with_privileged_guests(
            virt_to_page(rawbuf) + i, XENSHARE_writable);
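    /* Carve the contiguous allocation into one buffer per online CPU; the
     * record array follows each buffer's t_buf header. */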
    for_each_online_cpu ( i )
    {
        buf = t_bufs[i] = (struct t_buf *)&rawbuf[i*opt_tbuf_size*PAGE_SIZE];
        buf->cons = buf->prod = 0;
        t_recs[i] = (struct t_rec *)(buf + 1);
    }

    t_buf_highwater = nr_recs >> 1; /* 50% high water */
    open_softirq(TRACE_SOFTIRQ, trace_notify_guest);

    return 0;
}

/**
 * tb_set_size - handle the logic involved with dynamically
 * allocating and deallocating tbufs
 *
 * This function is called when the SET_SIZE hypercall is done.
 */
static int tb_set_size(int size)
{
    /*
     * Setting size is a one-shot operation. It can be done either at
     * boot time or via control tools, but not by both. Once buffers
     * are created they cannot be destroyed.
     */
    if ( (opt_tbuf_size != 0) || (size <= 0) )
    {
        DPRINTK("tb_set_size from %d to %d not implemented\n",
                opt_tbuf_size, size);
        return -EINVAL;
    }
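    /* alloc_trace_bufs() sizes the buffers from opt_tbuf_size, so set it
     * before the call and roll it back if allocation fails. */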
    opt_tbuf_size = size;
    if ( alloc_trace_bufs() != 0 )
    {
        opt_tbuf_size = 0;
        return -EINVAL;
    }

    printk("Xen trace buffers: initialized\n");
    return 0;
}


/**
 * init_trace_bufs - performs initialization of the per-cpu trace buffers.
 *
 * This function is called at start of day in order to initialize the per-cpu
 * trace buffers. The trace buffers are then available for debugging use, via
 * the %TRACE_xD macros exported in <xen/trace.h>.
 */
void init_trace_bufs(void)
{
    if ( opt_tbuf_size == 0 )
    {
        printk("Xen trace buffers: disabled\n");
        return;
    }

    if ( alloc_trace_bufs() == 0 )
    {
        printk("Xen trace buffers: initialised\n");
        wmb(); /* above must be visible before tb_init_done flag set */
        tb_init_done = 1;
    }
}


/**
 * tb_control - DOM0 operations on trace buffers.
 * @tbc: a pointer to a dom0_tbufcontrol_t to be filled out
 */
int tb_control(dom0_tbufcontrol_t *tbc)
{
    static spinlock_t lock = SPIN_LOCK_UNLOCKED;
    int rc = 0;

    spin_lock(&lock);

    if ( !tb_init_done &&
         (tbc->op != DOM0_TBUF_SET_SIZE) &&
         (tbc->op != DOM0_TBUF_ENABLE) )
    {
        spin_unlock(&lock);
        return -EINVAL;
    }

    switch ( tbc->op )
    {
    case DOM0_TBUF_GET_INFO:
        tbc->cpu_mask = tb_cpu_mask;
        tbc->evt_mask = tb_event_mask;
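        /* Machine frame of the first buffer page; xentrace maps the shared
         * trace pages starting from this MFN. */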
        tbc->buffer_mfn = __pa(t_bufs[0]) >> PAGE_SHIFT;
        tbc->size = opt_tbuf_size * PAGE_SIZE;
        break;
    case DOM0_TBUF_SET_CPU_MASK:
        tb_cpu_mask = tbc->cpu_mask;
        break;
    case DOM0_TBUF_SET_EVT_MASK:
        tb_event_mask = tbc->evt_mask;
        break;
    case DOM0_TBUF_SET_SIZE:
        rc = !tb_init_done ? tb_set_size(tbc->size) : -EINVAL;
        break;
    case DOM0_TBUF_ENABLE:
        /* Enable trace buffers. Check buffers are already allocated. */
        if ( opt_tbuf_size == 0 )
            rc = -EINVAL;
        else
            tb_init_done = 1;
        break;
    case DOM0_TBUF_DISABLE:
        /*
         * Disable trace buffers. Just stops new records from being written,
         * does not deallocate any memory.
         */
        tb_init_done = 0;
        break;
    default:
        rc = -EINVAL;
        break;
    }

    spin_unlock(&lock);

    return rc;
}

/**
 * trace - Enters a trace tuple into the trace buffer for the current CPU.
 * @event: the event type being logged
 * @d1...d5: the data items for the event being logged
 *
 * Logs a trace record into the appropriate buffer. The record is silently
 * dropped if the event is filtered out by the event/CPU masks or if the
 * buffer is full. The trace buffers must already have been initialised
 * (tb_init_done) when this is called.
 */
void trace(u32 event, unsigned long d1, unsigned long d2,
           unsigned long d3, unsigned long d4, unsigned long d5)
{
    struct t_buf *buf;
    struct t_rec *rec;
    unsigned long flags;

    BUG_ON(!tb_init_done);

    if ( (tb_event_mask & event) == 0 )
        return;

    /* match class */
    if ( ((tb_event_mask >> TRC_CLS_SHIFT) & (event >> TRC_CLS_SHIFT)) == 0 )
        return;

    /* then match subclass */
    if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf )
          & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
        return;

    if ( (tb_cpu_mask & (1UL << smp_processor_id())) == 0 )
        return;

    /* Read tb_init_done /before/ t_bufs. */
    rmb();

    buf = t_bufs[smp_processor_id()];

    local_irq_save(flags);
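    /* Drop the record if this CPU's buffer is full. */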
    if ( (buf->prod - buf->cons) >= nr_recs )
    {
        local_irq_restore(flags);
        return;
    }

    rec = &t_recs[smp_processor_id()][buf->prod % nr_recs];
    rec->cycles  = (u64)get_cycles();
    rec->event   = event;
    rec->data[0] = d1;
    rec->data[1] = d2;
    rec->data[2] = d3;
    rec->data[3] = d4;
    rec->data[4] = d5;
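    /* Commit the record contents before making the new producer index visible. */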
    wmb();
    buf->prod++;

    local_irq_restore(flags);

    /*
     * Notify trace buffer consumer that we've reached the high water mark.
     */
    if ( (buf->prod - buf->cons) == t_buf_highwater )
        raise_softirq(TRACE_SOFTIRQ);
}
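/*
 * Usage note (illustrative sketch, not part of the original source): callers
 * normally go through the TRACE_xD wrappers exported by <xen/trace.h> rather
 * than calling trace() directly, e.g. something along the lines of
 *
 *     TRACE_2D(TRC_SCHED_DOM_ADD, d->domain_id, 0);
 *
 * The wrappers are expected to check tb_init_done before calling trace(),
 * which is why trace() itself can simply BUG_ON(!tb_init_done).
 */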
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */