ia64/xen-unstable

view xen/common/trace.c @ 11128:f2f584093379

[POWERPC] Update .hgignore
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author kfraser@localhost.localdomain
date Tue Aug 15 10:38:59 2006 +0100 (2006-08-15)
parents aa25666d4643
children 03fd2accb4d9
line source
1 /******************************************************************************
2 * common/trace.c
3 *
4 * Xen Trace Buffer
5 *
6 * Copyright (C) 2004 by Intel Research Cambridge
7 *
8 * Authors: Mark Williamson, mark.a.williamson@intel.com
9 * Rob Gardner, rob.gardner@hp.com
10 * Date: October 2005
11 *
12 * Copyright (C) 2005 Bin Ren
13 *
14 * The trace buffer code is designed to allow debugging traces of Xen to be
15 * generated on UP / SMP machines. Each trace entry is timestamped so that
16 * it's possible to reconstruct a chronological record of trace events.
17 *
18 * See also include/xen/trace.h and the dom0 op in
19 * include/public/dom0_ops.h
20 */
22 #include <xen/config.h>
23 #include <asm/types.h>
24 #include <asm/io.h>
25 #include <xen/lib.h>
26 #include <xen/sched.h>
27 #include <xen/smp.h>
28 #include <xen/trace.h>
29 #include <xen/errno.h>
30 #include <xen/event.h>
31 #include <xen/softirq.h>
32 #include <xen/init.h>
33 #include <xen/percpu.h>
34 #include <asm/atomic.h>
35 #include <public/dom0_ops.h>
/* opt_tbuf_size: trace buffer size (in pages) */
static unsigned int opt_tbuf_size = 0;
integer_param("tbuf_size", opt_tbuf_size);

/* Pointers to the meta-data objects for all system trace buffers */
static DEFINE_PER_CPU(struct t_buf *, t_bufs);
static DEFINE_PER_CPU(struct t_rec *, t_recs);
/* Number of records that fit in one per-cpu buffer; set in alloc_trace_bufs(). */
static int nr_recs;

/* High water mark for trace buffers; */
/* Send virtual interrupt when buffer level reaches this point */
static int t_buf_highwater;

/* Number of records lost due to per-CPU trace buffer being full. */
static DEFINE_PER_CPU(unsigned long, lost_records);

/* a flag recording whether initialization has been done */
/* or more properly, if the tbuf subsystem is enabled right now */
int tb_init_done;

/* which CPUs tracing is enabled on */
static unsigned long tb_cpu_mask = (~0UL);

/* which tracing events are enabled */
static u32 tb_event_mask = TRC_ALL;
/*
 * trace_notify_guest - TRACE_SOFTIRQ handler.
 *
 * Registered via open_softirq() in alloc_trace_bufs() and raised from
 * trace() when a per-cpu buffer crosses t_buf_highwater; sends VIRQ_TBUF
 * to dom0 so a consumer can drain the trace buffers.
 */
static void trace_notify_guest(void)
{
    send_guest_global_virq(dom0, VIRQ_TBUF);
}
69 /**
70 * alloc_trace_bufs - performs initialization of the per-cpu trace buffers.
71 *
72 * This function is called at start of day in order to initialize the per-cpu
73 * trace buffers. The trace buffers are then available for debugging use, via
74 * the %TRACE_xD macros exported in <xen/trace.h>.
75 *
76 * This function may also be called later when enabling trace buffers
77 * via the SET_SIZE hypercall.
78 */
79 static int alloc_trace_bufs(void)
80 {
81 int i, order;
82 unsigned long nr_pages;
83 char *rawbuf;
84 struct t_buf *buf;
86 if ( opt_tbuf_size == 0 )
87 return -EINVAL;
89 nr_pages = num_online_cpus() * opt_tbuf_size;
90 order = get_order_from_pages(nr_pages);
91 nr_recs = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf)) /
92 sizeof(struct t_rec);
94 if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
95 {
96 printk("Xen trace buffers: memory allocation failed\n");
97 opt_tbuf_size = 0;
98 return -EINVAL;
99 }
101 /* Share pages so that xentrace can map them. */
102 for ( i = 0; i < nr_pages; i++ )
103 share_xen_page_with_privileged_guests(
104 virt_to_page(rawbuf) + i, XENSHARE_writable);
106 for_each_online_cpu ( i )
107 {
108 buf = per_cpu(t_bufs, i) = (struct t_buf *)
109 &rawbuf[i*opt_tbuf_size*PAGE_SIZE];
110 buf->cons = buf->prod = 0;
111 per_cpu(t_recs, i) = (struct t_rec *)(buf + 1);
112 }
114 t_buf_highwater = nr_recs >> 1; /* 50% high water */
115 open_softirq(TRACE_SOFTIRQ, trace_notify_guest);
117 return 0;
118 }
121 /**
122 * tb_set_size - handle the logic involved with dynamically
123 * allocating and deallocating tbufs
124 *
125 * This function is called when the SET_SIZE hypercall is done.
126 */
127 static int tb_set_size(int size)
128 {
129 /*
130 * Setting size is a one-shot operation. It can be done either at
131 * boot time or via control tools, but not by both. Once buffers
132 * are created they cannot be destroyed.
133 */
134 if ( (opt_tbuf_size != 0) || (size <= 0) )
135 {
136 DPRINTK("tb_set_size from %d to %d not implemented\n",
137 opt_tbuf_size, size);
138 return -EINVAL;
139 }
141 opt_tbuf_size = size;
142 if ( alloc_trace_bufs() != 0 )
143 return -EINVAL;
145 printk("Xen trace buffers: initialized\n");
146 return 0;
147 }
150 /**
151 * init_trace_bufs - performs initialization of the per-cpu trace buffers.
152 *
153 * This function is called at start of day in order to initialize the per-cpu
154 * trace buffers. The trace buffers are then available for debugging use, via
155 * the %TRACE_xD macros exported in <xen/trace.h>.
156 */
157 void init_trace_bufs(void)
158 {
159 if ( opt_tbuf_size == 0 )
160 {
161 printk("Xen trace buffers: disabled\n");
162 return;
163 }
165 if ( alloc_trace_bufs() == 0 )
166 {
167 printk("Xen trace buffers: initialised\n");
168 wmb(); /* above must be visible before tb_init_done flag set */
169 tb_init_done = 1;
170 }
171 }
174 /**
175 * tb_control - DOM0 operations on trace buffers.
176 * @tbc: a pointer to a dom0_tbufcontrol_t to be filled out
177 */
178 int tb_control(dom0_tbufcontrol_t *tbc)
179 {
180 static DEFINE_SPINLOCK(lock);
181 int rc = 0;
183 spin_lock(&lock);
185 switch ( tbc->op )
186 {
187 case DOM0_TBUF_GET_INFO:
188 tbc->cpu_mask = tb_cpu_mask;
189 tbc->evt_mask = tb_event_mask;
190 tbc->buffer_mfn = opt_tbuf_size ? virt_to_mfn(per_cpu(t_bufs, 0)) : 0;
191 tbc->size = opt_tbuf_size * PAGE_SIZE;
192 break;
193 case DOM0_TBUF_SET_CPU_MASK:
194 tb_cpu_mask = tbc->cpu_mask;
195 break;
196 case DOM0_TBUF_SET_EVT_MASK:
197 tb_event_mask = tbc->evt_mask;
198 break;
199 case DOM0_TBUF_SET_SIZE:
200 rc = !tb_init_done ? tb_set_size(tbc->size) : -EINVAL;
201 break;
202 case DOM0_TBUF_ENABLE:
203 /* Enable trace buffers. Check buffers are already allocated. */
204 if ( opt_tbuf_size == 0 )
205 rc = -EINVAL;
206 else
207 tb_init_done = 1;
208 break;
209 case DOM0_TBUF_DISABLE:
210 /*
211 * Disable trace buffers. Just stops new records from being written,
212 * does not deallocate any memory.
213 */
214 tb_init_done = 0;
215 break;
216 default:
217 rc = -EINVAL;
218 break;
219 }
221 spin_unlock(&lock);
223 return rc;
224 }
/**
 * trace - Enters a trace tuple into the trace buffer for the current CPU.
 * @event: the event type being logged
 * @d1...d5: the data items for the event being logged
 *
 * Logs a trace record into the appropriate per-cpu buffer.  The caller
 * must have checked that tracing is enabled (tb_init_done != 0), e.g.
 * via the TRACE_xD macros; this function BUG()s otherwise.  Records are
 * silently dropped (and counted in lost_records) when the buffer is full
 * or the event/cpu is filtered out by the current masks.
 */
void trace(u32 event, unsigned long d1, unsigned long d2,
           unsigned long d3, unsigned long d4, unsigned long d5)
{
    struct t_buf *buf;
    struct t_rec *rec;
    unsigned long flags;

    BUG_ON(!tb_init_done);

    /* Event must have at least one bit set in the event mask. */
    if ( (tb_event_mask & event) == 0 )
        return;

    /* match class */
    if ( ((tb_event_mask >> TRC_CLS_SHIFT) & (event >> TRC_CLS_SHIFT)) == 0 )
        return;

    /* then match subclass */
    if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf )
        & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
        return;

    /* Tracing may be restricted to a subset of CPUs via tb_cpu_mask. */
    if ( (tb_cpu_mask & (1UL << smp_processor_id())) == 0 )
        return;

    /* Read tb_init_done /before/ t_bufs. */
    rmb();

    buf = this_cpu(t_bufs);

    /* IRQs off: keep the producer update atomic w.r.t. this CPU. */
    local_irq_save(flags);

    /* Check if space for two records (we write two if there are lost recs). */
    if ( (buf->prod - buf->cons) >= (nr_recs - 1) )
    {
        /* Buffer full: count the dropped record and bail out. */
        this_cpu(lost_records)++;
        local_irq_restore(flags);
        return;
    }

    if ( unlikely(this_cpu(lost_records) != 0) )
    {
        /* First record after a drop: emit a TRC_LOST_RECORDS marker
         * carrying the drop count, then reset the counter. */
        rec = &this_cpu(t_recs)[buf->prod % nr_recs];
        memset(rec, 0, sizeof(*rec));
        rec->cycles = (u64)get_cycles();
        rec->event = TRC_LOST_RECORDS;
        rec->data[0] = this_cpu(lost_records);
        this_cpu(lost_records) = 0;

        /* Record contents must be visible before the producer index moves. */
        wmb();
        buf->prod++;
    }

    rec = &this_cpu(t_recs)[buf->prod % nr_recs];
    rec->cycles = (u64)get_cycles();
    rec->event = event;
    rec->data[0] = d1;
    rec->data[1] = d2;
    rec->data[2] = d3;
    rec->data[3] = d4;
    rec->data[4] = d5;

    /* Record contents must be visible before the producer index moves. */
    wmb();
    buf->prod++;

    local_irq_restore(flags);

    /*
     * Notify trace buffer consumer that we've reached the high water mark.
     */
    if ( (buf->prod - buf->cons) == t_buf_highwater )
        raise_softirq(TRACE_SOFTIRQ);
}
309 /*
310 * Local variables:
311 * mode: C
312 * c-set-style: "BSD"
313 * c-basic-offset: 4
314 * tab-width: 4
315 * indent-tabs-mode: nil
316 * End:
317 */