ia64/xen-unstable

view xen/common/trace.c @ 15927:b7eb2bb9b625

IRQ injection changes for HVM PCI passthru.
Signed-off-by: Allen Kay <allen.m.kay@intel.com>
Signed-off-by: Guy Zana <guy@neocleus.com>
author kfraser@localhost.localdomain
date Tue Sep 18 16:09:19 2007 +0100 (2007-09-18)
parents 759d924af6d8
children 7ed576909132
/******************************************************************************
 * common/trace.c
 *
 * Xen Trace Buffer
 *
 * Copyright (C) 2004 by Intel Research Cambridge
 *
 * Authors: Mark Williamson, mark.a.williamson@intel.com
 *          Rob Gardner, rob.gardner@hp.com
 * Date:    October 2005
 *
 * Copyright (C) 2005 Bin Ren
 *
 * The trace buffer code is designed to allow debugging traces of Xen to be
 * generated on UP / SMP machines.  Each trace entry is timestamped so that
 * it's possible to reconstruct a chronological record of trace events.
 */

#include <xen/config.h>
#include <asm/types.h>
#include <asm/io.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/trace.h>
#include <xen/errno.h>
#include <xen/event.h>
#include <xen/softirq.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/percpu.h>
#include <asm/atomic.h>
#include <public/sysctl.h>

#ifdef CONFIG_COMPAT
#include <compat/trace.h>
#define xen_t_buf t_buf
CHECK_t_buf;
#undef xen_t_buf
#define TB_COMPAT IS_COMPAT(dom0)
#else
#define compat_t_rec t_rec
#define TB_COMPAT 0
#endif

typedef union {
    struct t_rec *nat;
    struct compat_t_rec *cmp;
} t_rec_u;

/* opt_tbuf_size: trace buffer size (in pages) */
static unsigned int opt_tbuf_size = 0;
integer_param("tbuf_size", opt_tbuf_size);

/* Pointers to the meta-data objects for all system trace buffers */
static DEFINE_PER_CPU(struct t_buf *, t_bufs);
static DEFINE_PER_CPU(t_rec_u, t_recs);
static int nr_recs;

/*
 * High water mark for trace buffers; a virtual interrupt is sent to the
 * trace consumer when the buffer level reaches this point.
 */
static int t_buf_highwater;

/* Number of records lost due to per-CPU trace buffer being full. */
static DEFINE_PER_CPU(unsigned long, lost_records);

/*
 * A flag recording whether initialization has been done or, more properly,
 * whether the tbuf subsystem is enabled right now.
 */
int tb_init_done __read_mostly;

/* Which CPUs tracing is enabled on. */
static cpumask_t tb_cpu_mask = CPU_MASK_ALL;

/* Which tracing events are enabled. */
static u32 tb_event_mask = TRC_ALL;

static void trace_notify_guest(void)
{
    send_guest_global_virq(dom0, VIRQ_TBUF);
}

/**
 * alloc_trace_bufs - performs initialization of the per-cpu trace buffers.
 *
 * This function is called at start of day in order to initialize the per-cpu
 * trace buffers. The trace buffers are then available for debugging use, via
 * the %TRACE_xD macros exported in <xen/trace.h>.
 *
 * This function may also be called later when enabling trace buffers
 * via the SET_SIZE hypercall.
 */
static int alloc_trace_bufs(void)
{
    int i, order;
    unsigned long nr_pages;
    char *rawbuf;
    struct t_buf *buf;

    if ( opt_tbuf_size == 0 )
        return -EINVAL;

    nr_pages = num_online_cpus() * opt_tbuf_size;
    order = get_order_from_pages(nr_pages);
    nr_recs = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf)) /
        (!TB_COMPAT ? sizeof(struct t_rec) : sizeof(struct compat_t_rec));

    if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
    {
        printk("Xen trace buffers: memory allocation failed\n");
        opt_tbuf_size = 0;
        return -EINVAL;
    }

    /* Share pages so that xentrace can map them. */
    for ( i = 0; i < nr_pages; i++ )
        share_xen_page_with_privileged_guests(
            virt_to_page(rawbuf) + i, XENSHARE_writable);

    for_each_online_cpu ( i )
    {
        buf = per_cpu(t_bufs, i) = (struct t_buf *)
            &rawbuf[i*opt_tbuf_size*PAGE_SIZE];
        buf->cons = buf->prod = 0;
        per_cpu(t_recs, i).nat = (struct t_rec *)(buf + 1);
    }

    t_buf_highwater = nr_recs >> 1; /* 50% high water */
    open_softirq(TRACE_SOFTIRQ, trace_notify_guest);

    return 0;
}
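
/*
 * Worked example of the sizing above (illustrative; the concrete numbers are
 * assumptions, not taken from this file): with PAGE_SIZE = 4096,
 * opt_tbuf_size = 2 pages per CPU, sizeof(struct t_buf) = 8 and
 * sizeof(struct t_rec) = 32, each CPU's buffer holds
 * nr_recs = (2 * 4096 - 8) / 32 = 255 records, laid out as the struct t_buf
 * header followed immediately by the record array that t_recs points at,
 * and t_buf_highwater ends up at 127 records.
 */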

/**
 * tb_set_size - handle the logic involved with dynamically
 * allocating and deallocating tbufs
 *
 * This function is called when the SET_SIZE hypercall is done.
 */
static int tb_set_size(int size)
{
    /*
     * Setting size is a one-shot operation. It can be done either at
     * boot time or via control tools, but not by both. Once buffers
     * are created they cannot be destroyed.
     */
    if ( (opt_tbuf_size != 0) || (size <= 0) )
    {
        gdprintk(XENLOG_INFO, "tb_set_size from %d to %d not implemented\n",
                 opt_tbuf_size, size);
        return -EINVAL;
    }

    opt_tbuf_size = size;
    if ( alloc_trace_bufs() != 0 )
        return -EINVAL;

    printk("Xen trace buffers: initialized\n");
    return 0;
}

/**
 * init_trace_bufs - performs initialization of the per-cpu trace buffers.
 *
 * This function is called at start of day in order to initialize the per-cpu
 * trace buffers. The trace buffers are then available for debugging use, via
 * the %TRACE_xD macros exported in <xen/trace.h>.
 */
void __init init_trace_bufs(void)
{
    if ( opt_tbuf_size == 0 )
    {
        printk("Xen trace buffers: disabled\n");
        return;
    }

    if ( alloc_trace_bufs() == 0 )
    {
        printk("Xen trace buffers: initialised\n");
        wmb(); /* above must be visible before tb_init_done flag set */
        tb_init_done = 1;
    }
}
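
/*
 * Illustrative sketch, not part of the original file: once tb_init_done is
 * set, other parts of Xen emit records through the TRACE_xD wrappers from
 * <xen/trace.h> referred to above, which funnel into trace() below.  The
 * particular macro and event used here are assumptions for illustration.
 */
#if 0
static void example_trace_point(struct domain *d)
{
    /* Log a two-word record: the domain being added and a spare word. */
    TRACE_2D(TRC_SCHED_DOM_ADD, d->domain_id, 0);
}
#endif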

/**
 * tb_control - sysctl operations on trace buffers.
 * @tbc: a pointer to a xen_sysctl_tbuf_op_t to be filled out
 */
int tb_control(xen_sysctl_tbuf_op_t *tbc)
{
    static DEFINE_SPINLOCK(lock);
    int rc = 0;

    spin_lock(&lock);

    switch ( tbc->cmd )
    {
    case XEN_SYSCTL_TBUFOP_get_info:
        tbc->evt_mask   = tb_event_mask;
        tbc->buffer_mfn = opt_tbuf_size ? virt_to_mfn(per_cpu(t_bufs, 0)) : 0;
        tbc->size       = opt_tbuf_size * PAGE_SIZE;
        break;
    case XEN_SYSCTL_TBUFOP_set_cpu_mask:
        xenctl_cpumap_to_cpumask(&tb_cpu_mask, &tbc->cpu_mask);
        break;
    case XEN_SYSCTL_TBUFOP_set_evt_mask:
        tb_event_mask = tbc->evt_mask;
        break;
    case XEN_SYSCTL_TBUFOP_set_size:
        rc = !tb_init_done ? tb_set_size(tbc->size) : -EINVAL;
        break;
    case XEN_SYSCTL_TBUFOP_enable:
        /* Enable trace buffers. Check buffers are already allocated. */
        if ( opt_tbuf_size == 0 )
            rc = -EINVAL;
        else
            tb_init_done = 1;
        break;
    case XEN_SYSCTL_TBUFOP_disable:
        /*
         * Disable trace buffers. Just stops new records from being written,
         * does not deallocate any memory.
         */
        tb_init_done = 0;
        break;
    default:
        rc = -EINVAL;
        break;
    }

    spin_unlock(&lock);

    return rc;
}
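
/*
 * Consumer-side sketch, not part of the original file: a privileged tool
 * such as xentrace uses XEN_SYSCTL_TBUFOP_get_info to learn buffer_mfn and
 * the per-CPU buffer size, maps the shared pages, and then drains each CPU's
 * ring roughly as below.  The mapping step and the real record handling are
 * elided; only the cons/prod protocol implied by trace() is shown, and the
 * helper name is hypothetical.
 */
#if 0
static void example_drain_cpu_buffer(struct t_buf *buf, struct t_rec *recs,
                                     unsigned int n_recs)
{
    while ( buf->cons != buf->prod )
    {
        struct t_rec *r = &recs[buf->cons % n_recs];
        /* ... copy *r out to the trace log ... */
        buf->cons++; /* hand the slot back to the producer */
    }
}
#endif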

/**
 * trace - Enters a trace tuple into the trace buffer for the current CPU.
 * @event: the event type being logged
 * @d1...d5: the data items for the event being logged
 *
 * Logs a trace record into the appropriate buffer.  The caller must ensure
 * the trace buffers have been initialised (tb_init_done); records are
 * silently dropped if the event is filtered out or the buffer is full.
 */
void trace(u32 event, unsigned long d1, unsigned long d2,
           unsigned long d3, unsigned long d4, unsigned long d5)
{
    struct t_buf *buf;
    t_rec_u rec;
    unsigned long flags;

    BUG_ON(!tb_init_done);

    if ( (tb_event_mask & event) == 0 )
        return;

    /* match class */
    if ( ((tb_event_mask >> TRC_CLS_SHIFT) & (event >> TRC_CLS_SHIFT)) == 0 )
        return;

    /* then match subclass */
    if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf )
           & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
        return;

    if ( !cpu_isset(smp_processor_id(), tb_cpu_mask) )
        return;

    /* Read tb_init_done /before/ t_bufs. */
    rmb();

    buf = this_cpu(t_bufs);

    local_irq_save(flags);

    /* Check if space for two records (we write two if there are lost recs). */
    if ( (buf->prod - buf->cons) >= (nr_recs - 1) )
    {
        this_cpu(lost_records)++;
        local_irq_restore(flags);
        return;
    }

    if ( unlikely(this_cpu(lost_records) != 0) )
    {
        if ( !TB_COMPAT )
        {
            rec.nat = &this_cpu(t_recs).nat[buf->prod % nr_recs];
            memset(rec.nat, 0, sizeof(*rec.nat));
            rec.nat->cycles = (u64)get_cycles();
            rec.nat->event = TRC_LOST_RECORDS;
            rec.nat->data[0] = this_cpu(lost_records);
            this_cpu(lost_records) = 0;
        }
        else
        {
            rec.cmp = &this_cpu(t_recs).cmp[buf->prod % nr_recs];
            memset(rec.cmp, 0, sizeof(*rec.cmp));
            rec.cmp->cycles = (u64)get_cycles();
            rec.cmp->event = TRC_LOST_RECORDS;
            rec.cmp->data[0] = this_cpu(lost_records);
            this_cpu(lost_records) = 0;
        }

        wmb();
        buf->prod++;
    }

    if ( !TB_COMPAT )
    {
        rec.nat = &this_cpu(t_recs).nat[buf->prod % nr_recs];
        rec.nat->cycles = (u64)get_cycles();
        rec.nat->event = event;
        rec.nat->data[0] = d1;
        rec.nat->data[1] = d2;
        rec.nat->data[2] = d3;
        rec.nat->data[3] = d4;
        rec.nat->data[4] = d5;
    }
    else
    {
        rec.cmp = &this_cpu(t_recs).cmp[buf->prod % nr_recs];
        rec.cmp->cycles = (u64)get_cycles();
        rec.cmp->event = event;
        rec.cmp->data[0] = d1;
        rec.cmp->data[1] = d2;
        rec.cmp->data[2] = d3;
        rec.cmp->data[3] = d4;
        rec.cmp->data[4] = d5;
    }

    wmb();
    buf->prod++;

    local_irq_restore(flags);

    /* Notify trace buffer consumer that we've reached the high water mark. */
    if ( (buf->prod - buf->cons) == t_buf_highwater )
        raise_softirq(TRACE_SOFTIRQ);
}
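
/*
 * Filtering example for the mask checks in trace() above (illustrative; the
 * shift values are assumptions based on the conventional <public/trace.h>
 * encoding, not taken from this file): assuming TRC_CLS_SHIFT = 16 and
 * TRC_SUBCLS_SHIFT = 12, an event such as 0x00082001 has class bits 0x8 and
 * subclass nibble 0x2.  With tb_event_mask = TRC_ALL every class and
 * subclass bit is set, so the record is accepted; a mask of 0x0008f000 would
 * accept the whole 0x8 class, while a mask whose subclass nibble is 0x1 only
 * would reject this event at the subclass check.
 */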

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */