ia64/xen-unstable

view xen/common/trace.c @ 11898:266fb767323c

[XENTRACE] Fix typo in set_cpu_mask op.
From: Xiaowei Yang <xiaowei.yang@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Oct 19 14:53:31 2006 +0100 (2006-10-19)
parents 86d26e6ec89b
children 64100a77fd17
line source
1 /******************************************************************************
2 * common/trace.c
3 *
4 * Xen Trace Buffer
5 *
6 * Copyright (C) 2004 by Intel Research Cambridge
7 *
8 * Authors: Mark Williamson, mark.a.williamson@intel.com
9 * Rob Gardner, rob.gardner@hp.com
10 * Date: October 2005
11 *
12 * Copyright (C) 2005 Bin Ren
13 *
14 * The trace buffer code is designed to allow debugging traces of Xen to be
15 * generated on UP / SMP machines. Each trace entry is timestamped so that
16 * it's possible to reconstruct a chronological record of trace events.
17 */
19 #include <xen/config.h>
20 #include <asm/types.h>
21 #include <asm/io.h>
22 #include <xen/lib.h>
23 #include <xen/sched.h>
24 #include <xen/smp.h>
25 #include <xen/trace.h>
26 #include <xen/errno.h>
27 #include <xen/event.h>
28 #include <xen/softirq.h>
29 #include <xen/init.h>
30 #include <xen/mm.h>
31 #include <xen/percpu.h>
32 #include <asm/atomic.h>
33 #include <public/sysctl.h>
35 /* opt_tbuf_size: trace buffer size (in pages) */
36 static unsigned int opt_tbuf_size = 0;
37 integer_param("tbuf_size", opt_tbuf_size);
39 /* Pointers to the meta-data objects for all system trace buffers */
40 static DEFINE_PER_CPU(struct t_buf *, t_bufs);
41 static DEFINE_PER_CPU(struct t_rec *, t_recs);
42 static int nr_recs;
44 /* High water mark for trace buffers; */
45 /* Send virtual interrupt when buffer level reaches this point */
46 static int t_buf_highwater;
48 /* Number of records lost due to per-CPU trace buffer being full. */
49 static DEFINE_PER_CPU(unsigned long, lost_records);
51 /* a flag recording whether initialization has been done */
52 /* or more properly, if the tbuf subsystem is enabled right now */
53 int tb_init_done;
55 /* which CPUs tracing is enabled on */
56 static cpumask_t tb_cpu_mask = CPU_MASK_ALL;
58 /* which tracing events are enabled */
59 static u32 tb_event_mask = TRC_ALL;
61 static void trace_notify_guest(void)
62 {
63 send_guest_global_virq(dom0, VIRQ_TBUF);
64 }
67 /**
68 * alloc_trace_bufs - performs initialization of the per-cpu trace buffers.
69 *
70 * This function is called at start of day in order to initialize the per-cpu
71 * trace buffers. The trace buffers are then available for debugging use, via
72 * the %TRACE_xD macros exported in <xen/trace.h>.
73 *
74 * This function may also be called later when enabling trace buffers
75 * via the SET_SIZE hypercall.
76 */
77 static int alloc_trace_bufs(void)
78 {
79 int i, order;
80 unsigned long nr_pages;
81 char *rawbuf;
82 struct t_buf *buf;
84 if ( opt_tbuf_size == 0 )
85 return -EINVAL;
87 nr_pages = num_online_cpus() * opt_tbuf_size;
88 order = get_order_from_pages(nr_pages);
89 nr_recs = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf)) /
90 sizeof(struct t_rec);
92 if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
93 {
94 printk("Xen trace buffers: memory allocation failed\n");
95 opt_tbuf_size = 0;
96 return -EINVAL;
97 }
99 /* Share pages so that xentrace can map them. */
100 for ( i = 0; i < nr_pages; i++ )
101 share_xen_page_with_privileged_guests(
102 virt_to_page(rawbuf) + i, XENSHARE_writable);
104 for_each_online_cpu ( i )
105 {
106 buf = per_cpu(t_bufs, i) = (struct t_buf *)
107 &rawbuf[i*opt_tbuf_size*PAGE_SIZE];
108 buf->cons = buf->prod = 0;
109 per_cpu(t_recs, i) = (struct t_rec *)(buf + 1);
110 }
112 t_buf_highwater = nr_recs >> 1; /* 50% high water */
113 open_softirq(TRACE_SOFTIRQ, trace_notify_guest);
115 return 0;
116 }
119 /**
120 * tb_set_size - handle the logic involved with dynamically
121 * allocating and deallocating tbufs
122 *
123 * This function is called when the SET_SIZE hypercall is done.
124 */
125 static int tb_set_size(int size)
126 {
127 /*
128 * Setting size is a one-shot operation. It can be done either at
129 * boot time or via control tools, but not by both. Once buffers
130 * are created they cannot be destroyed.
131 */
132 if ( (opt_tbuf_size != 0) || (size <= 0) )
133 {
134 DPRINTK("tb_set_size from %d to %d not implemented\n",
135 opt_tbuf_size, size);
136 return -EINVAL;
137 }
139 opt_tbuf_size = size;
140 if ( alloc_trace_bufs() != 0 )
141 return -EINVAL;
143 printk("Xen trace buffers: initialized\n");
144 return 0;
145 }
148 /**
149 * init_trace_bufs - performs initialization of the per-cpu trace buffers.
150 *
151 * This function is called at start of day in order to initialize the per-cpu
152 * trace buffers. The trace buffers are then available for debugging use, via
153 * the %TRACE_xD macros exported in <xen/trace.h>.
154 */
155 void init_trace_bufs(void)
156 {
157 if ( opt_tbuf_size == 0 )
158 {
159 printk("Xen trace buffers: disabled\n");
160 return;
161 }
163 if ( alloc_trace_bufs() == 0 )
164 {
165 printk("Xen trace buffers: initialised\n");
166 wmb(); /* above must be visible before tb_init_done flag set */
167 tb_init_done = 1;
168 }
169 }
/**
 * tb_control - sysctl operations on trace buffers.
 * @tbc: a pointer to a xen_sysctl_tbuf_op_t to be filled out
 *
 * Returns 0 on success, or -EINVAL for an unknown command, a rejected
 * set_size, or an enable request before buffers exist.  All operations
 * are serialised by a function-local spinlock.
 */
int tb_control(xen_sysctl_tbuf_op_t *tbc)
{
    static DEFINE_SPINLOCK(lock);
    int rc = 0;

    spin_lock(&lock);

    switch ( tbc->cmd )
    {
    case XEN_SYSCTL_TBUFOP_get_info:
        /* buffer_mfn is 0 until buffers have been allocated. */
        tbc->evt_mask = tb_event_mask;
        tbc->buffer_mfn = opt_tbuf_size ? virt_to_mfn(per_cpu(t_bufs, 0)) : 0;
        tbc->size = opt_tbuf_size * PAGE_SIZE;
        break;
    case XEN_SYSCTL_TBUFOP_set_cpu_mask:
        xenctl_cpumap_to_cpumask(&tb_cpu_mask, &tbc->cpu_mask);
        break;
    case XEN_SYSCTL_TBUFOP_set_evt_mask:
        tb_event_mask = tbc->evt_mask;
        break;
    case XEN_SYSCTL_TBUFOP_set_size:
        /* Size may only be set while tracing is disabled (one-shot). */
        rc = !tb_init_done ? tb_set_size(tbc->size) : -EINVAL;
        break;
    case XEN_SYSCTL_TBUFOP_enable:
        /* Enable trace buffers. Check buffers are already allocated. */
        if ( opt_tbuf_size == 0 )
            rc = -EINVAL;
        else
            tb_init_done = 1;
        break;
    case XEN_SYSCTL_TBUFOP_disable:
        /*
         * Disable trace buffers. Just stops new records from being written,
         * does not deallocate any memory.
         */
        tb_init_done = 0;
        break;
    default:
        rc = -EINVAL;
        break;
    }

    spin_unlock(&lock);

    return rc;
}
/**
 * trace - Enters a trace tuple into the trace buffer for the current CPU.
 * @event: the event type being logged
 * @d1...d5: the data items for the event being logged
 *
 * Logs a trace record into the current CPU's buffer, provided the event
 * passes the event/class/subclass masks and this CPU is in tb_cpu_mask.
 * If the buffer is full the record is silently dropped and accounted in
 * lost_records; a TRC_LOST_RECORDS marker is emitted once space reappears.
 * Tracing must already be initialised (tb_init_done != 0) — the TRACE_xD
 * wrappers in <xen/trace.h> check this before calling here.
 */
void trace(u32 event, unsigned long d1, unsigned long d2,
           unsigned long d3, unsigned long d4, unsigned long d5)
{
    struct t_buf *buf;
    struct t_rec *rec;
    unsigned long flags;

    BUG_ON(!tb_init_done);

    /* Cheap whole-mask filter first. */
    if ( (tb_event_mask & event) == 0 )
        return;

    /* match class */
    if ( ((tb_event_mask >> TRC_CLS_SHIFT) & (event >> TRC_CLS_SHIFT)) == 0 )
        return;

    /* then match subclass */
    if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf )
                & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
        return;

    if ( !cpu_isset(smp_processor_id(), tb_cpu_mask) )
        return;

    /* Read tb_init_done /before/ t_bufs. */
    rmb();

    buf = this_cpu(t_bufs);

    /* Disable IRQs: the buffer may also be written from interrupt context. */
    local_irq_save(flags);

    /* Check if space for two records (we write two if there are lost recs). */
    if ( (buf->prod - buf->cons) >= (nr_recs - 1) )
    {
        this_cpu(lost_records)++;
        local_irq_restore(flags);
        return;
    }

    /* Emit a marker record first if any earlier records were dropped. */
    if ( unlikely(this_cpu(lost_records) != 0) )
    {
        rec = &this_cpu(t_recs)[buf->prod % nr_recs];
        memset(rec, 0, sizeof(*rec));
        rec->cycles = (u64)get_cycles();
        rec->event = TRC_LOST_RECORDS;
        rec->data[0] = this_cpu(lost_records);
        this_cpu(lost_records) = 0;

        /* Record contents must be visible before prod advances. */
        wmb();
        buf->prod++;
    }

    rec = &this_cpu(t_recs)[buf->prod % nr_recs];
    rec->cycles = (u64)get_cycles();
    rec->event = event;
    rec->data[0] = d1;
    rec->data[1] = d2;
    rec->data[2] = d3;
    rec->data[3] = d4;
    rec->data[4] = d5;

    /* Publish the record before the consumer can see the new prod index. */
    wmb();
    buf->prod++;

    local_irq_restore(flags);

    /*
     * Notify trace buffer consumer that we've reached the high water mark.
     */
    if ( (buf->prod - buf->cons) == t_buf_highwater )
        raise_softirq(TRACE_SOFTIRQ);
}
305 /*
306 * Local variables:
307 * mode: C
308 * c-set-style: "BSD"
309 * c-basic-offset: 4
310 * tab-width: 4
311 * indent-tabs-mode: nil
312 * End:
313 */