ia64/xen-unstable

view xen/common/trace.c @ 8609:85d693e6f61a

Arch-specific per-vcpu info should be initialised to zero
when allocating a new vcpu structure, not copied from
CPU0's idle VCPU. Especially now that the idle VCPU itself
is dynamically allocated.

This should fix assertions people have been seeing in
getdomain_info_ctxt() in relation to IOPL in eflags.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Jan 14 21:26:40 2006 +0100 (2006-01-14)
parents dca4893b0b9f
children a4dc14edd56b
line source
1 /******************************************************************************
2 * common/trace.c
3 *
4 * Xen Trace Buffer
5 *
6 * Copyright (C) 2004 by Intel Research Cambridge
7 *
8 * Authors: Mark Williamson, mark.a.williamson@intel.com
9 * Rob Gardner, rob.gardner@hp.com
10 * Date: October 2005
11 *
12 * Copyright (C) 2005 Bin Ren
13 *
14 * The trace buffer code is designed to allow debugging traces of Xen to be
15 * generated on UP / SMP machines. Each trace entry is timestamped so that
16 * it's possible to reconstruct a chronological record of trace events.
17 *
18 * See also include/xen/trace.h and the dom0 op in
19 * include/public/dom0_ops.h
20 */
22 #include <xen/config.h>
23 #include <asm/types.h>
24 #include <asm/io.h>
25 #include <xen/lib.h>
26 #include <xen/sched.h>
27 #include <xen/smp.h>
28 #include <xen/trace.h>
29 #include <xen/errno.h>
30 #include <xen/init.h>
31 #include <asm/atomic.h>
32 #include <public/dom0_ops.h>
/* opt_tbuf_size: trace buffer size (in pages), 0 = tracing disabled */
static unsigned int opt_tbuf_size = 0;
integer_param("tbuf_size", opt_tbuf_size);

/* Pointers to the meta-data objects for all system trace buffers */
static struct t_buf *t_bufs[NR_CPUS];
static struct t_rec *t_recs[NR_CPUS];
/* Number of t_rec slots in each per-CPU buffer (set by alloc_trace_bufs). */
static int nr_recs;

/* a flag recording whether initialization has been done */
/* or more properly, if the tbuf subsystem is enabled right now */
int tb_init_done;

/* which CPUs tracing is enabled on (bit per CPU id; default: all) */
static unsigned long tb_cpu_mask = (~0UL);

/* which tracing events are enabled (bitmask matched against event codes) */
static u32 tb_event_mask = TRC_ALL;
53 /**
54 * alloc_trace_bufs - performs initialization of the per-cpu trace buffers.
55 *
56 * This function is called at start of day in order to initialize the per-cpu
57 * trace buffers. The trace buffers are then available for debugging use, via
58 * the %TRACE_xD macros exported in <xen/trace.h>.
59 *
60 * This function may also be called later when enabling trace buffers
61 * via the SET_SIZE hypercall.
62 */
63 static int alloc_trace_bufs(void)
64 {
65 int i, order;
66 unsigned long nr_pages;
67 char *rawbuf;
68 struct t_buf *buf;
70 if ( opt_tbuf_size == 0 )
71 return -EINVAL;
73 nr_pages = num_online_cpus() * opt_tbuf_size;
74 order = get_order_from_pages(nr_pages);
75 nr_recs = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf)) /
76 sizeof(struct t_rec);
78 if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
79 {
80 printk("Xen trace buffers: memory allocation failed\n");
81 return -EINVAL;
82 }
84 /* Share pages so that xentrace can map them. */
85 for ( i = 0; i < nr_pages; i++ )
86 SHARE_PFN_WITH_DOMAIN(virt_to_page(rawbuf + i * PAGE_SIZE), dom0);
88 for_each_online_cpu ( i )
89 {
90 buf = t_bufs[i] = (struct t_buf *)&rawbuf[i*opt_tbuf_size*PAGE_SIZE];
91 buf->cons = buf->prod = 0;
92 t_recs[i] = (struct t_rec *)(buf + 1);
93 }
95 return 0;
96 }
99 /**
100 * tb_set_size - handle the logic involved with dynamically
101 * allocating and deallocating tbufs
102 *
103 * This function is called when the SET_SIZE hypercall is done.
104 */
105 static int tb_set_size(int size)
106 {
107 /*
108 * Setting size is a one-shot operation. It can be done either at
109 * boot time or via control tools, but not by both. Once buffers
110 * are created they cannot be destroyed.
111 */
112 if ( (opt_tbuf_size != 0) || (size <= 0) )
113 {
114 DPRINTK("tb_set_size from %d to %d not implemented\n",
115 opt_tbuf_size, size);
116 return -EINVAL;
117 }
119 opt_tbuf_size = size;
120 if ( alloc_trace_bufs() != 0 )
121 {
122 opt_tbuf_size = 0;
123 return -EINVAL;
124 }
126 printk("Xen trace buffers: initialized\n");
127 return 0;
128 }
131 /**
132 * init_trace_bufs - performs initialization of the per-cpu trace buffers.
133 *
134 * This function is called at start of day in order to initialize the per-cpu
135 * trace buffers. The trace buffers are then available for debugging use, via
136 * the %TRACE_xD macros exported in <xen/trace.h>.
137 */
138 void init_trace_bufs(void)
139 {
140 if ( opt_tbuf_size == 0 )
141 {
142 printk("Xen trace buffers: disabled\n");
143 return;
144 }
146 if ( alloc_trace_bufs() == 0 )
147 {
148 printk("Xen trace buffers: initialised\n");
149 wmb(); /* above must be visible before tb_init_done flag set */
150 tb_init_done = 1;
151 }
152 }
155 /**
156 * tb_control - DOM0 operations on trace buffers.
157 * @tbc: a pointer to a dom0_tbufcontrol_t to be filled out
158 */
159 int tb_control(dom0_tbufcontrol_t *tbc)
160 {
161 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
162 int rc = 0;
164 spin_lock(&lock);
166 if ( !tb_init_done &&
167 (tbc->op != DOM0_TBUF_SET_SIZE) &&
168 (tbc->op != DOM0_TBUF_ENABLE) )
169 {
170 spin_unlock(&lock);
171 return -EINVAL;
172 }
174 switch ( tbc->op )
175 {
176 case DOM0_TBUF_GET_INFO:
177 tbc->cpu_mask = tb_cpu_mask;
178 tbc->evt_mask = tb_event_mask;
179 tbc->buffer_mfn = __pa(t_bufs[0]) >> PAGE_SHIFT;
180 tbc->size = opt_tbuf_size * PAGE_SIZE;
181 break;
182 case DOM0_TBUF_SET_CPU_MASK:
183 tb_cpu_mask = tbc->cpu_mask;
184 break;
185 case DOM0_TBUF_SET_EVT_MASK:
186 tb_event_mask = tbc->evt_mask;
187 break;
188 case DOM0_TBUF_SET_SIZE:
189 rc = !tb_init_done ? tb_set_size(tbc->size) : -EINVAL;
190 break;
191 case DOM0_TBUF_ENABLE:
192 /* Enable trace buffers. Check buffers are already allocated. */
193 if ( opt_tbuf_size == 0 )
194 rc = -EINVAL;
195 else
196 tb_init_done = 1;
197 break;
198 case DOM0_TBUF_DISABLE:
199 /*
200 * Disable trace buffers. Just stops new records from being written,
201 * does not deallocate any memory.
202 */
203 tb_init_done = 0;
204 break;
205 default:
206 rc = -EINVAL;
207 break;
208 }
210 spin_unlock(&lock);
212 return rc;
213 }
/**
 * trace - Enters a trace tuple into the trace buffer for the current CPU.
 * @event: the event type being logged
 * @d1...d5: the data items for the event being logged
 *
 * Logs a trace record into the appropriate buffer. The record is silently
 * dropped if the event is filtered out by the event/CPU masks or if the
 * ring is full. Must only be called once tb_init_done is set (BUG
 * otherwise).
 */
void trace(u32 event, unsigned long d1, unsigned long d2,
           unsigned long d3, unsigned long d4, unsigned long d5)
{
    struct t_buf *buf;
    struct t_rec *rec;
    unsigned long flags;

    BUG_ON(!tb_init_done);

    /* Event must have at least one bit in common with the global mask. */
    if ( (tb_event_mask & event) == 0 )
        return;

    /* match class */
    if ( ((tb_event_mask >> TRC_CLS_SHIFT) & (event >> TRC_CLS_SHIFT)) == 0 )
        return;

    /* then match subclass */
    if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf )
                & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
        return;

    /* Tracing may be restricted to a subset of CPUs via tb_cpu_mask. */
    if ( (tb_cpu_mask & (1UL << smp_processor_id())) == 0 )
        return;

    /* Read tb_init_done /before/ t_bufs. */
    rmb();

    buf = t_bufs[smp_processor_id()];

    /* IRQs off: keep the record insertion atomic on this CPU. */
    local_irq_save(flags);

    /* Ring full (consumer has not caught up): drop this record. */
    if ( (buf->prod - buf->cons) >= nr_recs )
    {
        local_irq_restore(flags);
        return;
    }

    rec = &t_recs[smp_processor_id()][buf->prod % nr_recs];
    rec->cycles = (u64)get_cycles();
    rec->event = event;
    rec->data[0] = d1;
    rec->data[1] = d2;
    rec->data[2] = d3;
    rec->data[3] = d4;
    rec->data[4] = d5;

    /* Record contents must be visible before the producer index moves. */
    wmb();
    buf->prod++;

    local_irq_restore(flags);
}
276 /*
277 * Local variables:
278 * mode: C
279 * c-set-style: "BSD"
280 * c-basic-offset: 4
281 * tab-width: 4
282 * indent-tabs-mode: nil
283 * End:
284 */