view xen/arch/x86/oprofile/xenoprof.c @ 11135:88e6bd5e2b54 (direct-io.hg)

Whitespace clean-ups.

Signed-off-by: Steven Hand <steven@xensource.com>

author   shand@kneesaa.uk.xensource.com
date     Wed Aug 16 11:36:13 2006 +0100
parents  23591d2c46aa
children 1ece34466781

/*
 * Copyright (C) 2005 Hewlett-Packard Co.
 * written by Aravind Menon & Jose Renato Santos
 *            (email: xenoprof@groups.hp.com)
 */

#include <xen/guest_access.h>
#include <xen/sched.h>
#include <public/xenoprof.h>

#include "op_counter.h"

/* Limit the number of pages used for the shared buffer (per domain). */
#define MAX_OPROF_SHARED_PAGES 32

struct domain *active_domains[MAX_OPROF_DOMAINS];
int active_ready[MAX_OPROF_DOMAINS];
unsigned int adomains;

struct domain *passive_domains[MAX_OPROF_DOMAINS];
unsigned int pdomains;

unsigned int activated;
struct domain *primary_profiler;
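
/*
 * Global profiling state machine.  Transitions are driven by the
 * XENOPROF_* operations handled in do_xenoprof_op():
 *   IDLE -> COUNTERS_RESERVED        (XENOPROF_reserve_counters)
 *   COUNTERS_RESERVED -> READY       (XENOPROF_setup_events)
 *   READY -> PROFILING               (XENOPROF_start)
 *   PROFILING -> READY               (XENOPROF_stop)
 *   COUNTERS_RESERVED/READY -> IDLE  (XENOPROF_release_counters)
 */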
int xenoprof_state = XENOPROF_IDLE;

u64 total_samples;
u64 invalid_buffer_samples;
u64 corrupted_buffer_samples;
u64 lost_samples;
u64 active_samples;
u64 passive_samples;
u64 idle_samples;
u64 others_samples;

extern int nmi_init(int *num_events, int *is_primary, char *cpu_type);
extern int nmi_reserve_counters(void);
extern int nmi_setup_events(void);
extern int nmi_enable_virq(void);
extern int nmi_start(void);
extern void nmi_stop(void);
extern void nmi_disable_virq(void);
extern void nmi_release_counters(void);
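
/*
 * Domain classification for a profiling session: an active domain runs
 * a xenoprof-enabled kernel and consumes samples from its own shared
 * buffers; a passive domain is profiled on its behalf, with its sample
 * buffers shared with dom0 instead.  Samples for any other domain are
 * only counted in others_samples.
 */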
int is_active(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
}

int is_passive(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
}

int is_profiled(struct domain *d)
{
    return (is_active(d) || is_passive(d));
}

static void xenoprof_reset_stat(void)
{
    total_samples = 0;
    invalid_buffer_samples = 0;
    corrupted_buffer_samples = 0;
    lost_samples = 0;
    active_samples = 0;
    passive_samples = 0;
    idle_samples = 0;
    others_samples = 0;
}

static void xenoprof_reset_buf(struct domain *d)
{
    int j;
    struct xenoprof_buf *buf;

    if ( d->xenoprof == NULL )
    {
        printk("xenoprof_reset_buf: ERROR - unexpected NULL xenoprof pointer\n");
        return;
    }

    for ( j = 0; j < MAX_VIRT_CPUS; j++ )
    {
        buf = d->xenoprof->vcpu[j].buffer;
        if ( buf != NULL )
        {
            buf->event_head = 0;
            buf->event_tail = 0;
        }
    }
}
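
/*
 * Allocate the raw sample buffer from the Xen heap and share each page
 * writably with domain d, so the guest kernel can map the buffer and
 * consume samples directly.  Returns NULL on allocation failure.
 */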
char *alloc_xenoprof_buf(struct domain *d, int npages)
{
    char *rawbuf;
    int i, order;

    /* allocate pages to store sample buffer shared with domain */
    order = get_order_from_pages(npages);
    rawbuf = alloc_xenheap_pages(order);
    if ( rawbuf == NULL )
    {
        printk("alloc_xenoprof_buf(): memory allocation failed\n");
        return NULL;
    }

    /* Share pages so that kernel can map it */
    for ( i = 0; i < npages; i++ )
        share_xen_page_with_guest(
            virt_to_page(rawbuf + i * PAGE_SIZE),
            d, XENSHARE_writable);

    return rawbuf;
}
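
/*
 * Allocate and initialise a domain's struct xenoprof.  Each vcpu gets
 * its own buffer: a struct xenoprof_buf header plus max_samples event
 * slots (the struct already embeds one event_log entry, hence the
 * "max_samples - 1" below).  Buffers are shrunk, if necessary, so that
 * all nvcpu of them fit within MAX_OPROF_SHARED_PAGES pages; e.g. with
 * 4KiB pages and 4 vcpus, each buffer is capped at 32KiB.  For passive
 * domains the buffer is shared with dom0 rather than the domain itself.
 */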
int alloc_xenoprof_struct(struct domain *d, int max_samples, int is_passive)
{
    struct vcpu *v;
    int nvcpu, npages, bufsize, max_bufsize;
    int i;

    d->xenoprof = xmalloc(struct xenoprof);
    if ( d->xenoprof == NULL )
    {
        printk("alloc_xenoprof_struct(): memory allocation (xmalloc) failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof, 0, sizeof(*d->xenoprof));

    nvcpu = 0;
    for_each_vcpu ( d, v )
        nvcpu++;

    /* reduce buffer size if necessary to limit pages allocated */
    bufsize = sizeof(struct xenoprof_buf) +
        (max_samples - 1) * sizeof(struct event_log);
    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
    if ( bufsize > max_bufsize )
    {
        bufsize = max_bufsize;
        max_samples = ( (max_bufsize - sizeof(struct xenoprof_buf)) /
                        sizeof(struct event_log) ) + 1;
    }

    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;

    d->xenoprof->rawbuf = alloc_xenoprof_buf(is_passive ? dom0 : d, npages);
    if ( d->xenoprof->rawbuf == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        return -ENOMEM;
    }

    d->xenoprof->npages = npages;
    d->xenoprof->nbuf = nvcpu;
    d->xenoprof->bufsize = bufsize;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;

    /* Update buffer pointers for active vcpus */
    i = 0;
    for_each_vcpu ( d, v )
    {
        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer =
            (struct xenoprof_buf *)&d->xenoprof->rawbuf[i * bufsize];
        d->xenoprof->vcpu[v->vcpu_id].buffer->event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer->vcpu_id = v->vcpu_id;

        i++;
        /* in the unlikely case that the number of active vcpus changes */
        if ( i >= nvcpu )
            break;
    }

    return 0;
}

void free_xenoprof_pages(struct domain *d)
{
    struct xenoprof *x;
    int order;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    if ( x->rawbuf != NULL )
    {
        order = get_order_from_pages(x->npages);
        free_xenheap_pages(x->rawbuf, order);
    }

    xfree(x);
    d->xenoprof = NULL;
}

int active_index(struct domain *d)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_domains[i] == d )
            return i;

    return -1;
}
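
/*
 * Mark a registered active domain as ready to receive samples; called
 * when the domain enables its xenoprof VIRQ.  XENOPROF_start only
 * proceeds once every registered domain is ready, i.e. when
 * activated == adomains.
 */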
int set_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 1;
    x->domain_type = XENOPROF_DOMAIN_ACTIVE;
    active_ready[ind] = 1;
    activated++;

    return 0;
}

int reset_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 0;
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
    active_ready[ind] = 0;
    active_domains[ind] = NULL;
    activated--;
    put_domain(d);

    if ( activated == 0 )
        adomains = 0;

    return 0;
}

void reset_passive(struct domain *d)
{
    struct xenoprof *x;

    if ( d == NULL )
        return;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    x->domain_type = XENOPROF_DOMAIN_IGNORED;
}

void reset_active_list(void)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_ready[i] )
            reset_active(active_domains[i]);

    adomains = 0;
    activated = 0;
}

void reset_passive_list(void)
{
    int i;

    for ( i = 0; i < pdomains; i++ )
    {
        reset_passive(passive_domains[i]);
        put_domain(passive_domains[i]);
        passive_domains[i] = NULL;
    }

    pdomains = 0;
}

int add_active_list(domid_t domid)
{
    struct domain *d;

    if ( adomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    d = find_domain_by_id(domid);
    if ( d == NULL )
        return -EINVAL;

    active_domains[adomains] = d;
    active_ready[adomains] = 0;
    adomains++;

    return 0;
}
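
/*
 * Register a passive domain.  Unlike active domains, a passive domain
 * does not participate in profiling itself: Xen allocates its sample
 * buffers, shares them with dom0, and returns their layout (nbuf,
 * bufsize, buf_maddr) to the caller so the profiler can map and drain
 * them.
 */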
int add_passive_list(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_passive passive;
    struct domain *d;
    int ret = 0;

    if ( pdomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    if ( copy_from_guest(&passive, arg, 1) )
        return -EFAULT;

    d = find_domain_by_id(passive.domain_id);
    if ( d == NULL )
        return -EINVAL;

    if ( (d->xenoprof == NULL) &&
         ((ret = alloc_xenoprof_struct(d, passive.max_samples, 1)) < 0) )
    {
        put_domain(d);
        return -ENOMEM;
    }

    d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
    passive.nbuf = d->xenoprof->nbuf;
    passive.bufsize = d->xenoprof->bufsize;
    passive.buf_maddr = __pa(d->xenoprof->rawbuf);

    if ( copy_to_guest(arg, &passive, 1) )
    {
        put_domain(d);
        return -EFAULT;
    }

    passive_domains[pdomains] = d;
    pdomains++;

    return ret;
}
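
/*
 * Record one sample in the per-vcpu buffer shared with the profiling
 * kernel.  The buffer is a single-producer/single-consumer ring: Xen
 * writes at event_head, the guest consumes at event_tail, and one slot
 * is always left empty so that a full ring can be distinguished from an
 * empty one.  E.g. with size == 4, head == 3 and tail == 0 the ring is
 * full and the sample is dropped (counted in lost_samples).  Since the
 * indices live in guest-writable memory, they are range-checked before
 * use.
 */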
void xenoprof_log_event(
    struct vcpu *vcpu, unsigned long eip, int mode, int event)
{
    struct xenoprof_vcpu *v;
    struct xenoprof_buf *buf;
    int head;
    int tail;
    int size;

    total_samples++;

    /* ignore samples of un-monitored domains */
    /* Count samples in idle separate from other unmonitored domains */
    if ( !is_profiled(vcpu->domain) )
    {
        others_samples++;
        return;
    }

    v = &vcpu->domain->xenoprof->vcpu[vcpu->vcpu_id];

    /* Sanity check. Should never happen */
    if ( v->buffer == NULL )
    {
        invalid_buffer_samples++;
        return;
    }

    buf = vcpu->domain->xenoprof->vcpu[vcpu->vcpu_id].buffer;

    head = buf->event_head;
    tail = buf->event_tail;
    size = v->event_size;

    /* make sure indexes in shared buffer are sane */
    if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
    {
        corrupted_buffer_samples++;
        return;
    }

    if ( (head == tail - 1) || (head == size - 1 && tail == 0) )
    {
        buf->lost_samples++;
        lost_samples++;
    }
    else
    {
        buf->event_log[head].eip = eip;
        buf->event_log[head].mode = mode;
        buf->event_log[head].event = event;
        head++;
        if ( head >= size )
            head = 0;
        buf->event_head = head;
        if ( is_active(vcpu->domain) )
            active_samples++;
        else
            passive_samples++;
        if ( mode == 0 )
            buf->user_samples++;
        else if ( mode == 1 )
            buf->kernel_samples++;
        else
            buf->xen_samples++;
    }
}
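
/*
 * Handle XENOPROF_init: probe the NMI performance-counter driver,
 * record whether the caller is the primary profiler, allocate the
 * domain's sample buffers on first use, and return the cpu type,
 * number of events and buffer layout to the guest.
 */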
int xenoprof_op_init(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_init xenoprof_init;
    int is_primary, num_events;
    struct domain *d = current->domain;
    int ret;

    if ( copy_from_guest(&xenoprof_init, arg, 1) )
        return -EFAULT;

    ret = nmi_init(&num_events,
                   &is_primary,
                   xenoprof_init.cpu_type);
    if ( ret < 0 )
        goto err;

    if ( is_primary )
        primary_profiler = current->domain;

    /*
     * We allocate the xenoprof struct and buffers only the first time
     * xenoprof_init is called.  Memory is then kept until the domain
     * is destroyed.
     */
    if ( (d->xenoprof == NULL) &&
         ((ret = alloc_xenoprof_struct(d, xenoprof_init.max_samples, 0)) < 0) )
        goto err;

    xenoprof_reset_buf(d);

    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->is_primary = is_primary;

    xenoprof_init.is_primary = is_primary;
    xenoprof_init.num_events = num_events;
    xenoprof_init.nbuf = d->xenoprof->nbuf;
    xenoprof_init.bufsize = d->xenoprof->bufsize;
    xenoprof_init.buf_maddr = __pa(d->xenoprof->rawbuf);

    if ( copy_to_guest(arg, &xenoprof_init, 1) )
    {
        ret = -EFAULT;
        goto err;
    }

    return ret;

 err:
    if ( primary_profiler == current->domain )
        primary_profiler = NULL;
    return ret;
}
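
/*
 * Operations that change global profiling state are restricted to the
 * primary profiler, i.e. the domain for which nmi_init() reported
 * is_primary (normally dom0).
 */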
#define PRIV_OP(op) ( (op == XENOPROF_set_active)       \
                   || (op == XENOPROF_reserve_counters) \
                   || (op == XENOPROF_setup_events)     \
                   || (op == XENOPROF_start)            \
                   || (op == XENOPROF_stop)             \
                   || (op == XENOPROF_release_counters) \
                   || (op == XENOPROF_shutdown))

int do_xenoprof_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    int ret = 0;

    if ( PRIV_OP(op) && (current->domain != primary_profiler) )
    {
        printk("xenoprof: dom %d denied privileged operation %d\n",
               current->domain->domain_id, op);
        return -EPERM;
    }

    switch ( op )
    {
    case XENOPROF_init:
        ret = xenoprof_op_init(arg);
        break;

    case XENOPROF_reset_active_list:
    {
        reset_active_list();
        ret = 0;
        break;
    }
    case XENOPROF_reset_passive_list:
    {
        reset_passive_list();
        ret = 0;
        break;
    }
    case XENOPROF_set_active:
    {
        domid_t domid;
        if ( xenoprof_state != XENOPROF_IDLE )
            return -EPERM;
        if ( copy_from_guest(&domid, arg, 1) )
            return -EFAULT;
        ret = add_active_list(domid);
        break;
    }
    case XENOPROF_set_passive:
    {
        if ( xenoprof_state != XENOPROF_IDLE )
            return -EPERM;
        ret = add_passive_list(arg);
        break;
    }
    case XENOPROF_reserve_counters:
        if ( xenoprof_state != XENOPROF_IDLE )
            return -EPERM;
        ret = nmi_reserve_counters();
        if ( !ret )
            xenoprof_state = XENOPROF_COUNTERS_RESERVED;
        break;

    case XENOPROF_counter:
    {
        struct xenoprof_counter counter;
        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
            return -EPERM;
        if ( adomains == 0 )
            return -EPERM;

        if ( copy_from_guest(&counter, arg, 1) )
            return -EFAULT;

        /* counter_config[] has only OP_MAX_COUNTER entries */
        if ( counter.ind >= OP_MAX_COUNTER )
            return -E2BIG;

        counter_config[counter.ind].count     = (unsigned long)counter.count;
        counter_config[counter.ind].enabled   = (unsigned long)counter.enabled;
        counter_config[counter.ind].event     = (unsigned long)counter.event;
        counter_config[counter.ind].kernel    = (unsigned long)counter.kernel;
        counter_config[counter.ind].user      = (unsigned long)counter.user;
        counter_config[counter.ind].unit_mask = (unsigned long)counter.unit_mask;

        ret = 0;
        break;
    }

    case XENOPROF_setup_events:
        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
            return -EPERM;
        ret = nmi_setup_events();
        if ( !ret )
            xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_enable_virq:
    {
        int i;
        if ( current->domain == primary_profiler )
        {
            nmi_enable_virq();
            xenoprof_reset_stat();
            for ( i = 0; i < pdomains; i++ )
                xenoprof_reset_buf(passive_domains[i]);
        }
        xenoprof_reset_buf(current->domain);
        ret = set_active(current->domain);
        break;
    }

    case XENOPROF_start:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_READY) &&
             (activated == adomains) )
            ret = nmi_start();

        if ( ret == 0 )
            xenoprof_state = XENOPROF_PROFILING;
        break;

    case XENOPROF_stop:
        if ( xenoprof_state != XENOPROF_PROFILING )
            return -EPERM;
        nmi_stop();
        xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_disable_virq:
        if ( (xenoprof_state == XENOPROF_PROFILING) &&
             (is_active(current->domain)) )
            return -EPERM;
        ret = reset_active(current->domain);
        break;

    case XENOPROF_release_counters:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) ||
             (xenoprof_state == XENOPROF_READY) )
        {
            xenoprof_state = XENOPROF_IDLE;
            nmi_release_counters();
            nmi_disable_virq();
            reset_passive_list();
            ret = 0;
        }
        break;

    case XENOPROF_shutdown:
        ret = -EPERM;
        if ( xenoprof_state == XENOPROF_IDLE )
        {
            activated = 0;
            adomains = 0;
            primary_profiler = NULL;
            ret = 0;
        }
        break;

    default:
        ret = -EINVAL;
    }

    if ( ret < 0 )
        printk("xenoprof: operation %d failed for dom %d (status: %d)\n",
               op, current->domain->domain_id, ret);

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */