xen/common/xenoprof.c @ 12541:1d7d5d48fcdc (ia64/xen-unstable)

[XENOPROFILE] Make xenoprof arch-generic, with support for dynamically
mapping/unmapping the xenoprof buffer and for auto-translated mode.
Renamed xenoprof_get_buffer::buf_maddr and xenoprof_passive::buf_maddr to
xenoprof_get_buffer::buf_gmaddr and xenoprof_passive::buf_gmaddr to support
auto-translated mode: with auto-translated mode enabled, the field holds a
guest machine address (gmaddr), not a machine address (maddr).

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>

Simplify the share function.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Nov 22 10:09:28 2006 +0000 (2006-11-22)
parents da5c5fc8908f
children 5e70a5184746
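
The renamed buf_gmaddr fields are part of the public xenoprof hypercall
interface. Below is a minimal sketch of the two affected structures,
inferred from how the handlers in this file use them (the authoritative
definitions live in the public header public/xenoprof.h; the exact field
types shown here are assumptions):

    struct xenoprof_get_buffer {
        int32_t  max_samples;  /* IN: requested samples per vcpu buffer */
        int32_t  nbuf;         /* OUT: number of per-vcpu buffers */
        int32_t  bufsize;      /* OUT: size of each buffer, in bytes */
        uint64_t buf_gmaddr;   /* IN/OUT: buffer address; a gmaddr if the
                                  guest is auto-translated, else a maddr */
    };

    struct xenoprof_passive {
        domid_t  domain_id;    /* IN: domain to profile passively */
        int32_t  max_samples;
        int32_t  nbuf;
        int32_t  bufsize;
        uint64_t buf_gmaddr;   /* same IN/OUT convention as above */
    };
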
/*
 * Copyright (C) 2005 Hewlett-Packard Co.
 * written by Aravind Menon & Jose Renato Santos
 *   (email: xenoprof@groups.hp.com)
 *
 * arch generic xenoprof and IA64 support.
 * dynamic map/unmap xenoprof buffer support.
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 */

#include <xen/guest_access.h>
#include <xen/sched.h>
#include <public/xenoprof.h>
#include <asm/shadow.h>

/* Limit the number of pages used for the shared buffer (per domain) */
#define MAX_OPROF_SHARED_PAGES 32

/* Lock protecting the following global state */
static DEFINE_SPINLOCK(xenoprof_lock);

struct domain *active_domains[MAX_OPROF_DOMAINS];
int active_ready[MAX_OPROF_DOMAINS];
unsigned int adomains;

struct domain *passive_domains[MAX_OPROF_DOMAINS];
unsigned int pdomains;

unsigned int activated;
struct domain *xenoprof_primary_profiler;
int xenoprof_state = XENOPROF_IDLE;

u64 total_samples;
u64 invalid_buffer_samples;
u64 corrupted_buffer_samples;
u64 lost_samples;
u64 active_samples;
u64 passive_samples;
u64 idle_samples;
u64 others_samples;
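
/*
 * Note: the sample counters above are updated by xenoprof_log_event()
 * without holding xenoprof_lock; the lock serialises only the control
 * operations and the domain lists, not the logging fast path.
 */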
int is_active(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
}

int is_passive(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
}

int is_profiled(struct domain *d)
{
    return (is_active(d) || is_passive(d));
}

static void xenoprof_reset_stat(void)
{
    total_samples = 0;
    invalid_buffer_samples = 0;
    corrupted_buffer_samples = 0;
    lost_samples = 0;
    active_samples = 0;
    passive_samples = 0;
    idle_samples = 0;
    others_samples = 0;
}

static void xenoprof_reset_buf(struct domain *d)
{
    int j;
    struct xenoprof_buf *buf;

    if ( d->xenoprof == NULL )
    {
        printk("xenoprof_reset_buf: ERROR - unexpected xenoprof NULL pointer\n");
        return;
    }

    for ( j = 0; j < MAX_VIRT_CPUS; j++ )
    {
        buf = d->xenoprof->vcpu[j].buffer;
        if ( buf != NULL )
        {
            buf->event_head = 0;
            buf->event_tail = 0;
        }
    }
}
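
/*
 * Buffer sharing helpers.  The sample buffer is Xen-heap memory; sharing
 * grants the target domain the right to map the underlying MFNs.  For an
 * auto-translated guest, xenoprof_shared_gmfn_with_guest() additionally
 * enters each page into the guest's physmap at the guest-chosen gmaddr
 * via the arch hook xenoprof_shared_gmfn().
 */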
static void
share_xenoprof_page_with_guest(struct domain *d, unsigned long mfn, int npages)
{
    int i;

    for ( i = 0; i < npages; i++ )
        share_xen_page_with_guest(mfn_to_page(mfn + i), d, XENSHARE_writable);
}

static void
unshare_xenoprof_page_with_guest(unsigned long mfn, int npages)
{
    int i;

    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        BUG_ON(page_get_owner(page) != current->domain);
        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);
    }
}

static void
xenoprof_shared_gmfn_with_guest(
    struct domain *d, unsigned long maddr, unsigned long gmaddr, int npages)
{
    int i;

    for ( i = 0; i < npages; i++, maddr += PAGE_SIZE, gmaddr += PAGE_SIZE )
    {
        BUG_ON(page_get_owner(maddr_to_page(maddr)) != d);
        xenoprof_shared_gmfn(d, gmaddr, maddr);
    }
}

static char *alloc_xenoprof_buf(struct domain *d, int npages, uint64_t gmaddr)
{
    char *rawbuf;
    int order;

    /* allocate pages to store sample buffer shared with domain */
    order = get_order_from_pages(npages);
    rawbuf = alloc_xenheap_pages(order);
    if ( rawbuf == NULL )
    {
        printk("alloc_xenoprof_buf(): memory allocation failed\n");
        return NULL;
    }

    return rawbuf;
}
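
/*
 * One struct xenoprof_buf per vcpu, packed contiguously in rawbuf.  Each
 * buffer is sized as
 *   sizeof(struct xenoprof_buf) + (max_samples - 1) * sizeof(struct event_log)
 * (the struct carries one embedded event_log entry), and max_samples is
 * clamped so that all nvcpu buffers fit within MAX_OPROF_SHARED_PAGES pages.
 */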
static int alloc_xenoprof_struct(
    struct domain *d, int max_samples, int is_passive, uint64_t gmaddr)
{
    struct vcpu *v;
    int nvcpu, npages, bufsize, max_bufsize;
    unsigned max_max_samples;
    int i;

    d->xenoprof = xmalloc(struct xenoprof);

    if ( d->xenoprof == NULL )
    {
        printk("alloc_xenoprof_struct(): memory allocation (xmalloc) failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof, 0, sizeof(*d->xenoprof));

    nvcpu = 0;
    for_each_vcpu ( d, v )
        nvcpu++;

    /* reduce max_samples if necessary to limit pages allocated */
    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
    max_max_samples = ( (max_bufsize - sizeof(struct xenoprof_buf)) /
                        sizeof(struct event_log) ) + 1;
    if ( (unsigned)max_samples > max_max_samples )
        max_samples = max_max_samples;

    bufsize = sizeof(struct xenoprof_buf) +
        (max_samples - 1) * sizeof(struct event_log);
    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;

    d->xenoprof->rawbuf = alloc_xenoprof_buf(is_passive ? dom0 : d, npages,
                                             gmaddr);

    if ( d->xenoprof->rawbuf == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        return -ENOMEM;
    }

    d->xenoprof->npages = npages;
    d->xenoprof->nbuf = nvcpu;
    d->xenoprof->bufsize = bufsize;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;

    /* Update buffer pointers for active vcpus */
    i = 0;
    for_each_vcpu ( d, v )
    {
        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer =
            (struct xenoprof_buf *)&d->xenoprof->rawbuf[i * bufsize];
        d->xenoprof->vcpu[v->vcpu_id].buffer->event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer->vcpu_id = v->vcpu_id;

        i++;
        /* in the unlikely case that the number of active vcpus changes */
        if ( i >= nvcpu )
            break;
    }

    return 0;
}

void free_xenoprof_pages(struct domain *d)
{
    struct xenoprof *x;
    int order;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    if ( x->rawbuf != NULL )
    {
        order = get_order_from_pages(x->npages);
        free_xenheap_pages(x->rawbuf, order);
    }

    xfree(x);
    d->xenoprof = NULL;
}

static int active_index(struct domain *d)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_domains[i] == d )
            return i;

    return -1;
}

static int set_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 1;
    x->domain_type = XENOPROF_DOMAIN_ACTIVE;
    active_ready[ind] = 1;
    activated++;

    return 0;
}

static int reset_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 0;
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
    active_ready[ind] = 0;
    active_domains[ind] = NULL;
    activated--;
    put_domain(d);

    if ( activated <= 0 )
        adomains = 0;

    return 0;
}

static void reset_passive(struct domain *d)
{
    struct xenoprof *x;

    if ( d == NULL )
        return;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    unshare_xenoprof_page_with_guest(virt_to_mfn(x->rawbuf), x->npages);
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
}

static void reset_active_list(void)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_ready[i] )
            reset_active(active_domains[i]);

    adomains = 0;
    activated = 0;
}

static void reset_passive_list(void)
{
    int i;

    for ( i = 0; i < pdomains; i++ )
    {
        reset_passive(passive_domains[i]);
        put_domain(passive_domains[i]);
        passive_domains[i] = NULL;
    }

    pdomains = 0;
}

static int add_active_list(domid_t domid)
{
    struct domain *d;

    if ( adomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    d = find_domain_by_id(domid);
    if ( d == NULL )
        return -EINVAL;

    active_domains[adomains] = d;
    active_ready[adomains] = 0;
    adomains++;

    return 0;
}
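
/*
 * Passive profiling: the passive domain's buffer is shared with the
 * calling (primary) profiler domain rather than with the passive domain
 * itself, so the profiler can drain samples on its behalf.
 */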
static int add_passive_list(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_passive passive;
    struct domain *d;
    int ret = 0;

    if ( pdomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    if ( copy_from_guest(&passive, arg, 1) )
        return -EFAULT;

    d = find_domain_by_id(passive.domain_id);
    if ( d == NULL )
        return -EINVAL;

    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(
            d, passive.max_samples, 1, passive.buf_gmaddr);
        if ( ret < 0 )
        {
            put_domain(d);
            return -ENOMEM;
        }
    }

    share_xenoprof_page_with_guest(
        current->domain, virt_to_mfn(d->xenoprof->rawbuf),
        d->xenoprof->npages);

    d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
    passive.nbuf = d->xenoprof->nbuf;
    passive.bufsize = d->xenoprof->bufsize;
    if ( !shadow_mode_translate(d) )
        passive.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            current->domain, __pa(d->xenoprof->rawbuf),
            passive.buf_gmaddr, d->xenoprof->npages);

    if ( copy_to_guest(arg, &passive, 1) )
    {
        put_domain(d);
        return -EFAULT;
    }

    passive_domains[pdomains] = d;
    pdomains++;

    return ret;
}
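
/*
 * Log one sample into the per-vcpu ring shared with the profiler.  Xen is
 * the only producer (advances event_head); the guest is the only consumer
 * (advances event_tail).  One slot is always left empty so that
 * head == tail unambiguously means "empty"; when the ring is full the
 * sample is dropped and accounted as lost.
 */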
void xenoprof_log_event(
    struct vcpu *vcpu, unsigned long eip, int mode, int event)
{
    struct xenoprof_vcpu *v;
    struct xenoprof_buf *buf;
    int head;
    int tail;
    int size;

    total_samples++;

    /* Ignore samples of unmonitored domains. */
    /* Count idle samples separately from those of other unmonitored domains. */
    if ( !is_profiled(vcpu->domain) )
    {
        others_samples++;
        return;
    }

    v = &vcpu->domain->xenoprof->vcpu[vcpu->vcpu_id];

    /* Sanity check. Should never happen. */
    if ( v->buffer == NULL )
    {
        invalid_buffer_samples++;
        return;
    }

    buf = vcpu->domain->xenoprof->vcpu[vcpu->vcpu_id].buffer;

    head = buf->event_head;
    tail = buf->event_tail;
    size = v->event_size;

    /* make sure indexes in shared buffer are sane */
    if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
    {
        corrupted_buffer_samples++;
        return;
    }

    if ( (head == tail - 1) || ((head == size - 1) && (tail == 0)) )
    {
        buf->lost_samples++;
        lost_samples++;
    }
    else
    {
        buf->event_log[head].eip = eip;
        buf->event_log[head].mode = mode;
        buf->event_log[head].event = event;
        head++;
        if ( head >= size )
            head = 0;
        buf->event_head = head;
        if ( is_active(vcpu->domain) )
            active_samples++;
        else
            passive_samples++;
        if ( mode == 0 )
            buf->user_samples++;
        else if ( mode == 1 )
            buf->kernel_samples++;
        else
            buf->xen_samples++;
    }
}

static int xenoprof_op_init(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_init xenoprof_init;
    int ret;

    if ( copy_from_guest(&xenoprof_init, arg, 1) )
        return -EFAULT;

    if ( (ret = xenoprof_arch_init(&xenoprof_init.num_events,
                                   &xenoprof_init.is_primary,
                                   xenoprof_init.cpu_type)) )
        return ret;

    if ( copy_to_guest(arg, &xenoprof_init, 1) )
        return -EFAULT;

    if ( xenoprof_init.is_primary )
        xenoprof_primary_profiler = current->domain;

    return 0;
}
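
/*
 * For a non-auto-translated guest, buf_gmaddr is an output: we return the
 * machine address of the buffer and the guest maps it itself.  For an
 * auto-translated guest, buf_gmaddr is an input: the pages are entered
 * into the guest's physmap at that guest-supplied address.
 */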
static int xenoprof_op_get_buffer(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_get_buffer xenoprof_get_buffer;
    struct domain *d = current->domain;
    int ret;

    if ( copy_from_guest(&xenoprof_get_buffer, arg, 1) )
        return -EFAULT;

    /*
     * We allocate the xenoprof struct and buffers only the first time
     * get_buffer is called. Memory is then kept until the domain is
     * destroyed.
     */
    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(
            d, xenoprof_get_buffer.max_samples, 0,
            xenoprof_get_buffer.buf_gmaddr);
        if ( ret < 0 )
            return ret;
    }

    share_xenoprof_page_with_guest(
        d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages);

    xenoprof_reset_buf(d);

    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->is_primary = (xenoprof_primary_profiler == current->domain);

    xenoprof_get_buffer.nbuf = d->xenoprof->nbuf;
    xenoprof_get_buffer.bufsize = d->xenoprof->bufsize;
    if ( !shadow_mode_translate(d) )
        xenoprof_get_buffer.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            d, __pa(d->xenoprof->rawbuf), xenoprof_get_buffer.buf_gmaddr,
            d->xenoprof->npages);

    if ( copy_to_guest(arg, &xenoprof_get_buffer, 1) )
        return -EFAULT;

    return 0;
}

#define NONPRIV_OP(op) ( (op == XENOPROF_init)          \
                      || (op == XENOPROF_enable_virq)   \
                      || (op == XENOPROF_disable_virq)  \
                      || (op == XENOPROF_get_buffer) )
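
/*
 * Hypercall entry point.  Operations outside NONPRIV_OP are restricted to
 * the primary profiler domain.  Everything below runs under xenoprof_lock,
 * which serialises control operations against each other (but not against
 * the sample-logging fast path above).
 */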
int do_xenoprof_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    int ret = 0;

    if ( (op < 0) || (op > XENOPROF_last_op) )
    {
        printk("xenoprof: invalid operation %d for domain %d\n",
               op, current->domain->domain_id);
        return -EINVAL;
    }

    if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) )
    {
        printk("xenoprof: dom %d denied privileged operation %d\n",
               current->domain->domain_id, op);
        return -EPERM;
    }

    spin_lock(&xenoprof_lock);

    switch ( op )
    {
    case XENOPROF_init:
        ret = xenoprof_op_init(arg);
        break;

    case XENOPROF_get_buffer:
        ret = xenoprof_op_get_buffer(arg);
        break;

    case XENOPROF_reset_active_list:
    {
        reset_active_list();
        ret = 0;
        break;
    }
    case XENOPROF_reset_passive_list:
    {
        reset_passive_list();
        ret = 0;
        break;
    }
    case XENOPROF_set_active:
    {
        domid_t domid;
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        if ( copy_from_guest(&domid, arg, 1) )
        {
            ret = -EFAULT;
            break;
        }
        ret = add_active_list(domid);
        break;
    }
    case XENOPROF_set_passive:
    {
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        ret = add_passive_list(arg);
        break;
    }
    case XENOPROF_reserve_counters:
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_reserve_counters();
        if ( !ret )
            xenoprof_state = XENOPROF_COUNTERS_RESERVED;
        break;

    case XENOPROF_counter:
        if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) ||
             (adomains == 0) )
        {
            ret = -EPERM;
            break;
        }

        ret = xenoprof_arch_counter(arg);
        break;

    case XENOPROF_setup_events:
        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_setup_events();
        if ( !ret )
            xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_enable_virq:
    {
        int i;
        if ( current->domain == xenoprof_primary_profiler )
        {
            xenoprof_arch_enable_virq();
            xenoprof_reset_stat();
            for ( i = 0; i < pdomains; i++ )
                xenoprof_reset_buf(passive_domains[i]);
        }
        xenoprof_reset_buf(current->domain);
        ret = set_active(current->domain);
        break;
    }

    case XENOPROF_start:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_READY) &&
             (activated == adomains) )
            ret = xenoprof_arch_start();
        if ( ret == 0 )
            xenoprof_state = XENOPROF_PROFILING;
        break;

    case XENOPROF_stop:
        if ( xenoprof_state != XENOPROF_PROFILING )
        {
            ret = -EPERM;
            break;
        }
        xenoprof_arch_stop();
        xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_disable_virq:
    {
        struct xenoprof *x;
        if ( (xenoprof_state == XENOPROF_PROFILING) &&
             (is_active(current->domain)) )
        {
            ret = -EPERM;
            break;
        }
        if ( (ret = reset_active(current->domain)) != 0 )
            break;
        x = current->domain->xenoprof;
        unshare_xenoprof_page_with_guest(virt_to_mfn(x->rawbuf), x->npages);
        break;
    }

    case XENOPROF_release_counters:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) ||
             (xenoprof_state == XENOPROF_READY) )
        {
            xenoprof_state = XENOPROF_IDLE;
            xenoprof_arch_release_counters();
            xenoprof_arch_disable_virq();
            reset_passive_list();
            ret = 0;
        }
        break;

    case XENOPROF_shutdown:
        ret = -EPERM;
        if ( xenoprof_state == XENOPROF_IDLE )
        {
            activated = 0;
            adomains = 0;
            xenoprof_primary_profiler = NULL;
            ret = 0;
        }
        break;

    default:
        ret = -ENOSYS;
    }

    spin_unlock(&xenoprof_lock);

    if ( ret < 0 )
        printk("xenoprof: operation %d failed for dom %d (status: %d)\n",
               op, current->domain->domain_id, ret);

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */