xen/common/xenoprof.c @ 13089:8348e3a1a31e (ia64/xen-unstable)

[XENOPROF] Fix buffer sharing with auto-translate guests.
Signed-off-by: Jose Renato Santos <jsantos@hpl.hp.com>

author:   kfraser@localhost.localdomain
date:     Tue Dec 19 11:00:05 2006 +0000
parents:  b08e7ed94991
children: 62f3df867d0e
/*
 * Copyright (C) 2005 Hewlett-Packard Co.
 * written by Aravind Menon & Jose Renato Santos
 * (email: xenoprof@groups.hp.com)
 *
 * arch generic xenoprof and IA64 support.
 * dynamic map/unmap xenoprof buffer support.
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 */

#include <xen/guest_access.h>
#include <xen/sched.h>
#include <public/xenoprof.h>
#include <asm/shadow.h>

/* Limit the number of pages used for the shared buffer (per domain) */
#define MAX_OPROF_SHARED_PAGES 32

/* Lock protecting the following global state */
static DEFINE_SPINLOCK(xenoprof_lock);

struct domain *active_domains[MAX_OPROF_DOMAINS];
int active_ready[MAX_OPROF_DOMAINS];
unsigned int adomains;

struct domain *passive_domains[MAX_OPROF_DOMAINS];
unsigned int pdomains;

unsigned int activated;
struct domain *xenoprof_primary_profiler;
int xenoprof_state = XENOPROF_IDLE;
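
/*
 * xenoprof_state transitions, as driven by do_xenoprof_op() below:
 *   XENOPROF_IDLE -> XENOPROF_COUNTERS_RESERVED      (XENOPROF_reserve_counters)
 *   XENOPROF_COUNTERS_RESERVED -> XENOPROF_READY     (XENOPROF_setup_events)
 *   XENOPROF_READY -> XENOPROF_PROFILING             (XENOPROF_start)
 *   XENOPROF_PROFILING -> XENOPROF_READY             (XENOPROF_stop)
 *   XENOPROF_COUNTERS_RESERVED/READY -> XENOPROF_IDLE (XENOPROF_release_counters)
 */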

u64 total_samples;
u64 invalid_buffer_samples;
u64 corrupted_buffer_samples;
u64 lost_samples;
u64 active_samples;
u64 passive_samples;
u64 idle_samples;
u64 others_samples;

int is_active(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
}

int is_passive(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
}

int is_profiled(struct domain *d)
{
    return (is_active(d) || is_passive(d));
}

static void xenoprof_reset_stat(void)
{
    total_samples = 0;
    invalid_buffer_samples = 0;
    corrupted_buffer_samples = 0;
    lost_samples = 0;
    active_samples = 0;
    passive_samples = 0;
    idle_samples = 0;
    others_samples = 0;
}

static void xenoprof_reset_buf(struct domain *d)
{
    int j;
    struct xenoprof_buf *buf;

    if ( d->xenoprof == NULL )
    {
        printk("xenoprof_reset_buf: ERROR - unexpected "
               "xenoprof NULL pointer\n");
        return;
    }

    for ( j = 0; j < MAX_VIRT_CPUS; j++ )
    {
        buf = d->xenoprof->vcpu[j].buffer;
        if ( buf != NULL )
        {
            buf->event_head = 0;
            buf->event_tail = 0;
        }
    }
}

static int
share_xenoprof_page_with_guest(struct domain *d, unsigned long mfn, int npages)
{
    int i;

    /* Check if previous page owner has released the page. */
    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 )
        {
            gdprintk(XENLOG_INFO, "mfn 0x%lx page->count_info 0x%x\n",
                     mfn + i, page->count_info);
            return -EBUSY;
        }
        page_set_owner(page, NULL);
    }
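
    /* All pages verified free; now share them writable with the guest. */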
    for ( i = 0; i < npages; i++ )
        share_xen_page_with_guest(mfn_to_page(mfn + i), d, XENSHARE_writable);

    return 0;
}

static void
unshare_xenoprof_page_with_guest(struct xenoprof *x)
{
    int i, npages = x->npages;
    unsigned long mfn = virt_to_mfn(x->rawbuf);

    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        BUG_ON(page_get_owner(page) != current->domain);
        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);
    }
}

static void
xenoprof_shared_gmfn_with_guest(
    struct domain *d, unsigned long maddr, unsigned long gmaddr, int npages)
{
    int i;

    for ( i = 0; i < npages; i++, maddr += PAGE_SIZE, gmaddr += PAGE_SIZE )
    {
        BUG_ON(page_get_owner(maddr_to_page(maddr)) != d);
        xenoprof_shared_gmfn(d, gmaddr, maddr);
    }
}

static int alloc_xenoprof_struct(
    struct domain *d, int max_samples, int is_passive)
{
    struct vcpu *v;
    int nvcpu, npages, bufsize, max_bufsize;
    unsigned max_max_samples;
    int i;

    d->xenoprof = xmalloc(struct xenoprof);

    if ( d->xenoprof == NULL )
    {
        printk("alloc_xenoprof_struct(): memory allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof, 0, sizeof(*d->xenoprof));

    nvcpu = 0;
    for_each_vcpu ( d, v )
        nvcpu++;

    /* reduce max_samples if necessary to limit pages allocated */
    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
    max_max_samples = ( (max_bufsize - sizeof(struct xenoprof_buf)) /
                        sizeof(struct event_log) ) + 1;
    if ( (unsigned)max_samples > max_max_samples )
        max_samples = max_max_samples;

    bufsize = sizeof(struct xenoprof_buf) +
        (max_samples - 1) * sizeof(struct event_log);
    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;
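
    /*
     * Worked example (illustrative struct sizes, not the real layouts):
     * assuming PAGE_SIZE == 4096, sizeof(struct xenoprof_buf) == 64 and
     * sizeof(struct event_log) == 16, a 4-vcpu domain gets
     * max_bufsize = (32 * 4096) / 4 = 32768 bytes per vcpu, so
     * max_max_samples = (32768 - 64) / 16 + 1 = 2045. With max_samples
     * clamped to 2045, bufsize = 64 + 2044 * 16 = 32768 bytes and
     * npages = (4 * 32768 - 1) / 4096 + 1 = 32 pages, i.e. exactly
     * MAX_OPROF_SHARED_PAGES.
     */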

    d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages));
    if ( d->xenoprof->rawbuf == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        return -ENOMEM;
    }

    d->xenoprof->npages = npages;
    d->xenoprof->nbuf = nvcpu;
    d->xenoprof->bufsize = bufsize;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;

    /* Update buffer pointers for active vcpus */
    i = 0;
    for_each_vcpu ( d, v )
    {
        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer =
            (struct xenoprof_buf *)&d->xenoprof->rawbuf[i * bufsize];
        d->xenoprof->vcpu[v->vcpu_id].buffer->event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer->vcpu_id = v->vcpu_id;

        i++;
        /* in the unlikely case that the number of active vcpus changes */
        if ( i >= nvcpu )
            break;
    }

    return 0;
}

void free_xenoprof_pages(struct domain *d)
{
    struct xenoprof *x;
    int order;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    if ( x->rawbuf != NULL )
    {
        order = get_order_from_pages(x->npages);
        free_xenheap_pages(x->rawbuf, order);
    }

    xfree(x);
    d->xenoprof = NULL;
}

static int active_index(struct domain *d)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_domains[i] == d )
            return i;

    return -1;
}

static int set_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 1;
    x->domain_type = XENOPROF_DOMAIN_ACTIVE;
    active_ready[ind] = 1;
    activated++;

    return 0;
}

static int reset_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 0;
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
    active_ready[ind] = 0;
    active_domains[ind] = NULL;
    activated--;
    put_domain(d);

    if ( activated <= 0 )
        adomains = 0;

    return 0;
}

static void reset_passive(struct domain *d)
{
    struct xenoprof *x;

    if ( d == NULL )
        return;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    unshare_xenoprof_page_with_guest(x);
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
}

static void reset_active_list(void)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_ready[i] )
            reset_active(active_domains[i]);

    adomains = 0;
    activated = 0;
}

static void reset_passive_list(void)
{
    int i;

    for ( i = 0; i < pdomains; i++ )
    {
        reset_passive(passive_domains[i]);
        put_domain(passive_domains[i]);
        passive_domains[i] = NULL;
    }

    pdomains = 0;
}

static int add_active_list(domid_t domid)
{
    struct domain *d;

    if ( adomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    d = find_domain_by_id(domid);
    if ( d == NULL )
        return -EINVAL;

    active_domains[adomains] = d;
    active_ready[adomains] = 0;
    adomains++;

    return 0;
}

static int add_passive_list(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_passive passive;
    struct domain *d;
    int ret = 0;

    if ( pdomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    if ( copy_from_guest(&passive, arg, 1) )
        return -EFAULT;

    d = find_domain_by_id(passive.domain_id);
    if ( d == NULL )
        return -EINVAL;

    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, passive.max_samples, 1);
        if ( ret < 0 )
        {
            put_domain(d);
            return -ENOMEM;
        }
    }

    ret = share_xenoprof_page_with_guest(
        current->domain, virt_to_mfn(d->xenoprof->rawbuf),
        d->xenoprof->npages);
    if ( ret < 0 )
    {
        put_domain(d);
        return ret;
    }

    d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
    passive.nbuf = d->xenoprof->nbuf;
    passive.bufsize = d->xenoprof->bufsize;
    if ( !shadow_mode_translate(current->domain) )
        passive.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            current->domain, __pa(d->xenoprof->rawbuf),
            passive.buf_gmaddr, d->xenoprof->npages);

    if ( copy_to_guest(arg, &passive, 1) )
    {
        put_domain(d);
        return -EFAULT;
    }

    passive_domains[pdomains] = d;
    pdomains++;

    return ret;
}

void xenoprof_log_event(
    struct vcpu *vcpu, unsigned long eip, int mode, int event)
{
    struct xenoprof_vcpu *v;
    struct xenoprof_buf *buf;
    int head;
    int tail;
    int size;

    total_samples++;

    /* Ignore samples of unmonitored domains; idle is counted separately
     * from other unmonitored domains. */
    if ( !is_profiled(vcpu->domain) )
    {
        others_samples++;
        return;
    }

    v = &vcpu->domain->xenoprof->vcpu[vcpu->vcpu_id];

    /* Sanity check. Should never happen */
    if ( v->buffer == NULL )
    {
        invalid_buffer_samples++;
        return;
    }

    buf = vcpu->domain->xenoprof->vcpu[vcpu->vcpu_id].buffer;

    head = buf->event_head;
    tail = buf->event_tail;
    size = v->event_size;

    /* make sure indexes in shared buffer are sane */
    if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
    {
        corrupted_buffer_samples++;
        return;
    }
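
    /*
     * The ring is full when advancing head would make it catch up with
     * tail: either head sits immediately behind tail, or head is at the
     * last slot while tail has wrapped to slot 0.
     */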
    if ( (head == tail - 1) || (head == size - 1 && tail == 0) )
    {
        buf->lost_samples++;
        lost_samples++;
    }
    else
    {
        buf->event_log[head].eip = eip;
        buf->event_log[head].mode = mode;
        buf->event_log[head].event = event;
        head++;
        if ( head >= size )
            head = 0;
        buf->event_head = head;
        if ( is_active(vcpu->domain) )
            active_samples++;
        else
            passive_samples++;
        if ( mode == 0 )
            buf->user_samples++;
        else if ( mode == 1 )
            buf->kernel_samples++;
        else
            buf->xen_samples++;
    }
}

static int xenoprof_op_init(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_init xenoprof_init;
    int ret;

    if ( copy_from_guest(&xenoprof_init, arg, 1) )
        return -EFAULT;

    if ( (ret = xenoprof_arch_init(&xenoprof_init.num_events,
                                   &xenoprof_init.is_primary,
                                   xenoprof_init.cpu_type)) )
        return ret;

    if ( copy_to_guest(arg, &xenoprof_init, 1) )
        return -EFAULT;

    if ( xenoprof_init.is_primary )
        xenoprof_primary_profiler = current->domain;

    return 0;
}

static int xenoprof_op_get_buffer(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_get_buffer xenoprof_get_buffer;
    struct domain *d = current->domain;
    int ret;

    if ( copy_from_guest(&xenoprof_get_buffer, arg, 1) )
        return -EFAULT;

    /*
     * The xenoprof struct and buffers are allocated only the first time
     * get_buffer is called; the memory is then kept until the domain is
     * destroyed.
     */
    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, xenoprof_get_buffer.max_samples, 0);
        if ( ret < 0 )
            return ret;
    }

    ret = share_xenoprof_page_with_guest(
        d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages);
    if ( ret < 0 )
        return ret;

    xenoprof_reset_buf(d);

    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->is_primary = (xenoprof_primary_profiler == current->domain);

    xenoprof_get_buffer.nbuf = d->xenoprof->nbuf;
    xenoprof_get_buffer.bufsize = d->xenoprof->bufsize;
    if ( !shadow_mode_translate(d) )
        xenoprof_get_buffer.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            d, __pa(d->xenoprof->rawbuf), xenoprof_get_buffer.buf_gmaddr,
            d->xenoprof->npages);

    if ( copy_to_guest(arg, &xenoprof_get_buffer, 1) )
        return -EFAULT;

    return 0;
}

#define NONPRIV_OP(op) ( (op == XENOPROF_init)          \
                      || (op == XENOPROF_enable_virq)   \
                      || (op == XENOPROF_disable_virq)  \
                      || (op == XENOPROF_get_buffer) )
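
/* Only the four operations above are permitted for domains other than the
 * primary profiler; do_xenoprof_op() rejects everything else with -EPERM. */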

int do_xenoprof_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    int ret = 0;

    if ( (op < 0) || (op > XENOPROF_last_op) )
    {
        printk("xenoprof: invalid operation %d for domain %d\n",
               op, current->domain->domain_id);
        return -EINVAL;
    }

    if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) )
    {
        printk("xenoprof: dom %d denied privileged operation %d\n",
               current->domain->domain_id, op);
        return -EPERM;
    }

    spin_lock(&xenoprof_lock);

    switch ( op )
    {
    case XENOPROF_init:
        ret = xenoprof_op_init(arg);
        break;

    case XENOPROF_get_buffer:
        ret = xenoprof_op_get_buffer(arg);
        break;

    case XENOPROF_reset_active_list:
    {
        reset_active_list();
        ret = 0;
        break;
    }
    case XENOPROF_reset_passive_list:
    {
        reset_passive_list();
        ret = 0;
        break;
    }
    case XENOPROF_set_active:
    {
        domid_t domid;
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        if ( copy_from_guest(&domid, arg, 1) )
        {
            ret = -EFAULT;
            break;
        }
        ret = add_active_list(domid);
        break;
    }
    case XENOPROF_set_passive:
    {
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        ret = add_passive_list(arg);
        break;
    }
    case XENOPROF_reserve_counters:
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_reserve_counters();
        if ( !ret )
            xenoprof_state = XENOPROF_COUNTERS_RESERVED;
        break;

    case XENOPROF_counter:
        if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) ||
             (adomains == 0) )
        {
            ret = -EPERM;
            break;
        }

        ret = xenoprof_arch_counter(arg);
        break;

    case XENOPROF_setup_events:
        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_setup_events();
        if ( !ret )
            xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_enable_virq:
    {
        int i;
        if ( current->domain == xenoprof_primary_profiler )
        {
            xenoprof_arch_enable_virq();
            xenoprof_reset_stat();
            for ( i = 0; i < pdomains; i++ )
                xenoprof_reset_buf(passive_domains[i]);
        }
        xenoprof_reset_buf(current->domain);
        ret = set_active(current->domain);
        break;
    }

    case XENOPROF_start:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_READY) &&
             (activated == adomains) )
            ret = xenoprof_arch_start();
        if ( ret == 0 )
            xenoprof_state = XENOPROF_PROFILING;
        break;

    case XENOPROF_stop:
        if ( xenoprof_state != XENOPROF_PROFILING )
        {
            ret = -EPERM;
            break;
        }
        xenoprof_arch_stop();
        xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_disable_virq:
    {
        struct xenoprof *x;
        if ( (xenoprof_state == XENOPROF_PROFILING) &&
             (is_active(current->domain)) )
        {
            ret = -EPERM;
            break;
        }
        if ( (ret = reset_active(current->domain)) != 0 )
            break;
        x = current->domain->xenoprof;
        unshare_xenoprof_page_with_guest(x);
        break;
    }

    case XENOPROF_release_counters:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) ||
             (xenoprof_state == XENOPROF_READY) )
        {
            xenoprof_state = XENOPROF_IDLE;
            xenoprof_arch_release_counters();
            xenoprof_arch_disable_virq();
            reset_passive_list();
            ret = 0;
        }
        break;

    case XENOPROF_shutdown:
        ret = -EPERM;
        if ( xenoprof_state == XENOPROF_IDLE )
        {
            activated = 0;
            adomains = 0;
            xenoprof_primary_profiler = NULL;
            ret = 0;
        }
        break;

    default:
        ret = -ENOSYS;
    }

    spin_unlock(&xenoprof_lock);

    if ( ret < 0 )
        printk("xenoprof: operation %d failed for dom %d (status : %d)\n",
               op, current->domain->domain_id, ret);

    return ret;
}
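
/*
 * Illustrative call sequence for a primary profiler, sketched from the op
 * handlers above (the xenoprof_op() guest-side wrapper is hypothetical):
 *
 *   xenoprof_op(XENOPROF_init, &init);              // query num_events etc.
 *   xenoprof_op(XENOPROF_get_buffer, &get_buffer);  // map sample buffers
 *   xenoprof_op(XENOPROF_set_active, &domid);       // choose profiled domains
 *   xenoprof_op(XENOPROF_reserve_counters, NULL);   // IDLE -> COUNTERS_RESERVED
 *   xenoprof_op(XENOPROF_counter, &counter);        // program each counter
 *   xenoprof_op(XENOPROF_setup_events, NULL);       // -> READY
 *   xenoprof_op(XENOPROF_enable_virq, NULL);        // mark caller active
 *   xenoprof_op(XENOPROF_start, NULL);              // READY -> PROFILING
 *   ...consume samples from the shared ring buffers...
 *   xenoprof_op(XENOPROF_stop, NULL);               // PROFILING -> READY
 *   xenoprof_op(XENOPROF_disable_virq, NULL);
 *   xenoprof_op(XENOPROF_release_counters, NULL);   // back to IDLE
 *   xenoprof_op(XENOPROF_shutdown, NULL);
 */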

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */