ia64/xen-unstable: view of xen/common/xenoprof.c @ 18594:5e4e234d58be

changeset:  x86: Define __per_cpu_shift label to help kdump/crashdump.
            Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author:     Keir Fraser <keir.fraser@citrix.com>
date:       Wed Oct 08 13:11:06 2008 +0100
parents:    6ea3db7ae24d
children:   2a022ee37392

/*
 * Copyright (C) 2005 Hewlett-Packard Co.
 * written by Aravind Menon & Jose Renato Santos
 *            (email: xenoprof@groups.hp.com)
 *
 * arch generic xenoprof and IA64 support.
 * dynamic map/unmap xenoprof buffer support.
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 */

#ifndef COMPAT
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <public/xenoprof.h>
#include <xen/paging.h>
#include <xsm/xsm.h>

/* Limit amount of pages used for shared buffer (per domain) */
#define MAX_OPROF_SHARED_PAGES 32

/* Lock protecting the following global state */
static DEFINE_SPINLOCK(xenoprof_lock);

static DEFINE_SPINLOCK(pmu_owner_lock);
int pmu_owner = 0;
int pmu_hvm_refcount = 0;

static struct domain *active_domains[MAX_OPROF_DOMAINS];
static int active_ready[MAX_OPROF_DOMAINS];
static unsigned int adomains;

static struct domain *passive_domains[MAX_OPROF_DOMAINS];
static unsigned int pdomains;

static unsigned int activated;
static struct domain *xenoprof_primary_profiler;
static int xenoprof_state = XENOPROF_IDLE;
static unsigned long backtrace_depth;

static u64 total_samples;
static u64 invalid_buffer_samples;
static u64 corrupted_buffer_samples;
static u64 lost_samples;
static u64 active_samples;
static u64 passive_samples;
static u64 idle_samples;
static u64 others_samples;

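/*
 * PMU ownership: the hardware performance counters are claimed either by
 * xenoprof (PMU_OWNER_XENOPROF) or by HVM vPMU code (PMU_OWNER_HVM).
 * Xenoprof ownership is exclusive, while HVM ownership is reference
 * counted (pmu_hvm_refcount) so several HVM users can hold it at once;
 * releasing drops the owner back to PMU_OWNER_NONE once no HVM references
 * remain.
 */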
int acquire_pmu_ownership(int pmu_ownship)
{
    spin_lock(&pmu_owner_lock);
    if ( pmu_owner == PMU_OWNER_NONE )
    {
        pmu_owner = pmu_ownship;
        goto out;
    }

    if ( pmu_owner == pmu_ownship )
        goto out;

    spin_unlock(&pmu_owner_lock);
    return 0;
 out:
    if ( pmu_owner == PMU_OWNER_HVM )
        pmu_hvm_refcount++;
    spin_unlock(&pmu_owner_lock);
    return 1;
}

void release_pmu_ownship(int pmu_ownship)
{
    spin_lock(&pmu_owner_lock);
    if ( pmu_ownship == PMU_OWNER_HVM )
        pmu_hvm_refcount--;
    if ( !pmu_hvm_refcount )
        pmu_owner = PMU_OWNER_NONE;
    spin_unlock(&pmu_owner_lock);
}

int is_active(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
}

static int is_passive(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
}

static int is_profiled(struct domain *d)
{
    return (is_active(d) || is_passive(d));
}

static void xenoprof_reset_stat(void)
{
    total_samples = 0;
    invalid_buffer_samples = 0;
    corrupted_buffer_samples = 0;
    lost_samples = 0;
    active_samples = 0;
    passive_samples = 0;
    idle_samples = 0;
    others_samples = 0;
}

static void xenoprof_reset_buf(struct domain *d)
{
    int j;
    xenoprof_buf_t *buf;

    if ( d->xenoprof == NULL )
    {
        printk("xenoprof_reset_buf: ERROR - Unexpected "
               "Xenoprof NULL pointer \n");
        return;
    }

    for ( j = 0; j < MAX_VIRT_CPUS; j++ )
    {
        buf = d->xenoprof->vcpu[j].buffer;
        if ( buf != NULL )
        {
            xenoprof_buf(d, buf, event_head) = 0;
            xenoprof_buf(d, buf, event_tail) = 0;
        }
    }
}

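/*
 * The sample buffers are Xen heap pages that get mapped into a guest.
 * Before (re)sharing, verify that no stale reference from a previous
 * mapping remains on any page, then hand the pages to the guest with
 * writable access.
 */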
static int
share_xenoprof_page_with_guest(struct domain *d, unsigned long mfn, int npages)
{
    int i;

    /* Check if previous page owner has released the page. */
    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 )
        {
            gdprintk(XENLOG_INFO, "mfn 0x%lx page->count_info 0x%x\n",
                     mfn + i, page->count_info);
            return -EBUSY;
        }
        page_set_owner(page, NULL);
    }

    for ( i = 0; i < npages; i++ )
        share_xen_page_with_guest(mfn_to_page(mfn + i), d, XENSHARE_writable);

    return 0;
}

static void
unshare_xenoprof_page_with_guest(struct xenoprof *x)
{
    int i, npages = x->npages;
    unsigned long mfn = virt_to_mfn(x->rawbuf);

    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        BUG_ON(page_get_owner(page) != current->domain);
        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);
    }
}

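/*
 * For auto-translated guests the buffer cannot be exposed by machine
 * address alone: each machine frame has to be associated with the guest
 * frame supplied by the caller, via the arch-specific xenoprof_shared_gmfn().
 * Non-translated guests are simply handed the machine address of the
 * buffer (see the paging_mode_translate() checks in the callers).
 */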
static void
xenoprof_shared_gmfn_with_guest(
    struct domain *d, unsigned long maddr, unsigned long gmaddr, int npages)
{
    int i;

    for ( i = 0; i < npages; i++, maddr += PAGE_SIZE, gmaddr += PAGE_SIZE )
    {
        BUG_ON(page_get_owner(maddr_to_page(maddr)) != d);
        xenoprof_shared_gmfn(d, gmaddr, maddr);
    }
}

static int alloc_xenoprof_struct(
    struct domain *d, int max_samples, int is_passive)
{
    struct vcpu *v;
    int nvcpu, npages, bufsize, max_bufsize;
    unsigned max_max_samples;
    int i;

    d->xenoprof = xmalloc(struct xenoprof);

    if ( d->xenoprof == NULL )
    {
        printk("alloc_xenoprof_struct(): memory allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof, 0, sizeof(*d->xenoprof));

    nvcpu = 0;
    for_each_vcpu ( d, v )
        nvcpu++;

    bufsize = sizeof(struct xenoprof_buf);
    i = sizeof(struct event_log);
#ifdef CONFIG_COMPAT
    d->xenoprof->is_compat = IS_COMPAT(is_passive ? dom0 : d);
    if ( XENOPROF_COMPAT(d->xenoprof) )
    {
        bufsize = sizeof(struct compat_oprof_buf);
        i = sizeof(struct compat_event_log);
    }
#endif

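    /*
     * One xenoprof_buf per vcpu is carved out of a single contiguous
     * allocation.  The buffer header already holds room for one event_log
     * entry, hence only (max_samples - 1) further entries are added below,
     * and the per-domain total is capped at MAX_OPROF_SHARED_PAGES pages.
     */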
    /* reduce max_samples if necessary to limit pages allocated */
    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
    max_max_samples = ( (max_bufsize - bufsize) / i ) + 1;
    if ( (unsigned)max_samples > max_max_samples )
        max_samples = max_max_samples;

    bufsize += (max_samples - 1) * i;
    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;

    d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages));
    if ( d->xenoprof->rawbuf == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        return -ENOMEM;
    }

    d->xenoprof->npages = npages;
    d->xenoprof->nbuf = nvcpu;
    d->xenoprof->bufsize = bufsize;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;

    /* Update buffer pointers for active vcpus */
    i = 0;
    for_each_vcpu ( d, v )
    {
        xenoprof_buf_t *buf = (xenoprof_buf_t *)
            &d->xenoprof->rawbuf[i * bufsize];

        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer = buf;
        xenoprof_buf(d, buf, event_size) = max_samples;
        xenoprof_buf(d, buf, vcpu_id) = v->vcpu_id;

        i++;
        /* in the unlikely case that the number of active vcpus changes */
        if ( i >= nvcpu )
            break;
    }

    return 0;
}

void free_xenoprof_pages(struct domain *d)
{
    struct xenoprof *x;
    int order;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    if ( x->rawbuf != NULL )
    {
        order = get_order_from_pages(x->npages);
        free_xenheap_pages(x->rawbuf, order);
    }

    xfree(x);
    d->xenoprof = NULL;
}

static int active_index(struct domain *d)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_domains[i] == d )
            return i;

    return -1;
}

static int set_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 1;
    x->domain_type = XENOPROF_DOMAIN_ACTIVE;
    active_ready[ind] = 1;
    activated++;

    return 0;
}

static int reset_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 0;
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
    active_ready[ind] = 0;
    active_domains[ind] = NULL;
    activated--;
    put_domain(d);

    if ( activated <= 0 )
        adomains = 0;

    return 0;
}

static void reset_passive(struct domain *d)
{
    struct xenoprof *x;

    if ( d == NULL )
        return;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    unshare_xenoprof_page_with_guest(x);
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
}

static void reset_active_list(void)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_ready[i] )
            reset_active(active_domains[i]);

    adomains = 0;
    activated = 0;
}

static void reset_passive_list(void)
{
    int i;

    for ( i = 0; i < pdomains; i++ )
    {
        reset_passive(passive_domains[i]);
        put_domain(passive_domains[i]);
        passive_domains[i] = NULL;
    }

    pdomains = 0;
}

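/*
 * An active domain registers its own buffers (XENOPROF_get_buffer /
 * XENOPROF_enable_virq) and is notified through VIRQ_XENOPROF.  A passive
 * domain is profiled without its involvement: its buffers are allocated
 * here and mapped into the primary profiler's address space instead.
 */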
static int add_active_list(domid_t domid)
{
    struct domain *d;

    if ( adomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    d = get_domain_by_id(domid);
    if ( d == NULL )
        return -EINVAL;

    active_domains[adomains] = d;
    active_ready[adomains] = 0;
    adomains++;

    return 0;
}

static int add_passive_list(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_passive passive;
    struct domain *d;
    int ret = 0;

    if ( pdomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    if ( copy_from_guest(&passive, arg, 1) )
        return -EFAULT;

    d = get_domain_by_id(passive.domain_id);
    if ( d == NULL )
        return -EINVAL;

    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, passive.max_samples, 1);
        if ( ret < 0 )
        {
            put_domain(d);
            return -ENOMEM;
        }
    }

    ret = share_xenoprof_page_with_guest(
        current->domain, virt_to_mfn(d->xenoprof->rawbuf),
        d->xenoprof->npages);
    if ( ret < 0 )
    {
        put_domain(d);
        return ret;
    }

    d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
    passive.nbuf = d->xenoprof->nbuf;
    passive.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(current->domain) )
        passive.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            current->domain, __pa(d->xenoprof->rawbuf),
            passive.buf_gmaddr, d->xenoprof->npages);

    if ( copy_to_guest(arg, &passive, 1) )
    {
        put_domain(d);
        return -EFAULT;
    }

    passive_domains[pdomains] = d;
    pdomains++;

    return ret;
}

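/*
 * Each per-vcpu buffer is a ring shared with the consuming guest: Xen
 * advances event_head as producer, the guest advances event_tail as
 * consumer.  One slot is always left unused so that head == tail
 * unambiguously means "empty", which is why the space calculation below
 * subtracts one.
 */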
/* Get space in the buffer */
static int xenoprof_buf_space(struct domain *d, xenoprof_buf_t * buf, int size)
{
    int head, tail;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);

    return ((tail > head) ? 0 : size) + tail - head - 1;
}

/* Check for space and add a sample. Return 1 if successful, 0 otherwise. */
static int xenoprof_add_sample(struct domain *d, xenoprof_buf_t *buf,
                               unsigned long eip, int mode, int event)
{
    int head, tail, size;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);
    size = xenoprof_buf(d, buf, event_size);

    /* make sure indexes in shared buffer are sane */
    if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
    {
        corrupted_buffer_samples++;
        return 0;
    }

    if ( xenoprof_buf_space(d, buf, size) > 0 )
    {
        xenoprof_buf(d, buf, event_log[head].eip) = eip;
        xenoprof_buf(d, buf, event_log[head].mode) = mode;
        xenoprof_buf(d, buf, event_log[head].event) = event;
        head++;
        if ( head >= size )
            head = 0;

        xenoprof_buf(d, buf, event_head) = head;
    }
    else
    {
        xenoprof_buf(d, buf, lost_samples)++;
        lost_samples++;
        return 0;
    }

    return 1;
}

int xenoprof_add_trace(struct domain *d, struct vcpu *vcpu,
                       unsigned long eip, int mode)
{
    xenoprof_buf_t *buf = d->xenoprof->vcpu[vcpu->vcpu_id].buffer;

    /* Do not accidentally write an escape code due to a broken frame. */
    if ( eip == XENOPROF_ESCAPE_CODE )
    {
        invalid_buffer_samples++;
        return 0;
    }

    return xenoprof_add_sample(d, buf, eip, mode, 0);
}

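/*
 * Backtrace records are encoded in-band: a sample whose eip is
 * XENOPROF_ESCAPE_CODE and whose event is XENOPROF_TRACE_BEGIN marks the
 * start of a trace, followed by the frame addresses logged via
 * xenoprof_add_trace() above (which therefore must never emit the escape
 * code itself).  The mode argument distinguishes user (0), kernel (1) and
 * Xen (other) context and selects which per-buffer counter is incremented.
 */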
void xenoprof_log_event(struct vcpu *vcpu,
                        struct cpu_user_regs * regs, unsigned long eip,
                        int mode, int event)
{
    struct domain *d = vcpu->domain;
    struct xenoprof_vcpu *v;
    xenoprof_buf_t *buf;

    total_samples++;

    /* Ignore samples of un-monitored domains. */
    if ( !is_profiled(d) )
    {
        others_samples++;
        return;
    }

    v = &d->xenoprof->vcpu[vcpu->vcpu_id];
    if ( v->buffer == NULL )
    {
        invalid_buffer_samples++;
        return;
    }

    buf = v->buffer;

    /* Provide backtrace if requested. */
    if ( backtrace_depth > 0 )
    {
        if ( (xenoprof_buf_space(d, buf, v->event_size) < 2) ||
             !xenoprof_add_sample(d, buf, XENOPROF_ESCAPE_CODE, mode,
                                  XENOPROF_TRACE_BEGIN) )
        {
            xenoprof_buf(d, buf, lost_samples)++;
            lost_samples++;
            return;
        }
    }

    if ( xenoprof_add_sample(d, buf, eip, mode, event) )
    {
        if ( is_active(vcpu->domain) )
            active_samples++;
        else
            passive_samples++;
        if ( mode == 0 )
            xenoprof_buf(d, buf, user_samples)++;
        else if ( mode == 1 )
            xenoprof_buf(d, buf, kernel_samples)++;
        else
            xenoprof_buf(d, buf, xen_samples)++;
    }

    if ( backtrace_depth > 0 )
        xenoprof_backtrace(d, vcpu, regs, backtrace_depth, mode);
}

static int xenoprof_op_init(XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct xenoprof_init xenoprof_init;
    int ret;

    if ( copy_from_guest(&xenoprof_init, arg, 1) )
        return -EFAULT;

    if ( (ret = xenoprof_arch_init(&xenoprof_init.num_events,
                                   xenoprof_init.cpu_type)) )
        return ret;

    xenoprof_init.is_primary =
        ((xenoprof_primary_profiler == d) ||
         ((xenoprof_primary_profiler == NULL) && (d->domain_id == 0)));
    if ( xenoprof_init.is_primary )
        xenoprof_primary_profiler = current->domain;

    return (copy_to_guest(arg, &xenoprof_init, 1) ? -EFAULT : 0);
}

#endif /* !COMPAT */

static int xenoprof_op_get_buffer(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_get_buffer xenoprof_get_buffer;
    struct domain *d = current->domain;
    int ret;

    if ( copy_from_guest(&xenoprof_get_buffer, arg, 1) )
        return -EFAULT;

    /*
     * We allocate the xenoprof struct and buffers only the first time
     * get_buffer is called. Memory is then kept until the domain is
     * destroyed.
     */
    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, xenoprof_get_buffer.max_samples, 0);
        if ( ret < 0 )
            return ret;
    }

    ret = share_xenoprof_page_with_guest(
        d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages);
    if ( ret < 0 )
        return ret;

    xenoprof_reset_buf(d);

    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->is_primary = (xenoprof_primary_profiler == current->domain);

    xenoprof_get_buffer.nbuf = d->xenoprof->nbuf;
    xenoprof_get_buffer.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(d) )
        xenoprof_get_buffer.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            d, __pa(d->xenoprof->rawbuf), xenoprof_get_buffer.buf_gmaddr,
            d->xenoprof->npages);

    if ( copy_to_guest(arg, &xenoprof_get_buffer, 1) )
        return -EFAULT;

    return 0;
}

#define NONPRIV_OP(op) ( (op == XENOPROF_init)          \
                      || (op == XENOPROF_enable_virq)   \
                      || (op == XENOPROF_disable_virq)  \
                      || (op == XENOPROF_get_buffer))

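/*
 * The hypercall below drives a simple state machine, controlled by the
 * primary profiler (dom0 by default):
 *
 *   XENOPROF_IDLE
 *     --reserve_counters--> XENOPROF_COUNTERS_RESERVED
 *     --setup_events------> XENOPROF_READY
 *     --start-------------> XENOPROF_PROFILING
 *     --stop--------------> XENOPROF_READY
 *     --release_counters--> XENOPROF_IDLE
 *
 * Domains may only be added to the active/passive lists while idle, and
 * counters may only be programmed after they have been reserved.
 */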
int do_xenoprof_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    int ret = 0;

    if ( (op < 0) || (op > XENOPROF_last_op) )
    {
        printk("xenoprof: invalid operation %d for domain %d\n",
               op, current->domain->domain_id);
        return -EINVAL;
    }

    if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) )
    {
        printk("xenoprof: dom %d denied privileged operation %d\n",
               current->domain->domain_id, op);
        return -EPERM;
    }

    ret = xsm_profile(current->domain, op);
    if ( ret )
        return ret;

    spin_lock(&xenoprof_lock);

    switch ( op )
    {
    case XENOPROF_init:
        ret = xenoprof_op_init(arg);
        break;

    case XENOPROF_get_buffer:
        if ( !acquire_pmu_ownership(PMU_OWNER_XENOPROF) )
        {
            ret = -EBUSY;
            break;
        }
        ret = xenoprof_op_get_buffer(arg);
        break;

    case XENOPROF_reset_active_list:
    {
        reset_active_list();
        ret = 0;
        break;
    }
    case XENOPROF_reset_passive_list:
    {
        reset_passive_list();
        ret = 0;
        break;
    }
    case XENOPROF_set_active:
    {
        domid_t domid;
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        if ( copy_from_guest(&domid, arg, 1) )
        {
            ret = -EFAULT;
            break;
        }
        ret = add_active_list(domid);
        break;
    }
    case XENOPROF_set_passive:
    {
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        ret = add_passive_list(arg);
        break;
    }
    case XENOPROF_reserve_counters:
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_reserve_counters();
        if ( !ret )
            xenoprof_state = XENOPROF_COUNTERS_RESERVED;
        break;

    case XENOPROF_counter:
        if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) ||
             (adomains == 0) )
        {
            ret = -EPERM;
            break;
        }

        ret = xenoprof_arch_counter(arg);
        break;

    case XENOPROF_setup_events:
        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_setup_events();
        if ( !ret )
            xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_enable_virq:
    {
        int i;
        if ( current->domain == xenoprof_primary_profiler )
        {
            xenoprof_arch_enable_virq();
            xenoprof_reset_stat();
            for ( i = 0; i < pdomains; i++ )
                xenoprof_reset_buf(passive_domains[i]);
        }
        xenoprof_reset_buf(current->domain);
        ret = set_active(current->domain);
        break;
    }

    case XENOPROF_start:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_READY) &&
             (activated == adomains) )
            ret = xenoprof_arch_start();
        if ( ret == 0 )
            xenoprof_state = XENOPROF_PROFILING;
        break;

    case XENOPROF_stop:
    {
        struct domain *d;
        struct vcpu *v;
        int i;

        if ( xenoprof_state != XENOPROF_PROFILING )
        {
            ret = -EPERM;
            break;
        }
        xenoprof_arch_stop();

        /* Flush remaining samples. */
        for ( i = 0; i < adomains; i++ )
        {
            if ( !active_ready[i] )
                continue;
            d = active_domains[i];
            for_each_vcpu(d, v)
                send_guest_vcpu_virq(v, VIRQ_XENOPROF);
        }
        xenoprof_state = XENOPROF_READY;
        break;
    }

    case XENOPROF_disable_virq:
    {
        struct xenoprof *x;
        if ( (xenoprof_state == XENOPROF_PROFILING) &&
             (is_active(current->domain)) )
        {
            ret = -EPERM;
            break;
        }
        if ( (ret = reset_active(current->domain)) != 0 )
            break;
        x = current->domain->xenoprof;
        unshare_xenoprof_page_with_guest(x);
        release_pmu_ownship(PMU_OWNER_XENOPROF);
        break;
    }

    case XENOPROF_release_counters:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) ||
             (xenoprof_state == XENOPROF_READY) )
        {
            xenoprof_state = XENOPROF_IDLE;
            xenoprof_arch_release_counters();
            xenoprof_arch_disable_virq();
            reset_passive_list();
            ret = 0;
        }
        break;

    case XENOPROF_shutdown:
        ret = -EPERM;
        if ( xenoprof_state == XENOPROF_IDLE )
        {
            activated = 0;
            adomains = 0;
            xenoprof_primary_profiler = NULL;
            backtrace_depth = 0;
            ret = 0;
        }
        break;

    case XENOPROF_set_backtrace:
        ret = 0;
        if ( !xenoprof_backtrace_supported() )
            ret = -EINVAL;
        else if ( copy_from_guest(&backtrace_depth, arg, 1) )
            ret = -EFAULT;
        break;

    default:
        ret = -ENOSYS;
    }

    spin_unlock(&xenoprof_lock);

    if ( ret < 0 )
        printk("xenoprof: operation %d failed for dom %d (status : %d)\n",
               op, current->domain->domain_id, ret);

    return ret;
}

#if defined(CONFIG_COMPAT) && !defined(COMPAT)
#include "compat/xenoprof.c"
#endif

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */