xen/common/xenoprof.c (ia64/xen-unstable @ 15927:b7eb2bb9b625)
/*
 * Copyright (C) 2005 Hewlett-Packard Co.
 * written by Aravind Menon & Jose Renato Santos
 *            (email: xenoprof@groups.hp.com)
 *
 * arch generic xenoprof and IA64 support.
 * dynamic map/unmap xenoprof buffer support.
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 */
#ifndef COMPAT
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <public/xenoprof.h>
#include <xen/paging.h>
#include <xsm/xsm.h>

/* Limit amount of pages used for shared buffer (per domain) */
#define MAX_OPROF_SHARED_PAGES 32

/* Lock protecting the following global state */
static DEFINE_SPINLOCK(xenoprof_lock);

static struct domain *active_domains[MAX_OPROF_DOMAINS];
static int active_ready[MAX_OPROF_DOMAINS];
static unsigned int adomains;

static struct domain *passive_domains[MAX_OPROF_DOMAINS];
static unsigned int pdomains;

static unsigned int activated;
static struct domain *xenoprof_primary_profiler;
static int xenoprof_state = XENOPROF_IDLE;
static unsigned long backtrace_depth;

static u64 total_samples;
static u64 invalid_buffer_samples;
static u64 corrupted_buffer_samples;
static u64 lost_samples;
static u64 active_samples;
static u64 passive_samples;
static u64 idle_samples;
static u64 others_samples;
int is_active(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
}

static int is_passive(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
}

static int is_profiled(struct domain *d)
{
    return (is_active(d) || is_passive(d));
}
static void xenoprof_reset_stat(void)
{
    total_samples = 0;
    invalid_buffer_samples = 0;
    corrupted_buffer_samples = 0;
    lost_samples = 0;
    active_samples = 0;
    passive_samples = 0;
    idle_samples = 0;
    others_samples = 0;
}

static void xenoprof_reset_buf(struct domain *d)
{
    int j;
    xenoprof_buf_t *buf;

    if ( d->xenoprof == NULL )
    {
        printk("xenoprof_reset_buf: ERROR - Unexpected "
               "Xenoprof NULL pointer \n");
        return;
    }

    for ( j = 0; j < MAX_VIRT_CPUS; j++ )
    {
        buf = d->xenoprof->vcpu[j].buffer;
        if ( buf != NULL )
        {
            xenoprof_buf(d, buf, event_head) = 0;
            xenoprof_buf(d, buf, event_tail) = 0;
        }
    }
}
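
/*
 * Share the sample-buffer frames (mfn .. mfn+npages-1) writably with
 * domain d.  Before handing them out, verify that no previous owner still
 * holds a reference on any of the pages; if one does, back off with -EBUSY
 * rather than re-sharing a page that is still mapped elsewhere.
 */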
static int
share_xenoprof_page_with_guest(struct domain *d, unsigned long mfn, int npages)
{
    int i;

    /* Check if previous page owner has released the page. */
    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 )
        {
            gdprintk(XENLOG_INFO, "mfn 0x%lx page->count_info 0x%x\n",
                     mfn + i, page->count_info);
            return -EBUSY;
        }
        page_set_owner(page, NULL);
    }

    for ( i = 0; i < npages; i++ )
        share_xen_page_with_guest(mfn_to_page(mfn + i), d, XENSHARE_writable);

    return 0;
}
static void
unshare_xenoprof_page_with_guest(struct xenoprof *x)
{
    int i, npages = x->npages;
    unsigned long mfn = virt_to_mfn(x->rawbuf);

    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        BUG_ON(page_get_owner(page) != current->domain);
        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);
    }
}
static void
xenoprof_shared_gmfn_with_guest(
    struct domain *d, unsigned long maddr, unsigned long gmaddr, int npages)
{
    int i;

    for ( i = 0; i < npages; i++, maddr += PAGE_SIZE, gmaddr += PAGE_SIZE )
    {
        BUG_ON(page_get_owner(maddr_to_page(maddr)) != d);
        xenoprof_shared_gmfn(d, gmaddr, maddr);
    }
}
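
/*
 * Allocate the per-domain xenoprof state and one sample buffer per vcpu,
 * all carved out of a single contiguous xenheap allocation (rawbuf).
 * The per-vcpu buffer size is
 *     bufsize = sizeof(buffer header) + (max_samples - 1) * sizeof(event),
 * with max_samples clamped so that nvcpu such buffers fit within
 * MAX_OPROF_SHARED_PAGES pages.  As an illustration only (assuming 4KB
 * pages): with 32 shared pages and 4 vcpus, each vcpu gets at most 32KB
 * of buffer space.
 */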
static int alloc_xenoprof_struct(
    struct domain *d, int max_samples, int is_passive)
{
    struct vcpu *v;
    int nvcpu, npages, bufsize, max_bufsize;
    unsigned max_max_samples;
    int i;

    d->xenoprof = xmalloc(struct xenoprof);

    if ( d->xenoprof == NULL )
    {
        printk("alloc_xenoprof_struct(): memory allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof, 0, sizeof(*d->xenoprof));

    nvcpu = 0;
    for_each_vcpu ( d, v )
        nvcpu++;

    bufsize = sizeof(struct xenoprof_buf);
    i = sizeof(struct event_log);
#ifdef CONFIG_COMPAT
    d->xenoprof->is_compat = IS_COMPAT(is_passive ? dom0 : d);
    if ( XENOPROF_COMPAT(d->xenoprof) )
    {
        bufsize = sizeof(struct compat_oprof_buf);
        i = sizeof(struct compat_event_log);
    }
#endif

    /* reduce max_samples if necessary to limit pages allocated */
    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
    max_max_samples = ( (max_bufsize - bufsize) / i ) + 1;
    if ( (unsigned)max_samples > max_max_samples )
        max_samples = max_max_samples;

    bufsize += (max_samples - 1) * i;
    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;

    d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages));
    if ( d->xenoprof->rawbuf == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        return -ENOMEM;
    }

    d->xenoprof->npages = npages;
    d->xenoprof->nbuf = nvcpu;
    d->xenoprof->bufsize = bufsize;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;

    /* Update buffer pointers for active vcpus */
    i = 0;
    for_each_vcpu ( d, v )
    {
        xenoprof_buf_t *buf = (xenoprof_buf_t *)
            &d->xenoprof->rawbuf[i * bufsize];

        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer = buf;
        xenoprof_buf(d, buf, event_size) = max_samples;
        xenoprof_buf(d, buf, vcpu_id) = v->vcpu_id;

        i++;
        /* in the unlikely case that the number of active vcpus changes */
        if ( i >= nvcpu )
            break;
    }

    return 0;
}
void free_xenoprof_pages(struct domain *d)
{
    struct xenoprof *x;
    int order;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    if ( x->rawbuf != NULL )
    {
        order = get_order_from_pages(x->npages);
        free_xenheap_pages(x->rawbuf, order);
    }

    xfree(x);
    d->xenoprof = NULL;
}
static int active_index(struct domain *d)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_domains[i] == d )
            return i;

    return -1;
}
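
/*
 * A domain becomes "active" in two steps: the primary profiler first puts
 * it on the active list (XENOPROF_set_active -> add_active_list), and the
 * domain itself then signals readiness (XENOPROF_enable_virq -> set_active).
 * reset_active undoes the latter and drops the domain reference taken when
 * the domain was added to the list.
 */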
static int set_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 1;
    x->domain_type = XENOPROF_DOMAIN_ACTIVE;
    active_ready[ind] = 1;
    activated++;

    return 0;
}

static int reset_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 0;
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
    active_ready[ind] = 0;
    active_domains[ind] = NULL;
    activated--;
    put_domain(d);

    if ( activated <= 0 )
        adomains = 0;

    return 0;
}
static void reset_passive(struct domain *d)
{
    struct xenoprof *x;

    if ( d == NULL )
        return;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    unshare_xenoprof_page_with_guest(x);
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
}

static void reset_active_list(void)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_ready[i] )
            reset_active(active_domains[i]);

    adomains = 0;
    activated = 0;
}

static void reset_passive_list(void)
{
    int i;

    for ( i = 0; i < pdomains; i++ )
    {
        reset_passive(passive_domains[i]);
        put_domain(passive_domains[i]);
        passive_domains[i] = NULL;
    }

    pdomains = 0;
}
static int add_active_list(domid_t domid)
{
    struct domain *d;

    if ( adomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    d = get_domain_by_id(domid);
    if ( d == NULL )
        return -EINVAL;

    active_domains[adomains] = d;
    active_ready[adomains] = 0;
    adomains++;

    return 0;
}
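
/*
 * Register a passive domain: allocate its sample buffers if it has none
 * yet, share those pages with the calling (primary profiler) domain, and
 * return the buffer geometry in the xenoprof_passive structure supplied
 * by the guest.
 */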
static int add_passive_list(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_passive passive;
    struct domain *d;
    int ret = 0;

    if ( pdomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    if ( copy_from_guest(&passive, arg, 1) )
        return -EFAULT;

    d = get_domain_by_id(passive.domain_id);
    if ( d == NULL )
        return -EINVAL;

    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, passive.max_samples, 1);
        if ( ret < 0 )
        {
            put_domain(d);
            return -ENOMEM;
        }
    }

    ret = share_xenoprof_page_with_guest(
        current->domain, virt_to_mfn(d->xenoprof->rawbuf),
        d->xenoprof->npages);
    if ( ret < 0 )
    {
        put_domain(d);
        return ret;
    }

    d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
    passive.nbuf = d->xenoprof->nbuf;
    passive.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(current->domain) )
        passive.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            current->domain, __pa(d->xenoprof->rawbuf),
            passive.buf_gmaddr, d->xenoprof->npages);

    if ( copy_to_guest(arg, &passive, 1) )
    {
        put_domain(d);
        return -EFAULT;
    }

    passive_domains[pdomains] = d;
    pdomains++;

    return ret;
}
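
/*
 * Each sample buffer is a ring: Xen advances event_head as it produces
 * samples and the profiling domain advances event_tail as it consumes
 * them.  One slot is always left unused so that head == tail means
 * "empty".  Free space is therefore (tail - head - 1) mod size; e.g.
 * with size 8, head 5 and tail 2 there are 8 + 2 - 5 - 1 = 4 free slots.
 */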
/* Get space in the buffer */
static int xenoprof_buf_space(struct domain *d, xenoprof_buf_t * buf, int size)
{
    int head, tail;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);

    return ((tail > head) ? 0 : size) + tail - head - 1;
}
/* Check for space and add a sample. Return 1 if successful, 0 otherwise. */
static int xenoprof_add_sample(struct domain *d, xenoprof_buf_t *buf,
                               unsigned long eip, int mode, int event)
{
    int head, tail, size;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);
    size = xenoprof_buf(d, buf, event_size);

    /* make sure indexes in shared buffer are sane */
    if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
    {
        corrupted_buffer_samples++;
        return 0;
    }

    if ( xenoprof_buf_space(d, buf, size) > 0 )
    {
        xenoprof_buf(d, buf, event_log[head].eip) = eip;
        xenoprof_buf(d, buf, event_log[head].mode) = mode;
        xenoprof_buf(d, buf, event_log[head].event) = event;
        head++;
        if ( head >= size )
            head = 0;

        xenoprof_buf(d, buf, event_head) = head;
    }
    else
    {
        xenoprof_buf(d, buf, lost_samples)++;
        lost_samples++;
        return 0;
    }

    return 1;
}
int xenoprof_add_trace(struct domain *d, struct vcpu *vcpu,
                       unsigned long eip, int mode)
{
    xenoprof_buf_t *buf = d->xenoprof->vcpu[vcpu->vcpu_id].buffer;

    /* Do not accidentally write an escape code due to a broken frame. */
    if ( eip == XENOPROF_ESCAPE_CODE )
    {
        invalid_buffer_samples++;
        return 0;
    }

    return xenoprof_add_sample(d, buf, eip, mode, 0);
}
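
/*
 * Record one sample for the given vcpu.  mode selects the counter bumped
 * below: 0 = user, 1 = kernel, anything else = Xen.  If a backtrace was
 * requested via XENOPROF_set_backtrace, the sample is preceded by an
 * XENOPROF_ESCAPE_CODE / XENOPROF_TRACE_BEGIN marker and followed by the
 * arch-generated backtrace (xenoprof_backtrace).
 */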
void xenoprof_log_event(struct vcpu *vcpu,
                        struct cpu_user_regs * regs, unsigned long eip,
                        int mode, int event)
{
    struct domain *d = vcpu->domain;
    struct xenoprof_vcpu *v;
    xenoprof_buf_t *buf;

    total_samples++;

    /* Ignore samples of un-monitored domains. */
    if ( !is_profiled(d) )
    {
        others_samples++;
        return;
    }

    v = &d->xenoprof->vcpu[vcpu->vcpu_id];
    if ( v->buffer == NULL )
    {
        invalid_buffer_samples++;
        return;
    }

    buf = v->buffer;

    /* Provide backtrace if requested. */
    if ( backtrace_depth > 0 )
    {
        if ( (xenoprof_buf_space(d, buf, v->event_size) < 2) ||
             !xenoprof_add_sample(d, buf, XENOPROF_ESCAPE_CODE, mode,
                                  XENOPROF_TRACE_BEGIN) )
        {
            xenoprof_buf(d, buf, lost_samples)++;
            lost_samples++;
            return;
        }
    }

    if ( xenoprof_add_sample(d, buf, eip, mode, event) )
    {
        if ( is_active(vcpu->domain) )
            active_samples++;
        else
            passive_samples++;
        if ( mode == 0 )
            xenoprof_buf(d, buf, user_samples)++;
        else if ( mode == 1 )
            xenoprof_buf(d, buf, kernel_samples)++;
        else
            xenoprof_buf(d, buf, xen_samples)++;
    }

    if ( backtrace_depth > 0 )
        xenoprof_backtrace(d, vcpu, regs, backtrace_depth, mode);
}
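
/*
 * XENOPROF_init: perform arch-specific setup and report back whether the
 * caller is the primary profiler.  If no primary profiler is registered
 * yet, dom0 claims that role here; only the primary profiler may later
 * issue the privileged operations (see NONPRIV_OP below).
 */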
static int xenoprof_op_init(XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct xenoprof_init xenoprof_init;
    int ret;

    if ( copy_from_guest(&xenoprof_init, arg, 1) )
        return -EFAULT;

    if ( (ret = xenoprof_arch_init(&xenoprof_init.num_events,
                                   xenoprof_init.cpu_type)) )
        return ret;

    xenoprof_init.is_primary =
        ((xenoprof_primary_profiler == d) ||
         ((xenoprof_primary_profiler == NULL) && (d->domain_id == 0)));
    if ( xenoprof_init.is_primary )
        xenoprof_primary_profiler = current->domain;

    return (copy_to_guest(arg, &xenoprof_init, 1) ? -EFAULT : 0);
}

#endif /* !COMPAT */
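
/*
 * XENOPROF_get_buffer: lazily allocate this domain's sample buffers, share
 * the pages with the caller and return the buffer geometry (number of
 * buffers, buffer size, guest address).  For auto-translated guests the
 * frames are additionally entered into the physmap at the guest-supplied
 * address via xenoprof_shared_gmfn_with_guest().
 */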
static int xenoprof_op_get_buffer(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_get_buffer xenoprof_get_buffer;
    struct domain *d = current->domain;
    int ret;

    if ( copy_from_guest(&xenoprof_get_buffer, arg, 1) )
        return -EFAULT;

    /*
     * We allocate xenoprof struct and buffers only at first time
     * get_buffer is called. Memory is then kept until domain is destroyed.
     */
    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, xenoprof_get_buffer.max_samples, 0);
        if ( ret < 0 )
            return ret;
    }

    ret = share_xenoprof_page_with_guest(
        d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages);
    if ( ret < 0 )
        return ret;

    xenoprof_reset_buf(d);

    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->is_primary = (xenoprof_primary_profiler == current->domain);

    xenoprof_get_buffer.nbuf = d->xenoprof->nbuf;
    xenoprof_get_buffer.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(d) )
        xenoprof_get_buffer.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            d, __pa(d->xenoprof->rawbuf), xenoprof_get_buffer.buf_gmaddr,
            d->xenoprof->npages);

    if ( copy_to_guest(arg, &xenoprof_get_buffer, 1) )
        return -EFAULT;

    return 0;
}
#define NONPRIV_OP(op) ( (op == XENOPROF_init)          \
                      || (op == XENOPROF_enable_virq)   \
                      || (op == XENOPROF_disable_virq)  \
                      || (op == XENOPROF_get_buffer))
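
/*
 * Hypercall dispatcher.  All operations are serialised by xenoprof_lock,
 * and operations not covered by NONPRIV_OP are restricted to the primary
 * profiler.  The profiler drives a simple state machine:
 *
 *   XENOPROF_IDLE --reserve_counters--> XENOPROF_COUNTERS_RESERVED
 *     --setup_events--> XENOPROF_READY --start--> XENOPROF_PROFILING
 *     --stop--> XENOPROF_READY --release_counters--> XENOPROF_IDLE
 */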
int do_xenoprof_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    int ret = 0;

    if ( (op < 0) || (op > XENOPROF_last_op) )
    {
        printk("xenoprof: invalid operation %d for domain %d\n",
               op, current->domain->domain_id);
        return -EINVAL;
    }

    if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) )
    {
        printk("xenoprof: dom %d denied privileged operation %d\n",
               current->domain->domain_id, op);
        return -EPERM;
    }

    ret = xsm_profile(current->domain, op);
    if ( ret )
        return ret;

    spin_lock(&xenoprof_lock);

    switch ( op )
    {
    case XENOPROF_init:
        ret = xenoprof_op_init(arg);
        break;

    case XENOPROF_get_buffer:
        ret = xenoprof_op_get_buffer(arg);
        break;

    case XENOPROF_reset_active_list:
    {
        reset_active_list();
        ret = 0;
        break;
    }
    case XENOPROF_reset_passive_list:
    {
        reset_passive_list();
        ret = 0;
        break;
    }
    case XENOPROF_set_active:
    {
        domid_t domid;
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        if ( copy_from_guest(&domid, arg, 1) )
        {
            ret = -EFAULT;
            break;
        }
        ret = add_active_list(domid);
        break;
    }
    case XENOPROF_set_passive:
    {
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        ret = add_passive_list(arg);
        break;
    }
    case XENOPROF_reserve_counters:
        if ( xenoprof_state != XENOPROF_IDLE )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_reserve_counters();
        if ( !ret )
            xenoprof_state = XENOPROF_COUNTERS_RESERVED;
        break;

    case XENOPROF_counter:
        if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) ||
             (adomains == 0) )
        {
            ret = -EPERM;
            break;
        }

        ret = xenoprof_arch_counter(arg);
        break;

    case XENOPROF_setup_events:
        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_setup_events();
        if ( !ret )
            xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_enable_virq:
    {
        int i;
        if ( current->domain == xenoprof_primary_profiler )
        {
            xenoprof_arch_enable_virq();
            xenoprof_reset_stat();
            for ( i = 0; i < pdomains; i++ )
                xenoprof_reset_buf(passive_domains[i]);
        }
        xenoprof_reset_buf(current->domain);
        ret = set_active(current->domain);
        break;
    }

    case XENOPROF_start:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_READY) &&
             (activated == adomains) )
            ret = xenoprof_arch_start();
        if ( ret == 0 )
            xenoprof_state = XENOPROF_PROFILING;
        break;

    case XENOPROF_stop:
        if ( xenoprof_state != XENOPROF_PROFILING )
        {
            ret = -EPERM;
            break;
        }
        xenoprof_arch_stop();
        xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_disable_virq:
    {
        struct xenoprof *x;
        if ( (xenoprof_state == XENOPROF_PROFILING) &&
             (is_active(current->domain)) )
        {
            ret = -EPERM;
            break;
        }
        if ( (ret = reset_active(current->domain)) != 0 )
            break;
        x = current->domain->xenoprof;
        unshare_xenoprof_page_with_guest(x);
        break;
    }

    case XENOPROF_release_counters:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) ||
             (xenoprof_state == XENOPROF_READY) )
        {
            xenoprof_state = XENOPROF_IDLE;
            xenoprof_arch_release_counters();
            xenoprof_arch_disable_virq();
            reset_passive_list();
            ret = 0;
        }
        break;

    case XENOPROF_shutdown:
        ret = -EPERM;
        if ( xenoprof_state == XENOPROF_IDLE )
        {
            activated = 0;
            adomains = 0;
            xenoprof_primary_profiler = NULL;
            backtrace_depth = 0;
            ret = 0;
        }
        break;

    case XENOPROF_set_backtrace:
        ret = 0;
        if ( !xenoprof_backtrace_supported() )
            ret = -EINVAL;
        else if ( copy_from_guest(&backtrace_depth, arg, 1) )
            ret = -EFAULT;
        break;

    default:
        ret = -ENOSYS;
    }

    spin_unlock(&xenoprof_lock);

    if ( ret < 0 )
        printk("xenoprof: operation %d failed for dom %d (status : %d)\n",
               op, current->domain->domain_id, ret);

    return ret;
}
#if defined(CONFIG_COMPAT) && !defined(COMPAT)
#include "compat/xenoprof.c"
#endif

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */