direct-io.hg

changeset 12505:fc11c91e5371

[XENOPROFILE] Move code under xen/arch/x86/oprofile to xen/common.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author kfraser@localhost.localdomain
date Wed Nov 22 09:35:50 2006 +0000 (2006-11-22)
parents f555a90bcc37
children da5c5fc8908f
files xen/arch/x86/Rules.mk xen/arch/x86/oprofile/xenoprof.c xen/common/Makefile xen/common/xenoprof.c
line diff
     1.1 --- a/xen/arch/x86/Rules.mk	Tue Nov 21 19:22:25 2006 +0000
     1.2 +++ b/xen/arch/x86/Rules.mk	Wed Nov 22 09:35:50 2006 +0000
     1.3 @@ -3,6 +3,7 @@
     1.4  
     1.5  HAS_ACPI := y
     1.6  HAS_VGA  := y
     1.7 +xenoprof := y
     1.8  
     1.9  #
    1.10  # If you change any of these configuration options then you must
     2.1 --- a/xen/arch/x86/oprofile/xenoprof.c	Tue Nov 21 19:22:25 2006 +0000
     2.2 +++ b/xen/arch/x86/oprofile/xenoprof.c	Wed Nov 22 09:35:50 2006 +0000
     2.3 @@ -2,6 +2,10 @@
     2.4   * Copyright (C) 2005 Hewlett-Packard Co.
     2.5   * written by Aravind Menon & Jose Renato Santos
     2.6   *            (email: xenoprof@groups.hp.com)
     2.7 + *
     2.8 + * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
     2.9 + *                    VA Linux Systems Japan K.K.
    2.10 + * x86 specific part
    2.11   */
    2.12  
    2.13  #include <xen/guest_access.h>
    2.14 @@ -9,693 +13,6 @@
    2.15  #include <public/xenoprof.h>
    2.16  #include <asm/hvm/support.h>
    2.17  
    2.18 -#include "op_counter.h"
    2.19 -
    2.20 -/* Limit amount of pages used for shared buffer (per domain) */
    2.21 -#define MAX_OPROF_SHARED_PAGES 32
    2.22 -
    2.23 -/* Lock protecting the following global state */
    2.24 -static DEFINE_SPINLOCK(xenoprof_lock);
    2.25 -
    2.26 -struct domain *active_domains[MAX_OPROF_DOMAINS];
    2.27 -int active_ready[MAX_OPROF_DOMAINS];
    2.28 -unsigned int adomains;
    2.29 -
    2.30 -struct domain *passive_domains[MAX_OPROF_DOMAINS];
    2.31 -unsigned int pdomains;
    2.32 -
    2.33 -unsigned int activated;
    2.34 -struct domain *primary_profiler;
    2.35 -int xenoprof_state = XENOPROF_IDLE;
    2.36 -
    2.37 -u64 total_samples;
    2.38 -u64 invalid_buffer_samples;
    2.39 -u64 corrupted_buffer_samples;
    2.40 -u64 lost_samples;
    2.41 -u64 active_samples;
    2.42 -u64 passive_samples;
    2.43 -u64 idle_samples;
    2.44 -u64 others_samples;
    2.45 -
    2.46 -
    2.47 -extern int nmi_init(int *num_events, int *is_primary, char *cpu_type);
    2.48 -extern int nmi_reserve_counters(void);
    2.49 -extern int nmi_setup_events(void);
    2.50 -extern int nmi_enable_virq(void);
    2.51 -extern int nmi_start(void);
    2.52 -extern void nmi_stop(void);
    2.53 -extern void nmi_disable_virq(void);
    2.54 -extern void nmi_release_counters(void);
    2.55 -
    2.56 -int is_active(struct domain *d)
    2.57 -{
    2.58 -    struct xenoprof *x = d->xenoprof;
    2.59 -    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
    2.60 -}
    2.61 -
    2.62 -int is_passive(struct domain *d)
    2.63 -{
    2.64 -    struct xenoprof *x = d->xenoprof;
    2.65 -    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
    2.66 -}
    2.67 -
    2.68 -int is_profiled(struct domain *d)
    2.69 -{
    2.70 -    return (is_active(d) || is_passive(d));
    2.71 -}
    2.72 -
    2.73 -static void xenoprof_reset_stat(void)
    2.74 -{
    2.75 -    total_samples = 0;
    2.76 -    invalid_buffer_samples = 0;
    2.77 -    corrupted_buffer_samples = 0;
    2.78 -    lost_samples = 0;
    2.79 -    active_samples = 0;
    2.80 -    passive_samples = 0;
    2.81 -    idle_samples = 0;
    2.82 -    others_samples = 0;
    2.83 -}
    2.84 -
    2.85 -static void xenoprof_reset_buf(struct domain *d)
    2.86 -{
    2.87 -    int j;
    2.88 -    struct xenoprof_buf *buf;
    2.89 -
    2.90 -    if ( d->xenoprof == NULL )
    2.91 -    {
    2.92 -        printk("xenoprof_reset_buf: ERROR - Unexpected "
    2.93 -               "Xenoprof NULL pointer \n");
    2.94 -        return;
    2.95 -    }
    2.96 -
    2.97 -    for ( j = 0; j < MAX_VIRT_CPUS; j++ )
    2.98 -    {
    2.99 -        buf = d->xenoprof->vcpu[j].buffer;
   2.100 -        if ( buf != NULL )
   2.101 -        {
   2.102 -            buf->event_head = 0;
   2.103 -            buf->event_tail = 0;
   2.104 -        }
   2.105 -    }
   2.106 -}
   2.107 -
   2.108 -static char *alloc_xenoprof_buf(struct domain *d, int npages)
   2.109 -{
   2.110 -    char *rawbuf;
   2.111 -    int i, order;
   2.112 -
   2.113 -    /* allocate pages to store sample buffer shared with domain */
   2.114 -    order  = get_order_from_pages(npages);
   2.115 -    rawbuf = alloc_xenheap_pages(order);
   2.116 -    if ( rawbuf == NULL )
   2.117 -    {
   2.118 -        printk("alloc_xenoprof_buf(): memory allocation failed\n");
   2.119 -        return 0;
   2.120 -    }
   2.121 -
   2.122 -    /* Share pages so that kernel can map it */
   2.123 -    for ( i = 0; i < npages; i++ )
   2.124 -        share_xen_page_with_guest(
   2.125 -            virt_to_page(rawbuf + i * PAGE_SIZE), 
   2.126 -            d, XENSHARE_writable);
   2.127 -
   2.128 -    return rawbuf;
   2.129 -}
   2.130 -
   2.131 -static int alloc_xenoprof_struct(
   2.132 -    struct domain *d, int max_samples, int is_passive)
   2.133 -{
   2.134 -    struct vcpu *v;
   2.135 -    int nvcpu, npages, bufsize, max_bufsize;
   2.136 -    unsigned max_max_samples;
   2.137 -    int i;
   2.138 -
   2.139 -    d->xenoprof = xmalloc(struct xenoprof);
   2.140 -
   2.141 -    if ( d->xenoprof == NULL )
   2.142 -    {
   2.143 -        printk ("alloc_xenoprof_struct(): memory "
   2.144 -                "allocation (xmalloc) failed\n");
   2.145 -        return -ENOMEM;
   2.146 -    }
   2.147 -
   2.148 -    memset(d->xenoprof, 0, sizeof(*d->xenoprof));
   2.149 -
   2.150 -    nvcpu = 0;
   2.151 -    for_each_vcpu ( d, v )
   2.152 -        nvcpu++;
   2.153 -
   2.154 -    /* reduce max_samples if necessary to limit pages allocated */
   2.155 -    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
   2.156 -    max_max_samples = ( (max_bufsize - sizeof(struct xenoprof_buf)) /
   2.157 -                        sizeof(struct event_log) ) + 1;
   2.158 -    if ( (unsigned)max_samples > max_max_samples )
   2.159 -        max_samples = max_max_samples;
   2.160 -
   2.161 -    bufsize = sizeof(struct xenoprof_buf) +
   2.162 -        (max_samples - 1) * sizeof(struct event_log);
   2.163 -    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;
   2.164 -    
   2.165 -    d->xenoprof->rawbuf = alloc_xenoprof_buf(is_passive ? dom0 : d, npages);
   2.166 -
   2.167 -    if ( d->xenoprof->rawbuf == NULL )
   2.168 -    {
   2.169 -        xfree(d->xenoprof);
   2.170 -        d->xenoprof = NULL;
   2.171 -        return -ENOMEM;
   2.172 -    }
   2.173 -
   2.174 -    d->xenoprof->npages = npages;
   2.175 -    d->xenoprof->nbuf = nvcpu;
   2.176 -    d->xenoprof->bufsize = bufsize;
   2.177 -    d->xenoprof->domain_ready = 0;
   2.178 -    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
   2.179 -
   2.180 -    /* Update buffer pointers for active vcpus */
   2.181 -    i = 0;
   2.182 -    for_each_vcpu ( d, v )
   2.183 -    {
   2.184 -        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
   2.185 -        d->xenoprof->vcpu[v->vcpu_id].buffer =
   2.186 -            (struct xenoprof_buf *)&d->xenoprof->rawbuf[i * bufsize];
   2.187 -        d->xenoprof->vcpu[v->vcpu_id].buffer->event_size = max_samples;
   2.188 -        d->xenoprof->vcpu[v->vcpu_id].buffer->vcpu_id = v->vcpu_id;
   2.189 -
   2.190 -        i++;
   2.191 -        /* in the unlikely case that the number of active vcpus changes */
   2.192 -        if ( i >= nvcpu )
   2.193 -            break;
   2.194 -    }
   2.195 -    
   2.196 -    return 0;
   2.197 -}
   2.198 -
   2.199 -void free_xenoprof_pages(struct domain *d)
   2.200 -{
   2.201 -    struct xenoprof *x;
   2.202 -    int order;
   2.203 -
   2.204 -    x = d->xenoprof;
   2.205 -    if ( x == NULL )
   2.206 -        return;
   2.207 -
   2.208 -    if ( x->rawbuf != NULL )
   2.209 -    {
   2.210 -        order = get_order_from_pages(x->npages);
   2.211 -        free_xenheap_pages(x->rawbuf, order);
   2.212 -    }
   2.213 -
   2.214 -    xfree(x);
   2.215 -    d->xenoprof = NULL;
   2.216 -}
   2.217 -
   2.218 -static int active_index(struct domain *d)
   2.219 -{
   2.220 -    int i;
   2.221 -
   2.222 -    for ( i = 0; i < adomains; i++ )
   2.223 -        if ( active_domains[i] == d )
   2.224 -            return i;
   2.225 -
   2.226 -    return -1;
   2.227 -}
   2.228 -
   2.229 -static int set_active(struct domain *d)
   2.230 -{
   2.231 -    int ind;
   2.232 -    struct xenoprof *x;
   2.233 -
   2.234 -    ind = active_index(d);
   2.235 -    if ( ind < 0 )
   2.236 -        return -EPERM;
   2.237 -
   2.238 -    x = d->xenoprof;
   2.239 -    if ( x == NULL )
   2.240 -        return -EPERM;
   2.241 -
   2.242 -    x->domain_ready = 1;
   2.243 -    x->domain_type = XENOPROF_DOMAIN_ACTIVE;
   2.244 -    active_ready[ind] = 1;
   2.245 -    activated++;
   2.246 -
   2.247 -    return 0;
   2.248 -}
   2.249 -
   2.250 -static int reset_active(struct domain *d)
   2.251 -{
   2.252 -    int ind;
   2.253 -    struct xenoprof *x;
   2.254 -
   2.255 -    ind = active_index(d);
   2.256 -    if ( ind < 0 )
   2.257 -        return -EPERM;
   2.258 -
   2.259 -    x = d->xenoprof;
   2.260 -    if ( x == NULL )
   2.261 -        return -EPERM;
   2.262 -
   2.263 -    x->domain_ready = 0;
   2.264 -    x->domain_type = XENOPROF_DOMAIN_IGNORED;
   2.265 -    active_ready[ind] = 0;
   2.266 -    active_domains[ind] = NULL;
   2.267 -    activated--;
   2.268 -    put_domain(d);
   2.269 -
   2.270 -    if ( activated <= 0 )
   2.271 -        adomains = 0;
   2.272 -
   2.273 -    return 0;
   2.274 -}
   2.275 -
   2.276 -static void reset_passive(struct domain *d)
   2.277 -{
   2.278 -    struct xenoprof *x;
   2.279 -
   2.280 -    if (d==0)
   2.281 -        return;
   2.282 -
   2.283 -    x = d->xenoprof;
   2.284 -    if ( x == NULL )
   2.285 -        return;
   2.286 -
   2.287 -    x->domain_type = XENOPROF_DOMAIN_IGNORED;
   2.288 -
   2.289 -    return;
   2.290 -}
   2.291 -
   2.292 -static void reset_active_list(void)
   2.293 -{
   2.294 -    int i;
   2.295 -
   2.296 -    for ( i = 0; i < adomains; i++ )
   2.297 -    {
   2.298 -        if ( active_ready[i] )
   2.299 -        {
   2.300 -            reset_active(active_domains[i]);
   2.301 -        }
   2.302 -    }
   2.303 -
   2.304 -    adomains = 0;
   2.305 -    activated = 0;
   2.306 -}
   2.307 -
   2.308 -static void reset_passive_list(void)
   2.309 -{
   2.310 -    int i;
   2.311 -
   2.312 -    for ( i = 0; i < pdomains; i++ )
   2.313 -    {
   2.314 -        reset_passive(passive_domains[i]);
   2.315 -        put_domain(passive_domains[i]);
   2.316 -        passive_domains[i] = NULL;
   2.317 -    }
   2.318 -
   2.319 -    pdomains = 0;
   2.320 -}
   2.321 -
   2.322 -static int add_active_list(domid_t domid)
   2.323 -{
   2.324 -    struct domain *d;
   2.325 -
   2.326 -    if ( adomains >= MAX_OPROF_DOMAINS )
   2.327 -        return -E2BIG;
   2.328 -
   2.329 -    d = find_domain_by_id(domid);
   2.330 -    if ( d == NULL )
   2.331 -        return -EINVAL;
   2.332 -
   2.333 -    active_domains[adomains] = d;
   2.334 -    active_ready[adomains] = 0;
   2.335 -    adomains++;
   2.336 -
   2.337 -    return 0;
   2.338 -}
   2.339 -
   2.340 -static int add_passive_list(XEN_GUEST_HANDLE(void) arg)
   2.341 -{
   2.342 -    struct xenoprof_passive passive;
   2.343 -    struct domain *d;
   2.344 -    int ret = 0;
   2.345 -
   2.346 -    if ( pdomains >= MAX_OPROF_DOMAINS )
   2.347 -        return -E2BIG;
   2.348 -
   2.349 -    if ( copy_from_guest(&passive, arg, 1) )
   2.350 -        return -EFAULT;
   2.351 -
   2.352 -    d = find_domain_by_id(passive.domain_id);
   2.353 -    if ( d == NULL )
   2.354 -        return -EINVAL;
   2.355 -
   2.356 -    if ( (d->xenoprof == NULL) && 
   2.357 -         ((ret = alloc_xenoprof_struct(d, passive.max_samples, 1)) < 0) ) {
   2.358 -        put_domain(d);
   2.359 -        return -ENOMEM;
   2.360 -    }
   2.361 -
   2.362 -    d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
   2.363 -    passive.nbuf = d->xenoprof->nbuf;
   2.364 -    passive.bufsize = d->xenoprof->bufsize;
   2.365 -    passive.buf_maddr = __pa(d->xenoprof->rawbuf);
   2.366 -
   2.367 -    if ( copy_to_guest(arg, &passive, 1) ) {
   2.368 -        put_domain(d);
   2.369 -        return -EFAULT;
   2.370 -    }
   2.371 -    
   2.372 -    passive_domains[pdomains] = d;
   2.373 -    pdomains++;
   2.374 -
   2.375 -    return ret;
   2.376 -}
   2.377 -
   2.378 -void xenoprof_log_event(
   2.379 -    struct vcpu *vcpu, unsigned long eip, int mode, int event)
   2.380 -{
   2.381 -    struct xenoprof_vcpu *v;
   2.382 -    struct xenoprof_buf *buf;
   2.383 -    int head;
   2.384 -    int tail;
   2.385 -    int size;
   2.386 -
   2.387 -
   2.388 -    total_samples++;
   2.389 -
   2.390 -    /* ignore samples of un-monitored domains */
   2.391 -    /* Count samples in idle separate from other unmonitored domains */
   2.392 -    if ( !is_profiled(vcpu->domain) )
   2.393 -    {
   2.394 -        others_samples++;
   2.395 -        return;
   2.396 -    }
   2.397 -
   2.398 -    v = &vcpu->domain->xenoprof->vcpu[vcpu->vcpu_id];
   2.399 -
   2.400 -    /* Sanity check. Should never happen */ 
   2.401 -    if ( v->buffer == NULL )
   2.402 -    {
   2.403 -        invalid_buffer_samples++;
   2.404 -        return;
   2.405 -    }
   2.406 -
   2.407 -    buf = vcpu->domain->xenoprof->vcpu[vcpu->vcpu_id].buffer;
   2.408 -
   2.409 -    head = buf->event_head;
   2.410 -    tail = buf->event_tail;
   2.411 -    size = v->event_size;
   2.412 -
   2.413 -    /* make sure indexes in shared buffer are sane */
   2.414 -    if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
   2.415 -    {
   2.416 -        corrupted_buffer_samples++;
   2.417 -        return;
   2.418 -    }
   2.419 -
   2.420 -    if ( (head == tail - 1) || (head == size - 1 && tail == 0) )
   2.421 -    {
   2.422 -        buf->lost_samples++;
   2.423 -        lost_samples++;
   2.424 -    }
   2.425 -    else
   2.426 -    {
   2.427 -        buf->event_log[head].eip = eip;
   2.428 -        buf->event_log[head].mode = mode;
   2.429 -        buf->event_log[head].event = event;
   2.430 -        head++;
   2.431 -        if ( head >= size )
   2.432 -            head = 0;
   2.433 -        buf->event_head = head;
   2.434 -        if ( is_active(vcpu->domain) )
   2.435 -            active_samples++;
   2.436 -        else
   2.437 -            passive_samples++;
   2.438 -        if ( mode == 0 )
   2.439 -            buf->user_samples++;
   2.440 -        else if ( mode == 1 )
   2.441 -            buf->kernel_samples++;
   2.442 -        else
   2.443 -            buf->xen_samples++;
   2.444 -    }
   2.445 -}
   2.446 -
   2.447 -static int xenoprof_op_init(XEN_GUEST_HANDLE(void) arg)
   2.448 -{
   2.449 -    struct xenoprof_init xenoprof_init;
   2.450 -    int ret;
   2.451 -
   2.452 -    if ( copy_from_guest(&xenoprof_init, arg, 1) )
   2.453 -        return -EFAULT;
   2.454 -
   2.455 -    if ( (ret = nmi_init(&xenoprof_init.num_events, 
   2.456 -                         &xenoprof_init.is_primary, 
   2.457 -                         xenoprof_init.cpu_type)) )
   2.458 -        return ret;
   2.459 -
   2.460 -    if ( copy_to_guest(arg, &xenoprof_init, 1) )
   2.461 -        return -EFAULT;
   2.462 -
   2.463 -    if ( xenoprof_init.is_primary )
   2.464 -        primary_profiler = current->domain;
   2.465 -
   2.466 -    return 0;
   2.467 -}
   2.468 -
   2.469 -static int xenoprof_op_get_buffer(XEN_GUEST_HANDLE(void) arg)
   2.470 -{
   2.471 -    struct xenoprof_get_buffer xenoprof_get_buffer;
   2.472 -    struct domain *d = current->domain;
   2.473 -    int ret;
   2.474 -
   2.475 -    if ( copy_from_guest(&xenoprof_get_buffer, arg, 1) )
   2.476 -        return -EFAULT;
   2.477 -
   2.478 -    /*
   2.479 -     * We allocate xenoprof struct and buffers only at first time xenoprof_get_buffer
   2.480 -     * is called. Memory is then kept until domain is destroyed.
   2.481 -     */
   2.482 -    if ( (d->xenoprof == NULL) &&
   2.483 -         ((ret = alloc_xenoprof_struct(d, xenoprof_get_buffer.max_samples, 0)) < 0) )
   2.484 -        return ret;
   2.485 -
   2.486 -    xenoprof_reset_buf(d);
   2.487 -
   2.488 -    d->xenoprof->domain_type  = XENOPROF_DOMAIN_IGNORED;
   2.489 -    d->xenoprof->domain_ready = 0;
   2.490 -    if ( primary_profiler == current->domain )
   2.491 -        d->xenoprof->is_primary = 1;
   2.492 -    else
   2.493 -        d->xenoprof->is_primary = 0;
   2.494 -        
   2.495 -    xenoprof_get_buffer.nbuf = d->xenoprof->nbuf;
   2.496 -    xenoprof_get_buffer.bufsize = d->xenoprof->bufsize;
   2.497 -    xenoprof_get_buffer.buf_maddr = __pa(d->xenoprof->rawbuf);
   2.498 -
   2.499 -    if ( copy_to_guest(arg, &xenoprof_get_buffer, 1) )
   2.500 -        return -EFAULT;
   2.501 -
   2.502 -    return 0;
   2.503 -}
   2.504 -
   2.505 -#define NONPRIV_OP(op) ( (op == XENOPROF_init)          \
   2.506 -                      || (op == XENOPROF_enable_virq)   \
   2.507 -                      || (op == XENOPROF_disable_virq)  \
   2.508 -                      || (op == XENOPROF_get_buffer))
   2.509 - 
   2.510 -int do_xenoprof_op(int op, XEN_GUEST_HANDLE(void) arg)
   2.511 -{
   2.512 -    int ret = 0;
   2.513 -    
   2.514 -    if ( (op < 0) || (op>XENOPROF_last_op) )
   2.515 -    {
   2.516 -        printk("xenoprof: invalid operation %d for domain %d\n",
   2.517 -               op, current->domain->domain_id);
   2.518 -        return -EINVAL;
   2.519 -    }
   2.520 -
   2.521 -    if ( !NONPRIV_OP(op) && (current->domain != primary_profiler) )
   2.522 -    {
   2.523 -        printk("xenoprof: dom %d denied privileged operation %d\n",
   2.524 -               current->domain->domain_id, op);
   2.525 -        return -EPERM;
   2.526 -    }
   2.527 -
   2.528 -    spin_lock(&xenoprof_lock);
   2.529 -    
   2.530 -    switch ( op )
   2.531 -    {
   2.532 -    case XENOPROF_init:
   2.533 -        ret = xenoprof_op_init(arg);
   2.534 -        break;
   2.535 -
   2.536 -    case XENOPROF_get_buffer:
   2.537 -        ret = xenoprof_op_get_buffer(arg);
   2.538 -        break;
   2.539 -
   2.540 -    case XENOPROF_reset_active_list:
   2.541 -    {
   2.542 -        reset_active_list();
   2.543 -        ret = 0;
   2.544 -        break;
   2.545 -    }
   2.546 -    case XENOPROF_reset_passive_list:
   2.547 -    {
   2.548 -        reset_passive_list();
   2.549 -        ret = 0;
   2.550 -        break;
   2.551 -    }
   2.552 -    case XENOPROF_set_active:
   2.553 -    {
   2.554 -        domid_t domid;
   2.555 -        if ( xenoprof_state != XENOPROF_IDLE ) {
   2.556 -            ret = -EPERM;
   2.557 -            break;
   2.558 -        }
   2.559 -        if ( copy_from_guest(&domid, arg, 1) ) {
   2.560 -            ret = -EFAULT;
   2.561 -            break;
   2.562 -        }
   2.563 -        ret = add_active_list(domid);
   2.564 -        break;
   2.565 -    }
   2.566 -    case XENOPROF_set_passive:
   2.567 -    {
   2.568 -        if ( xenoprof_state != XENOPROF_IDLE ) {
   2.569 -            ret = -EPERM;
   2.570 -            break;
   2.571 -        }
   2.572 -        ret = add_passive_list(arg);
   2.573 -        break;
   2.574 -    }
   2.575 -    case XENOPROF_reserve_counters:
   2.576 -        if ( xenoprof_state != XENOPROF_IDLE ) {
   2.577 -            ret = -EPERM;
   2.578 -            break;
   2.579 -        }
   2.580 -        ret = nmi_reserve_counters();
   2.581 -        if ( !ret )
   2.582 -            xenoprof_state = XENOPROF_COUNTERS_RESERVED;
   2.583 -        break;
   2.584 -
   2.585 -    case XENOPROF_counter:
   2.586 -    {
   2.587 -        struct xenoprof_counter counter;
   2.588 -        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED || adomains == 0) {
   2.589 -            ret = -EPERM;
   2.590 -            break;
   2.591 -        }
   2.592 -
   2.593 -        if ( copy_from_guest(&counter, arg, 1) ) {
   2.594 -            ret = -EFAULT;
   2.595 -            break;
   2.596 -        }
   2.597 -
   2.598 -        if ( counter.ind > OP_MAX_COUNTER ) {
   2.599 -            ret = -E2BIG;
   2.600 -            break;
   2.601 -        }
   2.602 -
   2.603 -        counter_config[counter.ind].count     = (unsigned long) counter.count;
   2.604 -        counter_config[counter.ind].enabled   = (unsigned long) counter.enabled;
   2.605 -        counter_config[counter.ind].event     = (unsigned long) counter.event;
   2.606 -        counter_config[counter.ind].kernel    = (unsigned long) counter.kernel;
   2.607 -        counter_config[counter.ind].user      = (unsigned long) counter.user;
   2.608 -        counter_config[counter.ind].unit_mask = (unsigned long) counter.unit_mask;
   2.609 -
   2.610 -        ret = 0;
   2.611 -        break;
   2.612 -    }
   2.613 -
   2.614 -    case XENOPROF_setup_events:
   2.615 -        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED ) {
   2.616 -            ret = -EPERM;
   2.617 -            break;
   2.618 -        }
   2.619 -        ret = nmi_setup_events();
   2.620 -        if ( !ret )
   2.621 -            xenoprof_state = XENOPROF_READY;
   2.622 -        break;
   2.623 -
   2.624 -    case XENOPROF_enable_virq:
   2.625 -    {
   2.626 -        int i;
   2.627 -        if ( current->domain == primary_profiler )
   2.628 -        {
   2.629 -            nmi_enable_virq();
   2.630 -            xenoprof_reset_stat();
   2.631 -            for ( i = 0; i < pdomains; i++ ) {
   2.632 -                xenoprof_reset_buf(passive_domains[i]);
   2.633 -            }
   2.634 -        }
   2.635 -        xenoprof_reset_buf(current->domain);
   2.636 -        ret = set_active(current->domain);
   2.637 -        break;
   2.638 -    }
   2.639 -
   2.640 -    case XENOPROF_start:
   2.641 -        ret = -EPERM;
   2.642 -        if ( (xenoprof_state == XENOPROF_READY) &&
   2.643 -             (activated == adomains) )
   2.644 -            ret = nmi_start();
   2.645 -
   2.646 -        if ( ret == 0 )
   2.647 -            xenoprof_state = XENOPROF_PROFILING;
   2.648 -        break;
   2.649 -
   2.650 -    case XENOPROF_stop:
   2.651 -        if ( xenoprof_state != XENOPROF_PROFILING ) {
   2.652 -            ret = -EPERM;
   2.653 -            break;
   2.654 -        }
   2.655 -        nmi_stop();
   2.656 -        xenoprof_state = XENOPROF_READY;
   2.657 -        break;
   2.658 -
   2.659 -    case XENOPROF_disable_virq:
   2.660 -        if ( (xenoprof_state == XENOPROF_PROFILING) && 
   2.661 -             (is_active(current->domain)) ) {
   2.662 -            ret = -EPERM;
   2.663 -            break;
   2.664 -        }
   2.665 -        ret = reset_active(current->domain);
   2.666 -        break;
   2.667 -
   2.668 -    case XENOPROF_release_counters:
   2.669 -        ret = -EPERM;
   2.670 -        if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) ||
   2.671 -             (xenoprof_state == XENOPROF_READY) )
   2.672 -        {
   2.673 -            xenoprof_state = XENOPROF_IDLE;
   2.674 -            nmi_release_counters();
   2.675 -            nmi_disable_virq();
   2.676 -            reset_passive_list();
   2.677 -            ret = 0;
   2.678 -        }
   2.679 -        break;
   2.680 -
   2.681 -    case XENOPROF_shutdown:
   2.682 -        ret = -EPERM;
   2.683 -        if ( xenoprof_state == XENOPROF_IDLE )
   2.684 -        {
   2.685 -            activated = 0;
   2.686 -            adomains=0;
   2.687 -            primary_profiler = NULL;
   2.688 -            ret = 0;
   2.689 -        }
   2.690 -        break;
   2.691 -
   2.692 -    default:
   2.693 -        ret = -ENOSYS;
   2.694 -    }
   2.695 -
   2.696 -    spin_unlock(&xenoprof_lock);
   2.697 -
   2.698 -    if ( ret < 0 )
   2.699 -        printk("xenoprof: operation %d failed for dom %d (status : %d)\n",
   2.700 -               op, current->domain->domain_id, ret);
   2.701 -
   2.702 -    return ret;
   2.703 -}
   2.704 -
   2.705  int xenoprofile_get_mode(struct vcpu *v, struct cpu_user_regs * const regs)
   2.706  {
   2.707      if ( !guest_mode(regs) )
     3.1 --- a/xen/common/Makefile	Tue Nov 21 19:22:25 2006 +0000
     3.2 +++ b/xen/common/Makefile	Wed Nov 22 09:35:50 2006 +0000
     3.3 @@ -29,6 +29,7 @@ obj-y += xmalloc.o
     3.4  
     3.5  obj-$(perfc)       += perfc.o
     3.6  obj-$(crash_debug) += gdbstub.o
     3.7 +obj-$(xenoprof)    += xenoprof.o
     3.8  
     3.9  # Object file contains changeset and compiler information.
    3.10  version.o: $(BASEDIR)/include/xen/compile.h
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/common/xenoprof.c	Wed Nov 22 09:35:50 2006 +0000
     4.3 @@ -0,0 +1,709 @@
     4.4 +/*
     4.5 + * Copyright (C) 2005 Hewlett-Packard Co.
     4.6 + * written by Aravind Menon & Jose Renato Santos
     4.7 + *            (email: xenoprof@groups.hp.com)
     4.8 + * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
     4.9 + *                    VA Linux Systems Japan K.K.
    4.10 + * arch generic xenoprof and IA64 support.
    4.11 + */
    4.12 +
    4.13 +#include <xen/guest_access.h>
    4.14 +#include <xen/sched.h>
    4.15 +#include <public/xenoprof.h>
    4.16 +#include <asm/hvm/support.h>
    4.17 +
    4.18 +#include "../arch/x86/oprofile/op_counter.h"
    4.19 +
    4.20 +/* Limit amount of pages used for shared buffer (per domain) */
    4.21 +#define MAX_OPROF_SHARED_PAGES 32
    4.22 +
    4.23 +/* Lock protecting the following global state */
    4.24 +static DEFINE_SPINLOCK(xenoprof_lock);
    4.25 +
    4.26 +struct domain *active_domains[MAX_OPROF_DOMAINS];
    4.27 +int active_ready[MAX_OPROF_DOMAINS];
    4.28 +unsigned int adomains;
    4.29 +
    4.30 +struct domain *passive_domains[MAX_OPROF_DOMAINS];
    4.31 +unsigned int pdomains;
    4.32 +
    4.33 +unsigned int activated;
    4.34 +struct domain *primary_profiler;
    4.35 +int xenoprof_state = XENOPROF_IDLE;
    4.36 +
    4.37 +u64 total_samples;
    4.38 +u64 invalid_buffer_samples;
    4.39 +u64 corrupted_buffer_samples;
    4.40 +u64 lost_samples;
    4.41 +u64 active_samples;
    4.42 +u64 passive_samples;
    4.43 +u64 idle_samples;
    4.44 +u64 others_samples;
    4.45 +
    4.46 +
    4.47 +extern int nmi_init(int *num_events, int *is_primary, char *cpu_type);
    4.48 +extern int nmi_reserve_counters(void);
    4.49 +extern int nmi_setup_events(void);
    4.50 +extern int nmi_enable_virq(void);
    4.51 +extern int nmi_start(void);
    4.52 +extern void nmi_stop(void);
    4.53 +extern void nmi_disable_virq(void);
    4.54 +extern void nmi_release_counters(void);
    4.55 +
    4.56 +int is_active(struct domain *d)
    4.57 +{
    4.58 +    struct xenoprof *x = d->xenoprof;
    4.59 +    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
    4.60 +}
    4.61 +
    4.62 +int is_passive(struct domain *d)
    4.63 +{
    4.64 +    struct xenoprof *x = d->xenoprof;
    4.65 +    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
    4.66 +}
    4.67 +
    4.68 +int is_profiled(struct domain *d)
    4.69 +{
    4.70 +    return (is_active(d) || is_passive(d));
    4.71 +}
    4.72 +
    4.73 +static void xenoprof_reset_stat(void)
    4.74 +{
    4.75 +    total_samples = 0;
    4.76 +    invalid_buffer_samples = 0;
    4.77 +    corrupted_buffer_samples = 0;
    4.78 +    lost_samples = 0;
    4.79 +    active_samples = 0;
    4.80 +    passive_samples = 0;
    4.81 +    idle_samples = 0;
    4.82 +    others_samples = 0;
    4.83 +}
    4.84 +
    4.85 +static void xenoprof_reset_buf(struct domain *d)
    4.86 +{
    4.87 +    int j;
    4.88 +    struct xenoprof_buf *buf;
    4.89 +
    4.90 +    if ( d->xenoprof == NULL )
    4.91 +    {
    4.92 +        printk("xenoprof_reset_buf: ERROR - Unexpected "
    4.93 +               "Xenoprof NULL pointer \n");
    4.94 +        return;
    4.95 +    }
    4.96 +
    4.97 +    for ( j = 0; j < MAX_VIRT_CPUS; j++ )
    4.98 +    {
    4.99 +        buf = d->xenoprof->vcpu[j].buffer;
   4.100 +        if ( buf != NULL )
   4.101 +        {
   4.102 +            buf->event_head = 0;
   4.103 +            buf->event_tail = 0;
   4.104 +        }
   4.105 +    }
   4.106 +}
   4.107 +
   4.108 +static char *alloc_xenoprof_buf(struct domain *d, int npages)
   4.109 +{
   4.110 +    char *rawbuf;
   4.111 +    int i, order;
   4.112 +
   4.113 +    /* allocate pages to store sample buffer shared with domain */
   4.114 +    order  = get_order_from_pages(npages);
   4.115 +    rawbuf = alloc_xenheap_pages(order);
   4.116 +    if ( rawbuf == NULL )
   4.117 +    {
   4.118 +        printk("alloc_xenoprof_buf(): memory allocation failed\n");
   4.119 +        return 0;
   4.120 +    }
   4.121 +
   4.122 +    /* Share pages so that kernel can map it */
   4.123 +    for ( i = 0; i < npages; i++ )
   4.124 +        share_xen_page_with_guest(
   4.125 +            virt_to_page(rawbuf + i * PAGE_SIZE), 
   4.126 +            d, XENSHARE_writable);
   4.127 +
   4.128 +    return rawbuf;
   4.129 +}
   4.130 +
   4.131 +static int alloc_xenoprof_struct(
   4.132 +    struct domain *d, int max_samples, int is_passive)
   4.133 +{
   4.134 +    struct vcpu *v;
   4.135 +    int nvcpu, npages, bufsize, max_bufsize;
   4.136 +    unsigned max_max_samples;
   4.137 +    int i;
   4.138 +
   4.139 +    d->xenoprof = xmalloc(struct xenoprof);
   4.140 +
   4.141 +    if ( d->xenoprof == NULL )
   4.142 +    {
   4.143 +        printk ("alloc_xenoprof_struct(): memory "
   4.144 +                "allocation (xmalloc) failed\n");
   4.145 +        return -ENOMEM;
   4.146 +    }
   4.147 +
   4.148 +    memset(d->xenoprof, 0, sizeof(*d->xenoprof));
   4.149 +
   4.150 +    nvcpu = 0;
   4.151 +    for_each_vcpu ( d, v )
   4.152 +        nvcpu++;
   4.153 +
   4.154 +    /* reduce max_samples if necessary to limit pages allocated */
   4.155 +    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
   4.156 +    max_max_samples = ( (max_bufsize - sizeof(struct xenoprof_buf)) /
   4.157 +                        sizeof(struct event_log) ) + 1;
   4.158 +    if ( (unsigned)max_samples > max_max_samples )
   4.159 +        max_samples = max_max_samples;
   4.160 +
   4.161 +    bufsize = sizeof(struct xenoprof_buf) +
   4.162 +        (max_samples - 1) * sizeof(struct event_log);
   4.163 +    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;
   4.164 +    
   4.165 +    d->xenoprof->rawbuf = alloc_xenoprof_buf(is_passive ? dom0 : d, npages);
   4.166 +
   4.167 +    if ( d->xenoprof->rawbuf == NULL )
   4.168 +    {
   4.169 +        xfree(d->xenoprof);
   4.170 +        d->xenoprof = NULL;
   4.171 +        return -ENOMEM;
   4.172 +    }
   4.173 +
   4.174 +    d->xenoprof->npages = npages;
   4.175 +    d->xenoprof->nbuf = nvcpu;
   4.176 +    d->xenoprof->bufsize = bufsize;
   4.177 +    d->xenoprof->domain_ready = 0;
   4.178 +    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
   4.179 +
   4.180 +    /* Update buffer pointers for active vcpus */
   4.181 +    i = 0;
   4.182 +    for_each_vcpu ( d, v )
   4.183 +    {
   4.184 +        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
   4.185 +        d->xenoprof->vcpu[v->vcpu_id].buffer =
   4.186 +            (struct xenoprof_buf *)&d->xenoprof->rawbuf[i * bufsize];
   4.187 +        d->xenoprof->vcpu[v->vcpu_id].buffer->event_size = max_samples;
   4.188 +        d->xenoprof->vcpu[v->vcpu_id].buffer->vcpu_id = v->vcpu_id;
   4.189 +
   4.190 +        i++;
   4.191 +        /* in the unlikely case that the number of active vcpus changes */
   4.192 +        if ( i >= nvcpu )
   4.193 +            break;
   4.194 +    }
   4.195 +    
   4.196 +    return 0;
   4.197 +}
   4.198 +
    4.199 +/* Free d's shared sample buffer pages and its xenoprof bookkeeping
    4.199 + * struct.  No-op if the domain never allocated one; leaves
    4.199 + * d->xenoprof == NULL so the call is safe to repeat. */
    4.199 +void free_xenoprof_pages(struct domain *d)
    4.200 +{
    4.201 +    struct xenoprof *x;
    4.202 +    int order;
    4.203 +
    4.204 +    x = d->xenoprof;
    4.205 +    if ( x == NULL )
    4.206 +        return;
    4.207 +
    4.208 +    if ( x->rawbuf != NULL )
    4.209 +    {
    4.210 +        /* NOTE(review): assumes rawbuf was allocated with this same
    4.210 +         * page order -- confirm against alloc_xenoprof_buf(). */
    4.210 +        order = get_order_from_pages(x->npages);
    4.211 +        free_xenheap_pages(x->rawbuf, order);
    4.212 +    }
    4.213 +
    4.214 +    xfree(x);
    4.215 +    d->xenoprof = NULL;
    4.216 +}
   4.217 +
    4.218 +/* Return d's slot in active_domains[], or -1 if not registered. */
    4.218 +static int active_index(struct domain *d)
    4.219 +{
    4.220 +    int i;
    4.221 +
    4.222 +    for ( i = 0; i < adomains; i++ )
    4.223 +        if ( active_domains[i] == d )
    4.224 +            return i;
    4.225 +
    4.226 +    return -1;
    4.227 +}
   4.228 +
    4.229 +/* Mark an already-registered active domain as ready for profiling.
    4.229 + * Increments 'activated', which XENOPROF_start requires to equal
    4.229 + * adomains before profiling may begin.  Returns -EPERM if d is not
    4.229 + * on the active list or has no xenoprof struct. */
    4.229 +static int set_active(struct domain *d)
    4.230 +{
    4.231 +    int ind;
    4.232 +    struct xenoprof *x;
    4.233 +
    4.234 +    ind = active_index(d);
    4.235 +    if ( ind < 0 )
    4.236 +        return -EPERM;
    4.237 +
    4.238 +    x = d->xenoprof;
    4.239 +    if ( x == NULL )
    4.240 +        return -EPERM;
    4.241 +
    4.242 +    x->domain_ready = 1;
    4.243 +    x->domain_type = XENOPROF_DOMAIN_ACTIVE;
    4.244 +    active_ready[ind] = 1;
    4.245 +    activated++;
    4.246 +
    4.247 +    return 0;
    4.248 +}
   4.249 +
    4.250 +/* Remove d from the active list and drop the domain reference taken
    4.250 + * by add_active_list().  Once no domain remains activated the whole
    4.250 + * list is considered empty (adomains = 0). */
    4.250 +static int reset_active(struct domain *d)
    4.251 +{
    4.252 +    int ind;
    4.253 +    struct xenoprof *x;
    4.254 +
    4.255 +    ind = active_index(d);
    4.256 +    if ( ind < 0 )
    4.257 +        return -EPERM;
    4.258 +
    4.259 +    x = d->xenoprof;
    4.260 +    if ( x == NULL )
    4.261 +        return -EPERM;
    4.262 +
    4.263 +    x->domain_ready = 0;
    4.264 +    x->domain_type = XENOPROF_DOMAIN_IGNORED;
    4.265 +    active_ready[ind] = 0;
    4.266 +    active_domains[ind] = NULL;
    4.267 +    activated--;
    4.268 +    put_domain(d);
    4.269 +
    4.270 +    /* NOTE(review): this also discards slots that were added but
    4.270 +     * never set ready -- verify their references are not leaked. */
    4.270 +    if ( activated <= 0 )
    4.271 +        adomains = 0;
    4.272 +
    4.273 +    return 0;
    4.274 +}
   4.275 +
   4.276 +static void reset_passive(struct domain *d)
   4.277 +{
   4.278 +    struct xenoprof *x;
   4.279 +
   4.280 +    if ( d == 0 )
   4.281 +        return;
   4.282 +
   4.283 +    x = d->xenoprof;
   4.284 +    if ( x == NULL )
   4.285 +        return;
   4.286 +
   4.287 +    x->domain_type = XENOPROF_DOMAIN_IGNORED;
   4.288 +}
   4.289 +
    4.290 +/* Tear down every ready domain on the active list, then clear the
    4.290 + * counters.  NOTE(review): entries with active_ready[i] == 0 are
    4.290 + * dropped without put_domain() -- verify no reference is leaked. */
    4.290 +static void reset_active_list(void)
    4.291 +{
    4.292 +    int i;
    4.293 +
    4.294 +    for ( i = 0; i < adomains; i++ )
    4.295 +        if ( active_ready[i] )
    4.296 +            reset_active(active_domains[i]);
    4.297 +
    4.298 +    adomains = 0;
    4.299 +    activated = 0;
    4.300 +}
   4.301 +
    4.302 +/* Unmark every passive domain and drop the domain references taken
    4.302 + * by add_passive_list(), emptying the passive list. */
    4.302 +static void reset_passive_list(void)
    4.303 +{
    4.304 +    int i;
    4.305 +
    4.306 +    for ( i = 0; i < pdomains; i++ )
    4.307 +    {
    4.308 +        reset_passive(passive_domains[i]);
    4.309 +        put_domain(passive_domains[i]);
    4.310 +        passive_domains[i] = NULL;
    4.311 +    }
    4.312 +
    4.313 +    pdomains = 0;
    4.314 +}
   4.315 +
    4.316 +/* Register domid for active profiling.  Takes a domain reference
    4.316 + * (find_domain_by_id) that is released in reset_active().
    4.316 + * NOTE(review): no duplicate check -- adding the same domain twice
    4.316 + * would occupy two slots; verify callers prevent this. */
    4.316 +static int add_active_list(domid_t domid)
    4.317 +{
    4.318 +    struct domain *d;
    4.319 +
    4.320 +    if ( adomains >= MAX_OPROF_DOMAINS )
    4.321 +        return -E2BIG;
    4.322 +
    4.323 +    d = find_domain_by_id(domid);
    4.324 +    if ( d == NULL )
    4.325 +        return -EINVAL;
    4.326 +
    4.327 +    active_domains[adomains] = d;
    4.328 +    active_ready[adomains] = 0;
    4.329 +    adomains++;
    4.330 +
    4.331 +    return 0;
    4.332 +}
   4.333 +
    4.334 +/* Register a domain for passive profiling.  Reads a xenoprof_passive
    4.334 + * request from the guest, allocates the target's sample buffers on
    4.334 + * first use, and writes the buffer geometry (nbuf/bufsize/buf_maddr)
    4.334 + * back to the caller.  On success the domain reference from
    4.334 + * find_domain_by_id() is kept until reset_passive_list(). */
    4.334 +static int add_passive_list(XEN_GUEST_HANDLE(void) arg)
    4.335 +{
    4.336 +    struct xenoprof_passive passive;
    4.337 +    struct domain *d;
    4.338 +    int ret = 0;
    4.339 +
    4.340 +    if ( pdomains >= MAX_OPROF_DOMAINS )
    4.341 +        return -E2BIG;
    4.342 +
    4.343 +    if ( copy_from_guest(&passive, arg, 1) )
    4.344 +        return -EFAULT;
    4.345 +
    4.346 +    d = find_domain_by_id(passive.domain_id);
    4.347 +    if ( d == NULL )
    4.348 +        return -EINVAL;
    4.349 +
    4.350 +    if ( (d->xenoprof == NULL) && 
    4.351 +         ((ret = alloc_xenoprof_struct(d, passive.max_samples, 1)) < 0) )
    4.352 +    {
    4.353 +        put_domain(d);
    4.354 +        return -ENOMEM;
    4.355 +    }
    4.356 +
    4.357 +    d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
    4.358 +    passive.nbuf = d->xenoprof->nbuf;
    4.359 +    passive.bufsize = d->xenoprof->bufsize;
    4.360 +    passive.buf_maddr = __pa(d->xenoprof->rawbuf);
    4.361 +
    4.362 +    if ( copy_to_guest(arg, &passive, 1) )
    4.363 +    {
    4.364 +        /* NOTE(review): on this failure d keeps domain_type PASSIVE
    4.364 +         * but is not on passive_domains[], so reset_passive_list()
    4.364 +         * will never clear it -- confirm this is acceptable. */
    4.364 +        put_domain(d);
    4.365 +        return -EFAULT;
    4.366 +    }
    4.367 +    
    4.368 +    passive_domains[pdomains] = d;
    4.369 +    pdomains++;
    4.370 +
    4.371 +    return ret;
    4.372 +}
   4.373 +
    4.374 +/*
    4.374 + * Deposit one profiling sample (eip/mode/event) into vcpu's shared
    4.374 + * ring buffer.  Never blocks: samples for unmonitored domains,
    4.374 + * missing buffers, corrupted ring indexes, or a full ring are
    4.374 + * dropped and accounted in the global sample counters instead.
    4.374 + */
    4.374 +void xenoprof_log_event(
    4.375 +    struct vcpu *vcpu, unsigned long eip, int mode, int event)
    4.376 +{
    4.377 +    struct xenoprof_vcpu *v;
    4.378 +    struct xenoprof_buf *buf;
    4.379 +    int head;
    4.380 +    int tail;
    4.381 +    int size;
    4.382 +
    4.383 +
    4.384 +    total_samples++;
    4.385 +
    4.386 +    /* ignore samples of un-monitored domains */
    4.387 +    /* Count samples in idle separate from other unmonitored domains */
    4.388 +    if ( !is_profiled(vcpu->domain) )
    4.389 +    {
    4.390 +        others_samples++;
    4.391 +        return;
    4.392 +    }
    4.393 +
    4.394 +    v = &vcpu->domain->xenoprof->vcpu[vcpu->vcpu_id];
    4.395 +
    4.396 +    /* Sanity check. Should never happen */ 
    4.397 +    if ( v->buffer == NULL )
    4.398 +    {
    4.399 +        invalid_buffer_samples++;
    4.400 +        return;
    4.401 +    }
    4.402 +
    4.403 +    buf = vcpu->domain->xenoprof->vcpu[vcpu->vcpu_id].buffer;
    4.404 +
    4.405 +    /* Snapshot the ring indexes; the buffer is shared with the guest
    4.405 +     * so head/tail must be range-checked before use. */
    4.406 +    head = buf->event_head;
    4.407 +    tail = buf->event_tail;
    4.408 +    size = v->event_size;
    4.409 +
    4.410 +    /* make sure indexes in shared buffer are sane */
    4.411 +    if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
    4.412 +    {
    4.413 +        corrupted_buffer_samples++;
    4.414 +        return;
    4.415 +    }
    4.416 +
    4.417 +    /* Ring is full when advancing head would collide with tail
    4.417 +     * (including the wrap-around case): drop the sample. */
    4.418 +    if ( (head == tail - 1) || (head == size - 1 && tail == 0) )
    4.419 +    {
    4.420 +        buf->lost_samples++;
    4.421 +        lost_samples++;
    4.422 +    }
    4.423 +    else
    4.424 +    {
    4.425 +        buf->event_log[head].eip = eip;
    4.426 +        buf->event_log[head].mode = mode;
    4.427 +        buf->event_log[head].event = event;
    4.428 +        head++;
    4.429 +        if ( head >= size )
    4.430 +            head = 0;
    4.431 +        buf->event_head = head;
    4.432 +        if ( is_active(vcpu->domain) )
    4.433 +            active_samples++;
    4.434 +        else
    4.435 +            passive_samples++;
    4.436 +        /* mode: 0 = user, 1 = kernel, anything else = xen context */
    4.436 +        if ( mode == 0 )
    4.437 +            buf->user_samples++;
    4.438 +        else if ( mode == 1 )
    4.439 +            buf->kernel_samples++;
    4.440 +        else
    4.441 +            buf->xen_samples++;
    4.442 +    }
    4.443 +}
   4.442 +
    4.443 +/* XENOPROF_init: negotiate profiling parameters with the caller.
    4.443 + * nmi_init() fills in num_events, is_primary and cpu_type, which are
    4.443 + * copied back to the guest; if the caller is reported as primary it
    4.443 + * is recorded as the primary profiler domain. */
    4.443 +static int xenoprof_op_init(XEN_GUEST_HANDLE(void) arg)
    4.444 +{
    4.445 +    struct xenoprof_init xenoprof_init;
    4.446 +    int ret;
    4.447 +
    4.448 +    if ( copy_from_guest(&xenoprof_init, arg, 1) )
    4.449 +        return -EFAULT;
    4.450 +
    4.451 +    if ( (ret = nmi_init(&xenoprof_init.num_events, 
    4.452 +                         &xenoprof_init.is_primary, 
    4.453 +                         xenoprof_init.cpu_type)) )
    4.454 +        return ret;
    4.455 +
    4.456 +    if ( copy_to_guest(arg, &xenoprof_init, 1) )
    4.457 +        return -EFAULT;
    4.458 +
    4.459 +    if ( xenoprof_init.is_primary )
    4.460 +        primary_profiler = current->domain;
    4.461 +
    4.462 +    return 0;
    4.463 +}
   4.464 +
    4.465 +/* XENOPROF_get_buffer: describe (and on first use allocate) the
    4.465 + * calling domain's per-vcpu sample buffers, returning the buffer
    4.465 + * count, size and raw machine address to the guest. */
    4.465 +static int xenoprof_op_get_buffer(XEN_GUEST_HANDLE(void) arg)
    4.466 +{
    4.467 +    struct xenoprof_get_buffer xenoprof_get_buffer;
    4.468 +    struct domain *d = current->domain;
    4.469 +    int ret;
    4.470 +
    4.471 +    if ( copy_from_guest(&xenoprof_get_buffer, arg, 1) )
    4.472 +        return -EFAULT;
    4.473 +
    4.474 +    /*
    4.475 +     * We allocate xenoprof struct and buffers only at first time
    4.476 +     * get_buffer is called. Memory is then kept until domain is destroyed.
    4.477 +     */
    4.478 +    if ( d->xenoprof == NULL )
    4.479 +    {
    4.480 +        ret = alloc_xenoprof_struct(d, xenoprof_get_buffer.max_samples, 0);
    4.481 +        if ( ret < 0 )
    4.482 +            return ret;
    4.483 +    }
    4.484 +
    4.485 +    /* Reset ring indexes; the domain starts out unmonitored until it
    4.485 +     * is explicitly made active or passive. */
    4.485 +    xenoprof_reset_buf(d);
    4.486 +
    4.487 +    d->xenoprof->domain_type  = XENOPROF_DOMAIN_IGNORED;
    4.488 +    d->xenoprof->domain_ready = 0;
    4.489 +    d->xenoprof->is_primary   = (primary_profiler == current->domain);
    4.490 +        
    4.491 +    xenoprof_get_buffer.nbuf = d->xenoprof->nbuf;
    4.492 +    xenoprof_get_buffer.bufsize = d->xenoprof->bufsize;
    4.493 +    xenoprof_get_buffer.buf_maddr = __pa(d->xenoprof->rawbuf);
    4.494 +
    4.495 +    if ( copy_to_guest(arg, &xenoprof_get_buffer, 1) )
    4.496 +        return -EFAULT;
    4.497 +
    4.498 +    return 0;
    4.499 +}
   4.500 +
    4.501 +/* Operations any domain may invoke, not just the primary profiler. */
    4.501 +#define NONPRIV_OP(op) ( (op == XENOPROF_init)          \
    4.502 +                      || (op == XENOPROF_enable_virq)   \
    4.503 +                      || (op == XENOPROF_disable_virq)  \
    4.504 +                      || (op == XENOPROF_get_buffer))
   4.505 + 
   4.506 +int do_xenoprof_op(int op, XEN_GUEST_HANDLE(void) arg)
   4.507 +{
   4.508 +    int ret = 0;
   4.509 +    
   4.510 +    if ( (op < 0) || (op > XENOPROF_last_op) )
   4.511 +    {
   4.512 +        printk("xenoprof: invalid operation %d for domain %d\n",
   4.513 +               op, current->domain->domain_id);
   4.514 +        return -EINVAL;
   4.515 +    }
   4.516 +
   4.517 +    if ( !NONPRIV_OP(op) && (current->domain != primary_profiler) )
   4.518 +    {
   4.519 +        printk("xenoprof: dom %d denied privileged operation %d\n",
   4.520 +               current->domain->domain_id, op);
   4.521 +        return -EPERM;
   4.522 +    }
   4.523 +
   4.524 +    spin_lock(&xenoprof_lock);
   4.525 +    
   4.526 +    switch ( op )
   4.527 +    {
   4.528 +    case XENOPROF_init:
   4.529 +        ret = xenoprof_op_init(arg);
   4.530 +        break;
   4.531 +
   4.532 +    case XENOPROF_get_buffer:
   4.533 +        ret = xenoprof_op_get_buffer(arg);
   4.534 +        break;
   4.535 +
   4.536 +    case XENOPROF_reset_active_list:
   4.537 +    {
   4.538 +        reset_active_list();
   4.539 +        ret = 0;
   4.540 +        break;
   4.541 +    }
   4.542 +    case XENOPROF_reset_passive_list:
   4.543 +    {
   4.544 +        reset_passive_list();
   4.545 +        ret = 0;
   4.546 +        break;
   4.547 +    }
   4.548 +    case XENOPROF_set_active:
   4.549 +    {
   4.550 +        domid_t domid;
   4.551 +        if ( xenoprof_state != XENOPROF_IDLE )
   4.552 +        {
   4.553 +            ret = -EPERM;
   4.554 +            break;
   4.555 +        }
   4.556 +        if ( copy_from_guest(&domid, arg, 1) )
   4.557 +        {
   4.558 +            ret = -EFAULT;
   4.559 +            break;
   4.560 +        }
   4.561 +        ret = add_active_list(domid);
   4.562 +        break;
   4.563 +    }
   4.564 +    case XENOPROF_set_passive:
   4.565 +    {
   4.566 +        if ( xenoprof_state != XENOPROF_IDLE )
   4.567 +        {
   4.568 +            ret = -EPERM;
   4.569 +            break;
   4.570 +        }
   4.571 +        ret = add_passive_list(arg);
   4.572 +        break;
   4.573 +    }
   4.574 +    case XENOPROF_reserve_counters:
   4.575 +        if ( xenoprof_state != XENOPROF_IDLE )
   4.576 +        {
   4.577 +            ret = -EPERM;
   4.578 +            break;
   4.579 +        }
   4.580 +        ret = nmi_reserve_counters();
   4.581 +        if ( !ret )
   4.582 +            xenoprof_state = XENOPROF_COUNTERS_RESERVED;
   4.583 +        break;
   4.584 +
   4.585 +    case XENOPROF_counter:
   4.586 +    {
   4.587 +        struct xenoprof_counter counter;
   4.588 +        if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) ||
   4.589 +             (adomains == 0) )
   4.590 +        {
   4.591 +            ret = -EPERM;
   4.592 +            break;
   4.593 +        }
   4.594 +
   4.595 +        if ( copy_from_guest(&counter, arg, 1) )
   4.596 +            return -EFAULT;
   4.597 +
   4.598 +        if ( counter.ind > OP_MAX_COUNTER )
   4.599 +            return -E2BIG;
   4.600 +
   4.601 +        counter_config[counter.ind].count     = counter.count;
   4.602 +        counter_config[counter.ind].enabled   = counter.enabled;
   4.603 +        counter_config[counter.ind].event     = counter.event;
   4.604 +        counter_config[counter.ind].kernel    = counter.kernel;
   4.605 +        counter_config[counter.ind].user      = counter.user;
   4.606 +        counter_config[counter.ind].unit_mask = counter.unit_mask;
   4.607 +
   4.608 +        ret = 0;
   4.609 +        break;
   4.610 +    }
   4.611 +
   4.612 +    case XENOPROF_setup_events:
   4.613 +        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
   4.614 +        {
   4.615 +            ret = -EPERM;
   4.616 +            break;
   4.617 +        }
   4.618 +        ret = nmi_setup_events();
   4.619 +        if ( !ret )
   4.620 +            xenoprof_state = XENOPROF_READY;
   4.621 +        break;
   4.622 +
   4.623 +    case XENOPROF_enable_virq:
   4.624 +    {
   4.625 +        int i;
   4.626 +        if ( current->domain == primary_profiler )
   4.627 +        {
   4.628 +            nmi_enable_virq();
   4.629 +            xenoprof_reset_stat();
   4.630 +            for ( i = 0; i < pdomains; i++ )
   4.631 +                xenoprof_reset_buf(passive_domains[i]);
   4.632 +        }
   4.633 +        xenoprof_reset_buf(current->domain);
   4.634 +        ret = set_active(current->domain);
   4.635 +        break;
   4.636 +    }
   4.637 +
   4.638 +    case XENOPROF_start:
   4.639 +        ret = -EPERM;
   4.640 +        if ( (xenoprof_state == XENOPROF_READY) &&
   4.641 +             (activated == adomains) )
   4.642 +            ret = nmi_start();
   4.643 +
   4.644 +        if ( ret == 0 )
   4.645 +            xenoprof_state = XENOPROF_PROFILING;
   4.646 +        break;
   4.647 +
   4.648 +    case XENOPROF_stop:
   4.649 +        if ( xenoprof_state != XENOPROF_PROFILING ) {
   4.650 +            ret = -EPERM;
   4.651 +            break;
   4.652 +        }
   4.653 +        nmi_stop();
   4.654 +        xenoprof_state = XENOPROF_READY;
   4.655 +        break;
   4.656 +
   4.657 +    case XENOPROF_disable_virq:
   4.658 +        if ( (xenoprof_state == XENOPROF_PROFILING) && 
   4.659 +             (is_active(current->domain)) )
   4.660 +        {
   4.661 +            ret = -EPERM;
   4.662 +            break;
   4.663 +        }
   4.664 +        ret = reset_active(current->domain);
   4.665 +        break;
   4.666 +
   4.667 +    case XENOPROF_release_counters:
   4.668 +        ret = -EPERM;
   4.669 +        if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) ||
   4.670 +             (xenoprof_state == XENOPROF_READY) )
   4.671 +        {
   4.672 +            xenoprof_state = XENOPROF_IDLE;
   4.673 +            nmi_release_counters();
   4.674 +            nmi_disable_virq();
   4.675 +            reset_passive_list();
   4.676 +            ret = 0;
   4.677 +        }
   4.678 +        break;
   4.679 +
   4.680 +    case XENOPROF_shutdown:
   4.681 +        ret = -EPERM;
   4.682 +        if ( xenoprof_state == XENOPROF_IDLE )
   4.683 +        {
   4.684 +            activated = 0;
   4.685 +            adomains=0;
   4.686 +            primary_profiler = NULL;
   4.687 +            ret = 0;
   4.688 +        }
   4.689 +        break;
   4.690 +
   4.691 +    default:
   4.692 +        ret = -ENOSYS;
   4.693 +    }
   4.694 +
   4.695 +    spin_unlock(&xenoprof_lock);
   4.696 +
   4.697 +    if ( ret < 0 )
   4.698 +        printk("xenoprof: operation %d failed for dom %d (status : %d)\n",
   4.699 +               op, current->domain->domain_id, ret);
   4.700 +
   4.701 +    return ret;
   4.702 +}
   4.703 +
   4.704 +/*
   4.705 + * Local variables:
   4.706 + * mode: C
   4.707 + * c-set-style: "BSD"
   4.708 + * c-basic-offset: 4
   4.709 + * tab-width: 4
   4.710 + * indent-tabs-mode: nil
   4.711 + * End:
   4.712 + */