ia64/xen-unstable

changeset 7581:d8a39152f982

Clean up tracing code and simplify tb_set_size(). Dynamic
buffer shrinking is unsafe without heavier-weight SMP
synchronisation.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sun Oct 30 23:17:58 2005 +0100 (2005-10-30)
parents a663683fe8cb
children 2947b0df5ffa
files xen/common/trace.c xen/include/xen/trace.h
line diff
     1.1 --- a/xen/common/trace.c	Sun Oct 30 22:35:06 2005 +0100
     1.2 +++ b/xen/common/trace.c	Sun Oct 30 23:17:58 2005 +0100
     1.3 @@ -36,41 +36,17 @@ static unsigned int opt_tbuf_size = 0;
     1.4  integer_param("tbuf_size", opt_tbuf_size);
     1.5  
     1.6  /* Pointers to the meta-data objects for all system trace buffers */
     1.7 -struct t_buf *t_bufs[NR_CPUS];
     1.8 +static struct t_buf *t_bufs[NR_CPUS];
     1.9  
    1.10  /* a flag recording whether initialization has been done */
    1.11  /* or more properly, if the tbuf subsystem is enabled right now */
    1.12  int tb_init_done = 0;
    1.13  
    1.14  /* which CPUs tracing is enabled on */
    1.15 -unsigned long tb_cpu_mask = (~0UL);
    1.16 +static unsigned long tb_cpu_mask = (~0UL);
    1.17  
    1.18  /* which tracing events are enabled */
    1.19 -u32 tb_event_mask = TRC_ALL;
    1.20 -
    1.21 -/**
    1.22 - * init_trace_bufs - performs initialization of the per-cpu trace buffers.
    1.23 - *
    1.24 - * This function is called at start of day in order to initialize the per-cpu
    1.25 - * trace buffers.  The trace buffers are then available for debugging use, via
    1.26 - * the %TRACE_xD macros exported in <xen/trace.h>.
    1.27 - */
    1.28 -void init_trace_bufs(void)
    1.29 -{
    1.30 -    extern int alloc_trace_bufs(void);
    1.31 -    
    1.32 -    if ( opt_tbuf_size == 0 )
    1.33 -    {
    1.34 -        printk("Xen trace buffers: disabled\n");
    1.35 -        return;
    1.36 -    }
    1.37 -
    1.38 -    if (alloc_trace_bufs() == 0) {
    1.39 -        printk("Xen trace buffers: initialised\n");
    1.40 -        wmb(); /* above must be visible before tb_init_done flag set */
    1.41 -        tb_init_done = 1;
    1.42 -    }
    1.43 -}
    1.44 +static u32 tb_event_mask = TRC_ALL;
    1.45  
    1.46  /**
    1.47   * alloc_trace_bufs - performs initialization of the per-cpu trace buffers.
    1.48 @@ -82,7 +58,7 @@ void init_trace_bufs(void)
    1.49   * This function may also be called later when enabling trace buffers 
    1.50   * via the SET_SIZE hypercall.
    1.51   */
    1.52 -int alloc_trace_bufs(void)
    1.53 +static int alloc_trace_bufs(void)
    1.54  {
    1.55      int           i, order;
    1.56      unsigned long nr_pages;
    1.57 @@ -125,37 +101,52 @@ int alloc_trace_bufs(void)
    1.58   *
    1.59   * This function is called when the SET_SIZE hypercall is done.
    1.60   */
    1.61 -int tb_set_size(int size)
    1.62 +static int tb_set_size(int size)
    1.63  {
    1.64 -    // There are three cases to handle:
    1.65 -    //  1. Changing from 0 to non-zero ==> simple allocate
    1.66 -    //  2. Changing from non-zero to 0 ==> simple deallocate
    1.67 -    //  3. Changing size ==> deallocate and reallocate? Or disallow?
    1.68 -    //     User can just do a change to 0, then a change to the new size.
    1.69 -    //
    1.70 -    // Tracing must be disabled (tb_init_done==0) before calling this
    1.71 -    
    1.72 -    if (opt_tbuf_size == 0 && size > 0) {
    1.73 -        // What if size is too big? alloc_xenheap will complain.
    1.74 -        opt_tbuf_size = size;
    1.75 -        if (alloc_trace_bufs() != 0)
    1.76 -            return -EINVAL;
    1.77 -        wmb();
    1.78 -        printk("Xen trace buffers: initialized\n");
    1.79 -        return 0;
    1.80 +    /*
    1.81 +     * Setting size is a one-shot operation. It can be done either at
    1.82 +     * boot time or via control tools, but not by both. Once buffers
    1.83 +     * are created they cannot be destroyed.
    1.84 +     */
    1.85 +    if ( (opt_tbuf_size != 0) || (size <= 0) )
    1.86 +    {
    1.87 +        DPRINTK("tb_set_size from %d to %d not implemented\n",
    1.88 +                opt_tbuf_size, size);
    1.89 +        return -EINVAL;
    1.90      }
    1.91 -    else if (opt_tbuf_size > 0 && size == 0) {
    1.92 -        int order = get_order_from_pages(num_online_cpus() * opt_tbuf_size);
    1.93 -        // is there a way to undo SHARE_PFN_WITH_DOMAIN?
    1.94 -        free_xenheap_pages(t_bufs[0], order);
    1.95 +
    1.96 +    opt_tbuf_size = size;
    1.97 +    if ( alloc_trace_bufs() != 0 )
    1.98 +    {
    1.99          opt_tbuf_size = 0;
   1.100 -        printk("Xen trace buffers: uninitialized\n");
   1.101 -        return 0;
   1.102 +        return -EINVAL;
   1.103      }
   1.104 -    else {
   1.105 -        printk("tb_set_size from %d to %d not implemented\n", opt_tbuf_size, size);
   1.106 -        printk("change size from %d to 0, and then to %d\n",  opt_tbuf_size, size);
   1.107 -        return -EINVAL;
   1.108 +
   1.109 +    printk("Xen trace buffers: initialized\n");
   1.110 +    return 0;
   1.111 +}
   1.112 +
   1.113 +
   1.114 +/**
   1.115 + * init_trace_bufs - performs initialization of the per-cpu trace buffers.
   1.116 + *
   1.117 + * This function is called at start of day in order to initialize the per-cpu
   1.118 + * trace buffers.  The trace buffers are then available for debugging use, via
   1.119 + * the %TRACE_xD macros exported in <xen/trace.h>.
   1.120 + */
   1.121 +void init_trace_bufs(void)
   1.122 +{
   1.123 +    if ( opt_tbuf_size == 0 )
   1.124 +    {
   1.125 +        printk("Xen trace buffers: disabled\n");
   1.126 +        return;
   1.127 +    }
   1.128 +
   1.129 +    if ( alloc_trace_bufs() == 0 )
   1.130 +    {
   1.131 +        printk("Xen trace buffers: initialised\n");
   1.132 +        wmb(); /* above must be visible before tb_init_done flag set */
   1.133 +        tb_init_done = 1;
   1.134      }
   1.135  }
   1.136  
   1.137 @@ -169,14 +160,17 @@ int tb_control(dom0_tbufcontrol_t *tbc)
   1.138      static spinlock_t lock = SPIN_LOCK_UNLOCKED;
   1.139      int rc = 0;
   1.140  
   1.141 -    // Commenting this out since we have to allow some of these operations
   1.142 -    // in order to enable dynamic control of the trace buffers.
   1.143 -    //    if ( !tb_init_done )
   1.144 -    //        return -EINVAL;
   1.145 -
   1.146      spin_lock(&lock);
   1.147  
   1.148 -    switch ( tbc->op)
   1.149 +    if ( !tb_init_done &&
   1.150 +         (tbc->op != DOM0_TBUF_SET_SIZE) &&
   1.151 +         (tbc->op != DOM0_TBUF_ENABLE) )
   1.152 +    {
   1.153 +        spin_unlock(&lock);
   1.154 +        return -EINVAL;
   1.155 +    }
   1.156 +
   1.157 +    switch ( tbc->op )
   1.158      {
   1.159      case DOM0_TBUF_GET_INFO:
   1.160          tbc->cpu_mask   = tb_cpu_mask;
   1.161 @@ -191,31 +185,25 @@ int tb_control(dom0_tbufcontrol_t *tbc)
   1.162          tb_event_mask = tbc->evt_mask;
   1.163          break;
   1.164      case DOM0_TBUF_SET_SIZE:
   1.165 -        // Change trace buffer allocation.
   1.166 -        // Trace buffers must be disabled to do this.
   1.167 -        if (tb_init_done) {
   1.168 -            printk("attempt to change size with tbufs enabled\n");
   1.169 -            rc = -EINVAL;
   1.170 -        }
   1.171 -        else
   1.172 -            rc = tb_set_size(tbc->size);
   1.173 +        rc = !tb_init_done ? tb_set_size(tbc->size) : -EINVAL;
   1.174          break;
   1.175      case DOM0_TBUF_ENABLE:
   1.176 -        // Enable trace buffers. Size must be non-zero, ie, buffers
   1.177 -        // must already be allocated. 
   1.178 -        if (opt_tbuf_size == 0) 
   1.179 +        /* Enable trace buffers. Check buffers are already allocated. */
   1.180 +        if ( opt_tbuf_size == 0 ) 
   1.181              rc = -EINVAL;
   1.182          else
   1.183              tb_init_done = 1;
   1.184          break;
   1.185      case DOM0_TBUF_DISABLE:
   1.186 -        // Disable trace buffers. Just stops new records from being written,
   1.187 -        // does not deallocate any memory.
   1.188 +        /*
   1.189 +         * Disable trace buffers. Just stops new records from being written,
   1.190 +         * does not deallocate any memory.
   1.191 +         */
   1.192          tb_init_done = 0;
   1.193 -        printk("Xen trace buffers: disabled\n");
   1.194          break;
   1.195      default:
   1.196          rc = -EINVAL;
   1.197 +        break;
   1.198      }
   1.199  
   1.200      spin_unlock(&lock);
   1.201 @@ -223,6 +211,64 @@ int tb_control(dom0_tbufcontrol_t *tbc)
   1.202      return rc;
   1.203  }
   1.204  
   1.205 +/**
   1.206 + * trace - Enters a trace tuple into the trace buffer for the current CPU.
   1.207 + * @event: the event type being logged
   1.208 + * @d1...d5: the data items for the event being logged
   1.209 + *
   1.210 + * Logs a trace record into the appropriate buffer.  Returns nonzero on
   1.211 + * failure, otherwise 0.  Failure occurs only if the trace buffers are not yet
   1.212 + * initialised.
   1.213 + */
   1.214 +void trace(u32 event, unsigned long d1, unsigned long d2,
   1.215 +           unsigned long d3, unsigned long d4, unsigned long d5)
   1.216 +{
   1.217 +    atomic_t old, new, seen;
   1.218 +    struct t_buf *buf;
   1.219 +    struct t_rec *rec;
   1.220 +
   1.221 +    BUG_ON(!tb_init_done);
   1.222 +
   1.223 +    if ( (tb_event_mask & event) == 0 )
   1.224 +        return;
   1.225 +
   1.226 +    /* match class */
   1.227 +    if ( ((tb_event_mask >> TRC_CLS_SHIFT) & (event >> TRC_CLS_SHIFT)) == 0 )
   1.228 +        return;
   1.229 +
   1.230 +    /* then match subclass */
   1.231 +    if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf )
   1.232 +                & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
   1.233 +        return;
   1.234 +
   1.235 +    if ( (tb_cpu_mask & (1UL << smp_processor_id())) == 0 )
   1.236 +        return;
   1.237 +
   1.238 +    /* Read tb_init_done /before/ t_bufs. */
   1.239 +    rmb();
   1.240 +
   1.241 +    buf = t_bufs[smp_processor_id()];
   1.242 +
   1.243 +    do
   1.244 +    {
   1.245 +        old = buf->rec_idx;
   1.246 +        _atomic_set(new, (_atomic_read(old) + 1) % buf->rec_num);
   1.247 +        seen = atomic_compareandswap(old, new, &buf->rec_idx);
   1.248 +    }
   1.249 +    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
   1.250 +
   1.251 +    wmb();
   1.252 +
   1.253 +    rec = &buf->rec[_atomic_read(old)];
   1.254 +    rdtscll(rec->cycles);
   1.255 +    rec->event   = event;
   1.256 +    rec->data[0] = d1;
   1.257 +    rec->data[1] = d2;
   1.258 +    rec->data[2] = d3;
   1.259 +    rec->data[3] = d4;
   1.260 +    rec->data[4] = d5;
   1.261 +}
   1.262 +
   1.263  /*
   1.264   * Local variables:
   1.265   * mode: C
     2.1 --- a/xen/include/xen/trace.h	Sun Oct 30 22:35:06 2005 +0100
     2.2 +++ b/xen/include/xen/trace.h	Sun Oct 30 23:17:58 2005 +0100
     2.3 @@ -32,10 +32,7 @@
     2.4  #include <public/dom0_ops.h>
     2.5  #include <public/trace.h>
     2.6  
     2.7 -extern struct t_buf *t_bufs[];
     2.8  extern int tb_init_done;
     2.9 -extern unsigned long tb_cpu_mask;
    2.10 -extern u32 tb_event_mask;
    2.11  
    2.12  /* Used to initialise trace buffer functionality */
    2.13  void init_trace_bufs(void);
    2.14 @@ -43,72 +40,20 @@ void init_trace_bufs(void);
    2.15  /* used to retrieve the physical address of the trace buffers */
    2.16  int tb_control(dom0_tbufcontrol_t *tbc);
    2.17  
    2.18 -/**
    2.19 - * trace - Enters a trace tuple into the trace buffer for the current CPU.
    2.20 - * @event: the event type being logged
    2.21 - * @d1...d5: the data items for the event being logged
    2.22 - *
    2.23 - * Logs a trace record into the appropriate buffer.  Returns nonzero on
    2.24 - * failure, otherwise 0.  Failure occurs only if the trace buffers are not yet
    2.25 - * initialised.
    2.26 - */
    2.27 -static inline int trace(u32 event, unsigned long d1, unsigned long d2,
    2.28 -                        unsigned long d3, unsigned long d4, unsigned long d5)
    2.29 -{
    2.30 -    atomic_t old, new, seen;
    2.31 -    struct t_buf *buf;
    2.32 -    struct t_rec *rec;
    2.33 -
    2.34 -    if ( !tb_init_done )
    2.35 -        return -1;
    2.36 -
    2.37 -    if ( (tb_event_mask & event) == 0 )
    2.38 -        return 0;
    2.39 -
    2.40 -    /* match class */
    2.41 -    if ( ((tb_event_mask >> TRC_CLS_SHIFT) & (event >> TRC_CLS_SHIFT)) == 0 )
    2.42 -        return 0;
    2.43 -
    2.44 -    /* then match subclass */
    2.45 -    if ( (((tb_event_mask >> TRC_SUBCLS_SHIFT) & 0xf )
    2.46 -                & ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
    2.47 -        return 0;
    2.48 -
    2.49 -    if ( (tb_cpu_mask & (1UL << smp_processor_id())) == 0 )
    2.50 -        return 0;
    2.51 -
    2.52 -    buf = t_bufs[smp_processor_id()];
    2.53 -
    2.54 -    do
    2.55 -    {
    2.56 -        old = buf->rec_idx;
    2.57 -        _atomic_set(new, (_atomic_read(old) + 1) % buf->rec_num);
    2.58 -        seen = atomic_compareandswap(old, new, &buf->rec_idx);
    2.59 -    }
    2.60 -    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
    2.61 -
    2.62 -    wmb();
    2.63 -
    2.64 -    rec = &buf->rec[_atomic_read(old)];
    2.65 -    rdtscll(rec->cycles);
    2.66 -    rec->event   = event;
    2.67 -    rec->data[0] = d1;
    2.68 -    rec->data[1] = d2;
    2.69 -    rec->data[2] = d3;
    2.70 -    rec->data[3] = d4;
    2.71 -    rec->data[4] = d5;
    2.72 -
    2.73 -    return 0;
    2.74 -}
    2.75 +void trace(u32 event, unsigned long d1, unsigned long d2,
    2.76 +           unsigned long d3, unsigned long d4, unsigned long d5);
    2.77  
    2.78  /* Avoids troubling the caller with casting their arguments to a trace macro */
    2.79 -#define trace_do_casts(e,d1,d2,d3,d4,d5)  \
    2.80 -                 trace(e,                 \
    2.81 -                       (unsigned long)d1, \
    2.82 -                       (unsigned long)d2, \
    2.83 -                       (unsigned long)d3, \
    2.84 -                       (unsigned long)d4, \
    2.85 -                       (unsigned long)d5)
    2.86 +#define trace_do_casts(e,d1,d2,d3,d4,d5) \
    2.87 +    do {                                 \
    2.88 +        if ( tb_init_done )              \
    2.89 +            trace(e,                     \
    2.90 +                 (unsigned long)d1,      \
    2.91 +                 (unsigned long)d2,      \
    2.92 +                 (unsigned long)d3,      \
    2.93 +                 (unsigned long)d4,      \
    2.94 +                 (unsigned long)d5);     \
    2.95 +    } while ( 0 )
    2.96  
    2.97  /* Convenience macros for calling the trace function. */
    2.98  #define TRACE_0D(event)                trace_do_casts(event,0, 0, 0, 0, 0 )