direct-io.hg

changeset 7571:8cc7ce549d00

Sanitise the trace-buffer hypervisor<->user interface.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Oct 31 10:45:31 2005 +0100 (2005-10-31)
parents 7d0d88685f79
children ea6d9f29dff5 7c30e9215558
files tools/libxc/xenctrl.h tools/xentrace/xentrace.c xen/common/trace.c xen/include/public/trace.h xen/include/xen/trace.h
line diff
     1.1 --- a/tools/libxc/xenctrl.h	Sun Oct 30 23:30:41 2005 +0100
     1.2 +++ b/tools/libxc/xenctrl.h	Mon Oct 31 10:45:31 2005 +0100
     1.3 @@ -507,12 +507,10 @@ long xc_get_tot_pages(int xc_handle, uin
     1.4  int xc_tbuf_enable(int xc_handle, int enable);
     1.5  
     1.6  /**
     1.7 - * This function sets the size of the trace buffers. Setting it to zero
     1.8 - * deallocates the memory used for trace buffers, and setting it to a
     1.9 - * non-zero value specifies the number of pages per cpu to allocate.
    1.10 - * To change the size of an existing allocation, you must first deallocate
    1.11 - * it then reallocate it. No change in size is allowed when tracing is
    1.12 - * enabled; A disable call must be made first.
    1.13 + * This function sets the size of the trace buffers. Setting the size
    1.14 + * is currently a one-shot operation that may be performed either at boot
    1.15 + * time or via this interface, not both. The buffer size must be set before
    1.16 + * enabling tracing.
    1.17   *
    1.18   * @parm xc_handle a handle to an open hypervisor interface
    1.19   * @parm size the size in pages per cpu for the trace buffers
     2.1 --- a/tools/xentrace/xentrace.c	Sun Oct 30 23:30:41 2005 +0100
     2.2 +++ b/tools/xentrace/xentrace.c	Mon Oct 31 10:45:31 2005 +0100
     2.3 @@ -23,9 +23,6 @@
     2.4  
     2.5  #include "xc_private.h"
     2.6  
     2.7 -typedef struct { int counter; } atomic_t;
     2.8 -#define _atomic_read(v)		((v).counter)
     2.9 -
    2.10  #include <xen/trace.h>
    2.11  
    2.12  extern FILE *stderr;
    2.13 @@ -148,7 +145,7 @@ struct t_buf *map_tbufs(unsigned long tb
    2.14      }
    2.15  
    2.16      tbufs_mapped = xc_map_foreign_range(xc_handle, 0 /* Dom 0 ID */,
    2.17 -                                        size * num, PROT_READ,
    2.18 +                                        size * num, PROT_READ | PROT_WRITE,
    2.19                                          tbufs_mfn);
    2.20  
    2.21      xc_interface_close(xc_handle);
    2.22 @@ -240,10 +237,7 @@ struct t_buf **init_bufs_ptrs(void *bufs
    2.23   * mapped in user space.  Note that the trace buffer metadata contains machine
    2.24   * pointers - the array returned allows more convenient access to them.
    2.25   */
    2.26 -struct t_rec **init_rec_ptrs(unsigned long tbufs_mfn,
    2.27 -                             struct t_buf *tbufs_mapped,
    2.28 -                             struct t_buf **meta,
    2.29 -                             unsigned int num)
    2.30 +struct t_rec **init_rec_ptrs(struct t_buf **meta, unsigned int num)
    2.31  {
    2.32      int i;
    2.33      struct t_rec **data;
    2.34 @@ -256,39 +250,12 @@ struct t_rec **init_rec_ptrs(unsigned lo
    2.35      }
    2.36  
    2.37      for ( i = 0; i < num; i++ )
    2.38 -        data[i] = (struct t_rec *)(meta[i]->rec_addr - (tbufs_mfn<<XC_PAGE_SHIFT) /* XXX */
    2.39 -                                   + (unsigned long)tbufs_mapped);
    2.40 +        data[i] = (struct t_rec *)(meta[i] + 1);
    2.41  
    2.42      return data;
    2.43  }
    2.44  
    2.45  /**
    2.46 - * init_tail_idxs - initialise an array of tail indexes
    2.47 - * @bufs:           array of pointers to trace buffer metadata
    2.48 - * @num:            number of trace buffers
    2.49 - *
    2.50 - * The tail indexes indicate where we're read to so far in the data array of a
    2.51 - * trace buffer.  Each entry in this table corresponds to the tail index for a
    2.52 - * particular trace buffer.
    2.53 - */
    2.54 -unsigned long *init_tail_idxs(struct t_buf **bufs, unsigned int num)
    2.55 -{
    2.56 -    int i;
    2.57 -    unsigned long *tails = calloc(num, sizeof(unsigned int));
    2.58 - 
    2.59 -    if ( tails == NULL )
    2.60 -    {
    2.61 -        PERROR("Failed to allocate memory for tail pointers\n");
    2.62 -        exit(EXIT_FAILURE);
    2.63 -    }
    2.64 -    
    2.65 -    for ( i = 0; i<num; i++ )
    2.66 -        tails[i] = _atomic_read(bufs[i]->rec_idx);
    2.67 -
    2.68 -    return tails;
    2.69 -}
    2.70 -
    2.71 -/**
    2.72   * get_num_cpus - get the number of logical CPUs
    2.73   */
    2.74  unsigned int get_num_cpus()
    2.75 @@ -329,7 +296,6 @@ int monitor_tbufs(FILE *logfile)
    2.76      struct t_buf **meta;         /* pointers to the trace buffer metadata    */
    2.77      struct t_rec **data;         /* pointers to the trace buffer data areas
    2.78                                    * where they are mapped into user space.   */
    2.79 -    unsigned long *cons;         /* store tail indexes for the trace buffers */
    2.80      unsigned long tbufs_mfn;     /* mfn of the tbufs                         */
    2.81      unsigned int  num;           /* number of trace buffers / logical CPUS   */
    2.82      unsigned long size;          /* size of a single trace buffer            */
    2.83 @@ -346,19 +312,22 @@ int monitor_tbufs(FILE *logfile)
    2.84      size_in_recs = (size - sizeof(struct t_buf)) / sizeof(struct t_rec);
    2.85  
    2.86      /* build arrays of convenience ptrs */
    2.87 -    meta  = init_bufs_ptrs (tbufs_mapped, num, size);
    2.88 -    data  = init_rec_ptrs  (tbufs_mfn, tbufs_mapped, meta, num);
    2.89 -    cons  = init_tail_idxs (meta, num);
    2.90 +    meta  = init_bufs_ptrs(tbufs_mapped, num, size);
    2.91 +    data  = init_rec_ptrs(meta, num);
    2.92  
    2.93      /* now, scan buffers for events */
    2.94      while ( !interrupted )
    2.95      {
    2.96 -        for ( i = 0; ( i < num ) && !interrupted; i++ )
    2.97 -            while( cons[i] != _atomic_read(meta[i]->rec_idx) )
    2.98 +        for ( i = 0; (i < num) && !interrupted; i++ )
    2.99 +        {
   2.100 +            while ( meta[i]->cons != meta[i]->prod )
   2.101              {
   2.102 -                write_rec(i, data[i] + cons[i], logfile);
   2.103 -                cons[i] = (cons[i] + 1) % size_in_recs;
   2.104 +                rmb(); /* read prod, then read item. */
   2.105 +                write_rec(i, data[i] + meta[i]->cons % size_in_recs, logfile);
   2.106 +                mb(); /* read item, then update cons. */
   2.107 +                meta[i]->cons++;
   2.108              }
   2.109 +        }
   2.110  
   2.111          nanosleep(&opts.poll_sleep, NULL);
   2.112      }
   2.113 @@ -366,7 +335,6 @@ int monitor_tbufs(FILE *logfile)
   2.114      /* cleanup */
   2.115      free(meta);
   2.116      free(data);
   2.117 -    free(cons);
   2.118      /* don't need to munmap - cleanup is automatic */
   2.119      fclose(logfile);
   2.120  
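
The rewritten loop in monitor_tbufs() is the consumer half of a simple
single-producer/single-consumer ring: only Xen writes prod, only the tool
writes cons, and the barriers enforce "read prod, then read the record" and
"finish with the record, then advance cons". A standalone sketch of the same
protocol follows; it is not part of the patch, process_record() is a
hypothetical stand-in for write_rec(), __sync_synchronize() stands in for the
rmb()/mb() macros xentrace.c gets from the Xen headers, and the structs mirror
only the fields referenced in the diff.

    #include <stdio.h>

    struct t_buf {
        unsigned int cons;      /* next record to be consumed by the tool */
        unsigned int prod;      /* next record to be produced by Xen      */
        unsigned int nr_recs;   /* capacity of the ring, in records       */
    };

    struct t_rec {
        unsigned long long cycles;   /* cycle-counter timestamp */
        unsigned int       event;    /* event ID                */
        unsigned long      data[5];  /* event payload           */
    };

    /* Hypothetical consumer hook; a real tool writes the record to a log. */
    static void process_record(struct t_rec *rec)
    {
        printf("event 0x%08x cycles %llu\n", rec->event, rec->cycles);
    }

    static void drain_buffer(struct t_buf *meta, struct t_rec *data)
    {
        while ( meta->cons != meta->prod )
        {
            __sync_synchronize();  /* read prod, then read the record      */
            process_record(&data[meta->cons % meta->nr_recs]);
            __sync_synchronize();  /* finish with the record, bump cons    */
            meta->cons++;          /* cons is written only by the consumer */
        }
    }
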
     3.1 --- a/xen/common/trace.c	Sun Oct 30 23:30:41 2005 +0100
     3.2 +++ b/xen/common/trace.c	Mon Oct 31 10:45:31 2005 +0100
     3.3 @@ -37,6 +37,8 @@ integer_param("tbuf_size", opt_tbuf_size
     3.4  
     3.5  /* Pointers to the meta-data objects for all system trace buffers */
     3.6  static struct t_buf *t_bufs[NR_CPUS];
     3.7 +static struct t_rec *t_recs[NR_CPUS];
     3.8 +static int nr_recs;
     3.9  
    3.10  /* a flag recording whether initialization has been done */
    3.11  /* or more properly, if the tbuf subsystem is enabled right now */
    3.12 @@ -70,6 +72,8 @@ static int alloc_trace_bufs(void)
    3.13  
    3.14      nr_pages = num_online_cpus() * opt_tbuf_size;
    3.15      order    = get_order_from_pages(nr_pages);
    3.16 +    nr_recs  = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf)) /
    3.17 +        sizeof(struct t_rec);
    3.18      
    3.19      if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
    3.20      {
    3.21 @@ -84,13 +88,11 @@ static int alloc_trace_bufs(void)
    3.22      for_each_online_cpu ( i )
    3.23      {
    3.24          buf = t_bufs[i] = (struct t_buf *)&rawbuf[i*opt_tbuf_size*PAGE_SIZE];
    3.25 -        
    3.26 -        _atomic_set(buf->rec_idx, 0);
    3.27 -        buf->rec_num  = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf))
    3.28 -                        / sizeof(struct t_rec);
    3.29 -        buf->rec      = (struct t_rec *)(buf + 1);
    3.30 -        buf->rec_addr = __pa(buf->rec);
    3.31 +        buf->cons = buf->prod = 0;
    3.32 +        buf->nr_recs = nr_recs;
    3.33 +        t_recs[i] = (struct t_rec *)(buf + 1);
    3.34      }
    3.35 +
    3.36      return 0;
    3.37  }
    3.38  
    3.39 @@ -223,9 +225,9 @@ int tb_control(dom0_tbufcontrol_t *tbc)
    3.40  void trace(u32 event, unsigned long d1, unsigned long d2,
    3.41             unsigned long d3, unsigned long d4, unsigned long d5)
    3.42  {
    3.43 -    atomic_t old, new, seen;
    3.44      struct t_buf *buf;
    3.45      struct t_rec *rec;
    3.46 +    unsigned long flags;
    3.47  
    3.48      BUG_ON(!tb_init_done);
    3.49  
    3.50 @@ -249,17 +251,15 @@ void trace(u32 event, unsigned long d1, 
    3.51  
    3.52      buf = t_bufs[smp_processor_id()];
    3.53  
    3.54 -    do
    3.55 +    local_irq_save(flags);
    3.56 +
    3.57 +    if ( (buf->prod - buf->cons) >= nr_recs )
    3.58      {
    3.59 -        old = buf->rec_idx;
    3.60 -        _atomic_set(new, (_atomic_read(old) + 1) % buf->rec_num);
    3.61 -        seen = atomic_compareandswap(old, new, &buf->rec_idx);
    3.62 +        local_irq_restore(flags);
    3.63 +        return;
    3.64      }
    3.65 -    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
    3.66  
    3.67 -    wmb();
    3.68 -
    3.69 -    rec = &buf->rec[_atomic_read(old)];
    3.70 +    rec = &t_recs[smp_processor_id()][buf->prod % nr_recs];
    3.71      rdtscll(rec->cycles);
    3.72      rec->event   = event;
    3.73      rec->data[0] = d1;
    3.74 @@ -267,6 +267,11 @@ void trace(u32 event, unsigned long d1, 
    3.75      rec->data[2] = d3;
    3.76      rec->data[3] = d4;
    3.77      rec->data[4] = d5;
    3.78 +
    3.79 +    wmb();
    3.80 +    buf->prod++;
    3.81 +
    3.82 +    local_irq_restore(flags);
    3.83  }
    3.84  
    3.85  /*
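
The producer keeps cons and prod free-running: they are reduced modulo nr_recs
only when indexing, so the fullness test (prod - cons) >= nr_recs and the slot
computation stay correct even when the 32-bit counters wrap, provided nr_recs
is far smaller than UINT_MAX. A small self-check of that unsigned-wraparound
property (not part of the patch):

    #include <assert.h>

    int main(void)
    {
        unsigned int prod    = 0x00000001U;  /* producer has wrapped past 2^32 */
        unsigned int cons    = 0xffffffffU;  /* consumer has not wrapped yet   */
        unsigned int nr_recs = 1024;

        assert(prod - cons == 2);          /* occupancy is still correct       */
        assert((prod - cons) < nr_recs);   /* ring correctly seen as not full  */
        assert(cons % nr_recs == 1023);    /* next slot to consume still valid */
        return 0;
    }
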
     4.1 --- a/xen/include/public/trace.h	Sun Oct 30 23:30:41 2005 +0100
     4.2 +++ b/xen/include/public/trace.h	Mon Oct 31 10:45:31 2005 +0100
     4.3 @@ -65,13 +65,10 @@ struct t_rec {
     4.4   * field, indexes into an array of struct t_rec's.
     4.5   */
     4.6  struct t_buf {
     4.7 -    /* Used by both Xen and user space. */
     4.8 -    atomic_t      rec_idx;   /* the next record to save to */
     4.9 -    unsigned int  rec_num;   /* number of records in this trace buffer  */
    4.10 -    /* Used by Xen only. */
    4.11 -    struct t_rec  *rec;      /* start of records */
    4.12 -    /* Used by user space only. */
    4.13 -    unsigned long rec_addr;  /* machine address of the start of records */
    4.14 +    unsigned int  cons;      /* Next item to be consumed by control tools. */
    4.15 +    unsigned int  prod;      /* Next item to be produced by Xen.           */
    4.16 +    unsigned int  nr_recs;   /* Number of records in this trace buffer.    */
    4.17 +    /* 'nr_recs' records follow immediately after the meta-data header.    */
    4.18  };
    4.19  
    4.20  #endif /* __XEN_PUBLIC_TRACE_H__ */
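
The comment in the new header captures the layout contract: the record array
follows the metadata immediately, which is why Xen locates it as
(struct t_rec *)(buf + 1) and the tools as (struct t_rec *)(meta[i] + 1), with
no machine address needed any more. A layout sketch (not part of the patch),
following the per-CPU allocation in xen/common/trace.c:

    /*
     * Per-CPU trace buffer layout implied by the new struct t_buf:
     *
     *   +----------------------+  <- start of this CPU's buffer
     *   | struct t_buf         |     cons / prod / nr_recs
     *   +----------------------+  <- (struct t_rec *)(buf + 1)
     *   | t_rec[0]             |
     *   | t_rec[1]             |
     *   |   ...                |
     *   | t_rec[nr_recs - 1]   |
     *   +----------------------+  <- start + opt_tbuf_size * PAGE_SIZE
     */
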
     5.1 --- a/xen/include/xen/trace.h	Sun Oct 30 23:30:41 2005 +0100
     5.2 +++ b/xen/include/xen/trace.h	Mon Oct 31 10:45:31 2005 +0100
     5.3 @@ -24,7 +24,6 @@
     5.4  #define __XEN_TRACE_H__
     5.5  
     5.6  #include <xen/config.h>
     5.7 -#include <asm/atomic.h>
     5.8  #include <public/dom0_ops.h>
     5.9  #include <public/trace.h>
    5.10