ia64/xen-unstable

changeset 13309:762cb69ce3be

Enable compatibility mode operation for trace buffer access.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Emmanuel Ackaouy <ack@xensource.com>
date Fri Jan 05 17:34:40 2007 +0000 (2007-01-05)
parents 5442b2458e1b
children b8eeb4537e09
files xen/common/trace.c xen/include/xlat.lst
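With CONFIG_COMPAT (a 32-bit dom0 hosted by a 64-bit hypervisor), the trace records the hypervisor writes have to use the layout that dom0's tools will read. struct t_rec carries unsigned long data[5], so its size differs between the native and the compat ABI, whereas struct t_buf (just the cons/prod counters) is layout-identical in both. The patch therefore keeps a per-CPU union of native/compat record pointers, sizes the buffers by whichever record type the consumer expects, and writes records through the compat view whenever TB_COMPAT, i.e. IS_COMPAT(dom0), is true.

The standalone sketch below (not Xen code; the *_sketch names are illustrative and the field lists simply mirror the cycles/event/data[5] layout visible in the diff) shows why the two ABIs disagree on record size and hence on nr_recs:

/* Standalone sketch: compile on a 64-bit host to see the size gap. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct t_buf_sketch {                /* same layout in both ABIs          */
    uint32_t cons, prod;
};

struct t_rec_native_sketch {         /* the 64-bit hypervisor's own view  */
    uint64_t cycles;
    uint32_t event;
    uint64_t data[5];                /* unsigned long is 8 bytes here     */
};

struct t_rec_compat_sketch {         /* what a 32-bit dom0 tool expects   */
    uint64_t cycles;
    uint32_t event;
    uint32_t data[5];                /* unsigned long is 4 bytes here     */
};

int main(void)
{
    /* Mirrors the nr_recs computation in alloc_trace_bufs(). */
    unsigned int nat = (PAGE_SIZE - sizeof(struct t_buf_sketch)) /
                       sizeof(struct t_rec_native_sketch);
    unsigned int cmp = (PAGE_SIZE - sizeof(struct t_buf_sketch)) /
                       sizeof(struct t_rec_compat_sketch);

    printf("records per page: native=%u, compat=%u\n", nat, cmp);
    return 0;
}

Because the sizes differ, nr_recs (and with it the 50% high-water mark) must be computed against whichever record type will actually be parsed, which is what the !TB_COMPAT ? sizeof(struct t_rec) : sizeof(struct compat_t_rec) expression in alloc_trace_bufs() does. The full diff against xen/common/trace.c and xen/include/xlat.lst follows.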
--- a/xen/common/trace.c	Fri Jan 05 17:34:39 2007 +0000
+++ b/xen/common/trace.c	Fri Jan 05 17:34:40 2007 +0000
@@ -32,13 +32,29 @@
 #include <asm/atomic.h>
 #include <public/sysctl.h>
 
+#ifdef CONFIG_COMPAT
+#include <compat/trace.h>
+#define xen_t_buf t_buf
+CHECK_t_buf;
+#undef xen_t_buf
+#define TB_COMPAT IS_COMPAT(dom0)
+#else
+#define compat_t_rec t_rec
+#define TB_COMPAT 0
+#endif
+
+typedef union {
+	struct t_rec *nat;
+	struct compat_t_rec *cmp;
+} t_rec_u;
+
 /* opt_tbuf_size: trace buffer size (in pages) */
 static unsigned int opt_tbuf_size = 0;
 integer_param("tbuf_size", opt_tbuf_size);
 
 /* Pointers to the meta-data objects for all system trace buffers */
 static DEFINE_PER_CPU(struct t_buf *, t_bufs);
-static DEFINE_PER_CPU(struct t_rec *, t_recs);
+static DEFINE_PER_CPU(t_rec_u, t_recs);
 static int nr_recs;
 
 /* High water mark for trace buffers; */
@@ -87,7 +103,7 @@ static int alloc_trace_bufs(void)
     nr_pages = num_online_cpus() * opt_tbuf_size;
     order    = get_order_from_pages(nr_pages);
     nr_recs  = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf)) /
-        sizeof(struct t_rec);
+        (!TB_COMPAT ? sizeof(struct t_rec) : sizeof(struct compat_t_rec));
     
     if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
     {
@@ -106,7 +122,7 @@ static int alloc_trace_bufs(void)
         buf = per_cpu(t_bufs, i) = (struct t_buf *)
             &rawbuf[i*opt_tbuf_size*PAGE_SIZE];
         buf->cons = buf->prod = 0;
-        per_cpu(t_recs, i) = (struct t_rec *)(buf + 1);
+        per_cpu(t_recs, i).nat = (struct t_rec *)(buf + 1);
     }
 
     t_buf_highwater = nr_recs >> 1; /* 50% high water */
@@ -232,7 +248,7 @@ void trace(u32 event, unsigned long d1, 
            unsigned long d3, unsigned long d4, unsigned long d5)
 {
     struct t_buf *buf;
-    struct t_rec *rec;
+    t_rec_u rec;
     unsigned long flags;
     
     BUG_ON(!tb_init_done);
@@ -269,25 +285,51 @@ void trace(u32 event, unsigned long d1, 
 
     if ( unlikely(this_cpu(lost_records) != 0) )
     {
-        rec = &this_cpu(t_recs)[buf->prod % nr_recs];
-        memset(rec, 0, sizeof(*rec));
-        rec->cycles  = (u64)get_cycles();
-        rec->event   = TRC_LOST_RECORDS;
-        rec->data[0] = this_cpu(lost_records);
-        this_cpu(lost_records) = 0;
+        if ( !TB_COMPAT )
+        {
+            rec.nat = &this_cpu(t_recs).nat[buf->prod % nr_recs];
+            memset(rec.nat, 0, sizeof(*rec.nat));
+            rec.nat->cycles  = (u64)get_cycles();
+            rec.nat->event   = TRC_LOST_RECORDS;
+            rec.nat->data[0] = this_cpu(lost_records);
+            this_cpu(lost_records) = 0;
+        }
+        else
+        {
+            rec.cmp = &this_cpu(t_recs).cmp[buf->prod % nr_recs];
+            memset(rec.cmp, 0, sizeof(*rec.cmp));
+            rec.cmp->cycles  = (u64)get_cycles();
+            rec.cmp->event   = TRC_LOST_RECORDS;
+            rec.cmp->data[0] = this_cpu(lost_records);
+            this_cpu(lost_records) = 0;
+        }
 
         wmb();
         buf->prod++;
     }
 
-    rec = &this_cpu(t_recs)[buf->prod % nr_recs];
-    rec->cycles  = (u64)get_cycles();
-    rec->event   = event;
-    rec->data[0] = d1;
-    rec->data[1] = d2;
-    rec->data[2] = d3;
-    rec->data[3] = d4;
-    rec->data[4] = d5;
+    if ( !TB_COMPAT )
+    {
+        rec.nat = &this_cpu(t_recs).nat[buf->prod % nr_recs];
+        rec.nat->cycles  = (u64)get_cycles();
+        rec.nat->event   = event;
+        rec.nat->data[0] = d1;
+        rec.nat->data[1] = d2;
+        rec.nat->data[2] = d3;
+        rec.nat->data[3] = d4;
+        rec.nat->data[4] = d5;
+    }
+    else
+    {
+        rec.cmp = &this_cpu(t_recs).cmp[buf->prod % nr_recs];
+        rec.cmp->cycles  = (u64)get_cycles();
+        rec.cmp->event   = event;
+        rec.cmp->data[0] = d1;
+        rec.cmp->data[1] = d2;
+        rec.cmp->data[2] = d3;
+        rec.cmp->data[3] = d4;
+        rec.cmp->data[4] = d5;
+    }
 
     wmb();
     buf->prod++;
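The producer protocol itself is unchanged: a record is filled in at prod % nr_recs, wmb() orders the stores, and only then is prod advanced, so a consumer never observes a partially written record. Note also that in compat mode the unsigned long arguments d1..d5 are stored into 32-bit data[] slots, so their upper halves are dropped, which is all a 32-bit consumer could represent anyway. A much simplified, hypothetical dom0-side read loop (not the actual xentrace implementation; the type definitions here only mirror the cons/prod header plus fixed-size records laid out in alloc_trace_bufs()) might look like this when built as a 32-bit tool:

/* Hypothetical consumer sketch; real code would add a read barrier after  */
/* sampling prod to pair with the producer's wmb().                        */
#include <stdint.h>
#include <stdio.h>

struct t_buf { uint32_t cons, prod; };

struct t_rec {
    uint64_t cycles;
    uint32_t event;
    unsigned long data[5];           /* 4 bytes each in a 32-bit build     */
};

static void handle(const struct t_rec *rec)
{
    printf("event %#x at cycle %llu\n", rec->event,
           (unsigned long long)rec->cycles);
}

void drain_one_buffer(struct t_buf *buf, struct t_rec *recs,
                      unsigned int nr_recs)
{
    while ( buf->cons != buf->prod )
    {
        handle(&recs[buf->cons % nr_recs]);
        buf->cons++;                 /* publish consumption to the producer */
    }
}

The hypervisor-side change exists precisely so that the records such a tool sees match the struct t_rec it was compiled against.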
--- a/xen/include/xlat.lst	Fri Jan 05 17:34:39 2007 +0000
+++ b/xen/include/xlat.lst	Fri Jan 05 17:34:40 2007 +0000
@@ -44,6 +44,7 @@
 ?	sysctl_perfc_desc		sysctl.h
 !	sysctl_perfc_op			sysctl.h
 !	sysctl_tbuf_op			sysctl.h
+?	t_buf				trace.h
 !	vcpu_runstate_info		vcpu.h
 ?	xenoprof_init			xenoprof.h
 ?	xenoprof_passive		xenoprof.h
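In xlat.lst, a leading "?" asks the compat header generation to emit layout checks for the structure (the CHECK_* macros), while a leading "!" marks structures whose compat form actually needs translation. struct t_buf only contains the cons/prod counters visible above, so its native and compat layouts coincide and a check suffices; that is what the new "? t_buf trace.h" entry provides, and the #define xen_t_buf t_buf / CHECK_t_buf; / #undef sequence added to trace.c invokes that generated check at build time. A rough, standalone illustration of the kind of assertion such a check amounts to (this is not the generated Xen macro; the struct definitions only echo the cons/prod layout seen in the diff):

/* Illustration only: the real CHECK_t_buf lives in the generated compat   */
/* headers.  It boils down to the native and compat views of struct t_buf  */
/* agreeing on size and field placement.                                    */
#include <stddef.h>
#include <stdint.h>

struct t_buf        { uint32_t cons, prod; };   /* native view             */
struct compat_t_buf { uint32_t cons, prod; };   /* 32-bit dom0's view      */

_Static_assert(sizeof(struct t_buf) == sizeof(struct compat_t_buf),
               "t_buf must have the same size in both ABIs");
_Static_assert(offsetof(struct t_buf, prod) ==
               offsetof(struct compat_t_buf, prod),
               "t_buf.prod must sit at the same offset in both ABIs");

The records, by contrast, cannot be checked into equivalence, which is why trace.c handles them explicitly via the t_rec_u union and the TB_COMPAT branches rather than through an xlat.lst entry.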