xenbits.xensource.com Git - people/ssmith/netchannel2-pvops.git/commitdiff
manual merge of auto-kmemcheck-next
author Ingo Molnar <mingo@elte.hu>
Tue, 12 May 2009 07:25:29 +0000 (09:25 +0200)
committer Ingo Molnar <mingo@elte.hu>
Tue, 12 May 2009 07:25:29 +0000 (09:25 +0200)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
25 files changed:
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/Makefile
arch/x86/include/asm/page_types.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/thread_info.h
arch/x86/kernel/process.c
arch/x86/kernel/setup.c
arch/x86/kernel/traps.c
arch/x86/mm/fault.c
arch/x86/mm/init.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/pageattr.c
include/linux/interrupt.h
include/linux/ring_buffer.h
include/linux/slab_def.h
init/main.c
kernel/fork.c
kernel/softirq.c
kernel/trace/ring_buffer.c
mm/slab.c
mm/slub.c
net/core/skbuff.c

diff --cc arch/x86/Kconfig
index 41adbced6c546f7e37b2b07d3355ed709cc91596,0ca7498f710100735701a910c17455e3c8e6ff37..2cd91f90db293e5a525cc2bc16498c32611097f1
@@@ -46,12 -46,8 +46,13 @@@ config X86
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_LZMA
+       select HAVE_ARCH_KMEMCHECK
  
 +config OUTPUT_FORMAT
 +      string
 +      default "elf32-i386" if X86_32
 +      default "elf64-x86-64" if X86_64
 +
  config ARCH_DEFCONFIG
        string
        default "arch/x86/configs/i386_defconfig" if X86_32
diff --cc arch/x86/Kconfig.debug
Simple merge
diff --cc arch/x86/Makefile
Simple merge
diff --cc arch/x86/include/asm/page_types.h
Simple merge
diff --cc arch/x86/include/asm/pgtable.h
Simple merge
diff --cc arch/x86/include/asm/pgtable_types.h
Simple merge
diff --cc arch/x86/include/asm/thread_info.h
Simple merge
diff --cc arch/x86/kernel/process.c
Simple merge
diff --cc arch/x86/kernel/setup.c
Simple merge
diff --cc arch/x86/kernel/traps.c
Simple merge
diff --cc arch/x86/mm/fault.c
index 5ec7ae366615458eb18306a8cbd5987c6c06f17c,3fcd79ab6cfbc79c55c8b62335ec02a03b8bfb41..3d1b942f3eebdb7b50d883d1e7fba6028a6514b4
@@@ -3,16 -3,41 +3,17 @@@
   *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
   *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
   */
 -#include <linux/interrupt.h>
 -#include <linux/mmiotrace.h>
 -#include <linux/bootmem.h>
 -#include <linux/compiler.h>
 -#include <linux/highmem.h>
 -#include <linux/kprobes.h>
 -#include <linux/uaccess.h>
 -#include <linux/vmalloc.h>
 -#include <linux/vt_kern.h>
 -#include <linux/signal.h>
 -#include <linux/kernel.h>
 -#include <linux/ptrace.h>
 -#include <linux/string.h>
 -#include <linux/module.h>
 -#include <linux/kdebug.h>
 -#include <linux/errno.h>
 -#include <linux/magic.h>
 -#include <linux/sched.h>
 -#include <linux/types.h>
 -#include <linux/init.h>
 -#include <linux/mman.h>
 -#include <linux/tty.h>
 -#include <linux/smp.h>
 -#include <linux/mm.h>
 -
 -#include <asm-generic/sections.h>
 -
 -#include <asm/kmemcheck.h>
 -#include <asm/tlbflush.h>
 -#include <asm/pgalloc.h>
 -#include <asm/segment.h>
 -#include <asm/system.h>
 -#include <asm/proto.h>
 -#include <asm/traps.h>
 -#include <asm/desc.h>
 +#include <linux/magic.h>              /* STACK_END_MAGIC              */
 +#include <linux/sched.h>              /* test_thread_flag(), ...      */
 +#include <linux/kdebug.h>             /* oops_begin/end, ...          */
 +#include <linux/module.h>             /* search_exception_table       */
 +#include <linux/bootmem.h>            /* max_low_pfn                  */
 +#include <linux/kprobes.h>            /* __kprobes, ...               */
 +#include <linux/mmiotrace.h>          /* kmmio_handler, ...           */
 +
 +#include <asm/traps.h>                        /* dotraplinkage, ...           */
 +#include <asm/pgalloc.h>              /* pgd_*(), ...                 */
++#include <asm/kmemcheck.h>            /* kmemcheck_*(), ...           */
  
  /*
   * Page fault error code bits:
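Besides pulling in <asm/kmemcheck.h> (the "++" line, present in neither parent), the kmemcheck branch rewrites the fault.c include block so each header appears once with a comment naming what it is used for. The hook itself lies outside this excerpt; as a hedged sketch, the page-fault path typically asks kmemcheck early on whether the fault hit one of the pages it deliberately hides (kmemcheck_active(), kmemcheck_hide() and kmemcheck_fault() are real kmemcheck helpers, but the surrounding function below is an assumption, not this tree's do_page_fault()):

    #include <asm/kmemcheck.h>          /* kmemcheck_fault(), ...       */

    /* Hypothetical early check in a kernel-mode fault path. */
    static int example_kernel_fault(struct pt_regs *regs,
                                    unsigned long error_code,
                                    unsigned long address)
    {
            /* if a fault interrupts kmemcheck's single-step window,
             * re-hide its tracked pages first */
            if (kmemcheck_active(regs))
                    kmemcheck_hide(regs);

            if (kmemcheck_fault(regs, address, error_code))
                    return 1;       /* handled by kmemcheck, no oops */

            return 0;               /* fall through to the normal path */
    }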
diff --cc arch/x86/mm/init.c
Simple merge
diff --cc arch/x86/mm/init_32.c
Simple merge
diff --cc arch/x86/mm/init_64.c
Simple merge
diff --cc arch/x86/mm/pageattr.c
Simple merge
diff --cc include/linux/interrupt.h
Simple merge
diff --cc include/linux/ring_buffer.h
index f1345828c7c58dd58d8b2199eda934013b0e803b,ae5b2100f26c13fbaf6fdf5c91783be2207a21a8..4016b5218becb6de1c77dfc73ecc7def804de253
@@@ -11,7 -12,10 +12,9 @@@ struct ring_buffer_iter
   * Don't refer to this struct directly, use functions below.
   */
  struct ring_buffer_event {
-       u32             type_len:5, time_delta:27;
+       kmemcheck_define_bitfield(bitfield, {
 -              u32             type:2, len:3, time_delta:27;
++              u32             type_len:5, time_delta:27;
+       });
 -
        u32             array[];
  };
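kmemcheck tracks initialization at byte granularity, so bitfields that share a byte would raise false uninitialized-use warnings when only some of them have been written. The hunk above therefore wraps the event header bits in kmemcheck_define_bitfield(), and the kernel/trace/ring_buffer.c hunk further down marks the group initialized with kmemcheck_annotate_bitfield(event->bitfield) before the fields are used. A hedged illustration of that pairing (only the two kmemcheck macros are taken from this commit; the struct, function and field accesses around them are assumptions):

    struct example_event {
            kmemcheck_define_bitfield(bitfield, {
                    u32     type_len:5, time_delta:27;
            });
            u32     array[];
    };

    static void example_event_init(struct example_event *event, u32 delta)
    {
            /* tell kmemcheck the shared bitfield bytes are now valid */
            kmemcheck_annotate_bitfield(event->bitfield);
            event->type_len   = 0;
            event->time_delta = delta;
    }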
  
diff --cc include/linux/slab_def.h
index 713f841ecaa914e74aead8e4d8ff5d74cb5040d3,1a78b7ab62a226e391811f9efb9776637aa31337..850d057500dece3f52497b2bed3aab51dd677852
  #include <asm/page.h>         /* kmalloc_sizes.h needs PAGE_SIZE */
  #include <asm/cache.h>                /* kmalloc_sizes.h needs L1_CACHE_BYTES */
  #include <linux/compiler.h>
 -#include <trace/kmemtrace.h>
 +#include <linux/kmemtrace.h>
  
+ /*
+  * struct kmem_cache
+  *
+  * manages a cache.
+  */
+ struct kmem_cache {
+ /* 1) per-cpu data, touched during every alloc/free */
+       struct array_cache *array[NR_CPUS];
+ /* 2) Cache tunables. Protected by cache_chain_mutex */
+       unsigned int batchcount;
+       unsigned int limit;
+       unsigned int shared;
+       unsigned int buffer_size;
+       u32 reciprocal_buffer_size;
+ /* 3) touched by every alloc & free from the backend */
+       unsigned int flags;             /* constant flags */
+       unsigned int num;               /* # of objs per slab */
+ /* 4) cache_grow/shrink */
+       /* order of pgs per slab (2^n) */
+       unsigned int gfporder;
+       /* force GFP flags, e.g. GFP_DMA */
+       gfp_t gfpflags;
+       size_t colour;                  /* cache colouring range */
+       unsigned int colour_off;        /* colour offset */
+       struct kmem_cache *slabp_cache;
+       unsigned int slab_size;
+       unsigned int dflags;            /* dynamic flags */
+       /* constructor func */
+       void (*ctor)(void *obj);
+ /* 5) cache creation/removal */
+       const char *name;
+       struct list_head next;
+ /* 6) statistics */
+ #ifdef CONFIG_DEBUG_SLAB
+       unsigned long num_active;
+       unsigned long num_allocations;
+       unsigned long high_mark;
+       unsigned long grown;
+       unsigned long reaped;
+       unsigned long errors;
+       unsigned long max_freeable;
+       unsigned long node_allocs;
+       unsigned long node_frees;
+       unsigned long node_overflow;
+       atomic_t allochit;
+       atomic_t allocmiss;
+       atomic_t freehit;
+       atomic_t freemiss;
+       /*
+        * If debugging is enabled, then the allocator can add additional
+        * fields and/or padding to every object. buffer_size contains the total
+        * object size including these internal fields, the following two
+        * variables contain the offset to the user object and its size.
+        */
+       int obj_offset;
+       int obj_size;
+ #endif /* CONFIG_DEBUG_SLAB */
+       /*
+        * We put nodelists[] at the end of kmem_cache, because we want to size
+        * this array to nr_node_ids slots instead of MAX_NUMNODES
+        * (see kmem_cache_init())
+        * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
+        * is statically defined, so we reserve the max number of nodes.
+        */
+       struct kmem_list3 *nodelists[MAX_NUMNODES];
+       /*
+        * Do not add fields after nodelists[]
+        */
+ };
  /* Size description struct for general caches. */
  struct cache_sizes {
        size_t                  cs_size;
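Moving the full definition of struct kmem_cache into slab_def.h makes its layout visible outside mm/slab.c. Note the comment above nodelists[]: the array is declared with MAX_NUMNODES slots only so that the statically defined boot-time cache is large enough, while dynamically created caches are allocated with just nr_node_ids entries. A hedged sketch of that trailing-array sizing trick (the helper is hypothetical; the real computation lives in kmem_cache_init()):

    #include <linux/stddef.h>           /* offsetof()                   */

    /* Hypothetical helper: bytes actually needed for one kmem_cache. */
    static size_t example_kmem_cache_size(int nr_node_ids)
    {
            /*
             * nodelists[] is the last member, so storage beyond the
             * first nr_node_ids pointers can simply be omitted.
             */
            return offsetof(struct kmem_cache, nodelists) +
                    nr_node_ids * sizeof(struct kmem_list3 *);
    }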
diff --cc init/main.c
index 7c6a652d3d78c1ab84c963ca06188a60325f27fc,c8b2929f908581eb2b2b9562147d52aa8012bd7c..33ce9297c42e27deca9a821433ac85ace584e102
  #include <linux/sched.h>
  #include <linux/signal.h>
  #include <linux/idr.h>
+ #include <linux/kmemcheck.h>
  #include <linux/ftrace.h>
  #include <linux/async.h>
 +#include <linux/kmemtrace.h>
  #include <trace/boot.h>
  
  #include <asm/io.h>
diff --cc kernel/fork.c
Simple merge
diff --cc kernel/softirq.c
Simple merge
diff --cc kernel/trace/ring_buffer.c
index 361170609bd04b4d2b2971531ad1539061a21226,d26c74af951b99155c823cf436c0d46a5fe420f2..2b2b0d9d916f4d5eddc3919974c24c9a2b986751
@@@ -1154,156 -1156,132 +1155,157 @@@ static unsigned rb_calculate_event_length(unsigned length
        return length;
  }
  
 +
  static struct ring_buffer_event *
 -__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 -                unsigned type, unsigned long length, u64 *ts)
 +rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 +           unsigned long length, unsigned long tail,
 +           struct buffer_page *commit_page,
 +           struct buffer_page *tail_page, u64 *ts)
  {
 -      struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
 -      unsigned long tail, write;
 +      struct buffer_page *next_page, *head_page, *reader_page;
        struct ring_buffer *buffer = cpu_buffer->buffer;
        struct ring_buffer_event *event;
 -      unsigned long flags;
        bool lock_taken = false;
 +      unsigned long flags;
  
 -      commit_page = cpu_buffer->commit_page;
 -      /* we just need to protect against interrupts */
 -      barrier();
 -      tail_page = cpu_buffer->tail_page;
 -      write = local_add_return(length, &tail_page->write);
 -      tail = write - length;
 +      next_page = tail_page;
  
 -      /* See if we shot pass the end of this buffer page */
 -      if (write > BUF_PAGE_SIZE) {
 -              struct buffer_page *next_page = tail_page;
 +      local_irq_save(flags);
 +      /*
 +       * Since the write to the buffer is still not
 +       * fully lockless, we must be careful with NMIs.
 +       * The locks in the writers are taken when a write
 +       * crosses to a new page. The locks protect against
 +       * races with the readers (this will soon be fixed
 +       * with a lockless solution).
 +       *
 +       * Because we can not protect against NMIs, and we
 +       * want to keep traces reentrant, we need to manage
 +       * what happens when we are in an NMI.
 +       *
 +       * NMIs can happen after we take the lock.
 +       * If we are in an NMI, only take the lock
 +       * if it is not already taken. Otherwise
 +       * simply fail.
 +       */
 +      if (unlikely(in_nmi())) {
 +              if (!__raw_spin_trylock(&cpu_buffer->lock)) {
 +                      cpu_buffer->nmi_dropped++;
 +                      goto out_reset;
 +              }
 +      } else
 +              __raw_spin_lock(&cpu_buffer->lock);
  
 -              local_irq_save(flags);
 -              /*
 -               * Since the write to the buffer is still not
 -               * fully lockless, we must be careful with NMIs.
 -               * The locks in the writers are taken when a write
 -               * crosses to a new page. The locks protect against
 -               * races with the readers (this will soon be fixed
 -               * with a lockless solution).
 -               *
 -               * Because we can not protect against NMIs, and we
 -               * want to keep traces reentrant, we need to manage
 -               * what happens when we are in an NMI.
 -               *
 -               * NMIs can happen after we take the lock.
 -               * If we are in an NMI, only take the lock
 -               * if it is not already taken. Otherwise
 -               * simply fail.
 -               */
 -              if (unlikely(in_nmi())) {
 -                      if (!__raw_spin_trylock(&cpu_buffer->lock))
 -                              goto out_reset;
 -              } else
 -                      __raw_spin_lock(&cpu_buffer->lock);
 +      lock_taken = true;
  
 -              lock_taken = true;
 +      rb_inc_page(cpu_buffer, &next_page);
  
 -              rb_inc_page(cpu_buffer, &next_page);
 +      head_page = cpu_buffer->head_page;
 +      reader_page = cpu_buffer->reader_page;
  
 -              head_page = cpu_buffer->head_page;
 -              reader_page = cpu_buffer->reader_page;
 +      /* we grabbed the lock before incrementing */
 +      if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
 +              goto out_reset;
  
 -              /* we grabbed the lock before incrementing */
 -              if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
 -                      goto out_reset;
 +      /*
 +       * If for some reason, we had an interrupt storm that made
 +       * it all the way around the buffer, bail, and warn
 +       * about it.
 +       */
 +      if (unlikely(next_page == commit_page)) {
 +              cpu_buffer->commit_overrun++;
 +              goto out_reset;
 +      }
  
 -              /*
 -               * If for some reason, we had an interrupt storm that made
 -               * it all the way around the buffer, bail, and warn
 -               * about it.
 -               */
 -              if (unlikely(next_page == commit_page)) {
 -                      WARN_ON_ONCE(1);
 +      if (next_page == head_page) {
 +              if (!(buffer->flags & RB_FL_OVERWRITE))
                        goto out_reset;
 -              }
 -
 -              if (next_page == head_page) {
 -                      if (!(buffer->flags & RB_FL_OVERWRITE))
 -                              goto out_reset;
  
 -                      /* tail_page has not moved yet? */
 -                      if (tail_page == cpu_buffer->tail_page) {
 -                              /* count overflows */
 -                              rb_update_overflow(cpu_buffer);
 +              /* tail_page has not moved yet? */
 +              if (tail_page == cpu_buffer->tail_page) {
 +                      /* count overflows */
 +                      cpu_buffer->overrun +=
 +                              local_read(&head_page->entries);
  
 -                              rb_inc_page(cpu_buffer, &head_page);
 -                              cpu_buffer->head_page = head_page;
 -                              cpu_buffer->head_page->read = 0;
 -                      }
 +                      rb_inc_page(cpu_buffer, &head_page);
 +                      cpu_buffer->head_page = head_page;
 +                      cpu_buffer->head_page->read = 0;
                }
 +      }
  
 -              /*
 -               * If the tail page is still the same as what we think
 -               * it is, then it is up to us to update the tail
 -               * pointer.
 -               */
 -              if (tail_page == cpu_buffer->tail_page) {
 -                      local_set(&next_page->write, 0);
 -                      local_set(&next_page->page->commit, 0);
 -                      cpu_buffer->tail_page = next_page;
 +      /*
 +       * If the tail page is still the same as what we think
 +       * it is, then it is up to us to update the tail
 +       * pointer.
 +       */
 +      if (tail_page == cpu_buffer->tail_page) {
 +              local_set(&next_page->write, 0);
 +              local_set(&next_page->entries, 0);
 +              local_set(&next_page->page->commit, 0);
 +              cpu_buffer->tail_page = next_page;
 +
 +              /* reread the time stamp */
 +              *ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
 +              cpu_buffer->tail_page->page->time_stamp = *ts;
 +      }
  
 -                      /* reread the time stamp */
 -                      *ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
 -                      cpu_buffer->tail_page->page->time_stamp = *ts;
 -              }
 +      /*
 +       * The actual tail page has moved forward.
 +       */
 +      if (tail < BUF_PAGE_SIZE) {
 +              /* Mark the rest of the page with padding */
 +              event = __rb_page_index(tail_page, tail);
++              kmemcheck_annotate_bitfield(event->bitfield);
 +              rb_event_set_padding(event);
 +      }
  
 -              /*
 -               * The actual tail page has moved forward.
 -               */
 -              if (tail < BUF_PAGE_SIZE) {
 -                      /* Mark the rest of the page with padding */
 -                      event = __rb_page_index(tail_page, tail);
 -                      kmemcheck_annotate_bitfield(event->bitfield);
 -                      rb_event_set_padding(event);
 -              }
 +      /* Set the write back to the previous setting */
 +      local_sub(length, &tail_page->write);
  
 -              if (tail <= BUF_PAGE_SIZE)
 -                      /* Set the write back to the previous setting */
 -                      local_set(&tail_page->write, tail);
 +      /*
 +       * If this was a commit entry that failed,
 +       * increment that too
 +       */
 +      if (tail_page == cpu_buffer->commit_page &&
 +          tail == rb_commit_index(cpu_buffer)) {
 +              rb_set_commit_to_write(cpu_buffer);
 +      }
  
 -              /*
 -               * If this was a commit entry that failed,
 -               * increment that too
 -               */
 -              if (tail_page == cpu_buffer->commit_page &&
 -                  tail == rb_commit_index(cpu_buffer)) {
 -                      rb_set_commit_to_write(cpu_buffer);
 -              }
 +      __raw_spin_unlock(&cpu_buffer->lock);
 +      local_irq_restore(flags);
 +
 +      /* fail and let the caller try again */
 +      return ERR_PTR(-EAGAIN);
 +
 + out_reset:
 +      /* reset write */
 +      local_sub(length, &tail_page->write);
  
 +      if (likely(lock_taken))
                __raw_spin_unlock(&cpu_buffer->lock);
 -              local_irq_restore(flags);
 +      local_irq_restore(flags);
 +      return NULL;
 +}
  
 -              /* fail and let the caller try again */
 -              return ERR_PTR(-EAGAIN);
 -      }
 +static struct ring_buffer_event *
 +__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 +                unsigned type, unsigned long length, u64 *ts)
 +{
 +      struct buffer_page *tail_page, *commit_page;
 +      struct ring_buffer_event *event;
 +      unsigned long tail, write;
 +
 +      commit_page = cpu_buffer->commit_page;
 +      /* we just need to protect against interrupts */
 +      barrier();
 +      tail_page = cpu_buffer->tail_page;
 +      write = local_add_return(length, &tail_page->write);
 +      tail = write - length;
 +
 +      /* See if we shot pass the end of this buffer page */
 +      if (write > BUF_PAGE_SIZE)
 +              return rb_move_tail(cpu_buffer, length, tail,
 +                                  commit_page, tail_page, ts);
  
        /* We reserved something on the buffer */
  
                return NULL;
  
        event = __rb_page_index(tail_page, tail);
+       kmemcheck_annotate_bitfield(event->bitfield);
        rb_update_event(event, type, length);
  
 +      /* The passed in type is zero for DATA */
 +      if (likely(!type))
 +              local_inc(&tail_page->entries);
 +
        /*
         * If this is a commit and the tail is zero, then update
         * this page's time stamp.
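Most of this hunk is mechanical re-indentation: the page-crossing slow path of __rb_reserve_next() moves into the new rb_move_tail(), taking the NMI-aware locking comment and the overrun accounting with it. The contract between the two shows in the return values: rb_move_tail() hands back ERR_PTR(-EAGAIN) when the writer merely lost a race and should retry, and NULL when the reservation must fail. A hedged sketch of the caller side (illustrative only; the real caller in ring_buffer.c also bounds the number of retries):

    static struct ring_buffer_event *
    example_reserve(struct ring_buffer_per_cpu *cpu_buffer,
                    unsigned type, unsigned long length, u64 *ts)
    {
            struct ring_buffer_event *event;

            do {
                    /* page-crossing writes go through rb_move_tail() */
                    event = __rb_reserve_next(cpu_buffer, type, length, ts);
            } while (PTR_ERR(event) == -EAGAIN);

            return event;   /* a reserved event, or NULL on failure */
    }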
diff --cc mm/slab.c
Simple merge
diff --cc mm/slub.c
Simple merge
diff --cc net/core/skbuff.c
Simple merge