xenbits.xensource.com Git - people/ssmith/netchannel2-pvops.bak.git/.git/commitdiff
Merge branch 'linus' into kmemcheck
authorIngo Molnar <mingo@elte.hu>
Tue, 7 Apr 2009 04:54:16 +0000 (06:54 +0200)
committerIngo Molnar <mingo@elte.hu>
Tue, 7 Apr 2009 04:54:16 +0000 (06:54 +0200)
Conflicts:
MAINTAINERS
arch/x86/Kconfig.debug
arch/x86/kernel/head_32.S
include/linux/mm_types.h
kernel/fork.c
kernel/trace/ring_buffer.c

34 files changed:
1  2 
MAINTAINERS
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/Makefile
arch/x86/include/asm/dma-mapping.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/thread_info.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/process.c
arch/x86/kernel/setup.c
arch/x86/mm/Makefile
arch/x86/mm/init.c
arch/x86/mm/pageattr.c
crypto/xor.c
drivers/ieee1394/nodemgr.c
include/linux/gfp.h
include/linux/interrupt.h
include/linux/mm_types.h
include/linux/ring_buffer.h
include/linux/skbuff.h
include/linux/slab_def.h
include/net/sock.h
init/main.c
kernel/fork.c
kernel/softirq.c
kernel/sysctl.c
kernel/trace/ring_buffer.c
lib/Kconfig.debug
mm/Makefile
mm/page_alloc.c
mm/slab.c
mm/slub.c
net/core/skbuff.c
net/core/sock.c

diff --cc MAINTAINERS
index 75859ec3a1144af93adf5f3e4ab579de653d829a,9673cd28a69b3049f1c9f7b5b3eda9aa279404d1..b86ad83b8049379b42b017220d819e32db3d2cac
@@@ -2623,14 -2659,12 +2659,20 @@@ M:   jason.wessel@windriver.co
  L:    kgdb-bugreport@lists.sourceforge.net
  S:    Maintained
  
 +KMEMCHECK
 +P:    Vegard Nossum
 +M:    vegardno@ifi.uio.no
 +P:    Pekka Enberg
 +M:    penberg@cs.helsinki.fi
 +L:    linux-kernel@vger.kernel.org
 +S:    Maintained
 +
+ KMEMTRACE
+ P:    Eduard - Gabriel Munteanu
+ M:    eduard.munteanu@linux360.ro
+ L:    linux-kernel@vger.kernel.org
+ S:    Maintained
+ 
  KPROBES
  P:    Ananth N Mavinakayanahalli
  M:    ananth@in.ibm.com
Simple merge
index c2c1fb6aec75eca4462e74e02912067b70ecf48d,d8359e73317f8dfb8b929f15f555f5593469e7fd..7d50631e8b8acd5f92878e6444b26081675cccfc
@@@ -66,6 -66,6 +66,7 @@@ config DEBUG_STACKOVERFLO
  config DEBUG_STACK_USAGE
        bool "Stack utilization instrumentation"
        depends on DEBUG_KERNEL
++      depends on !KMEMCHECK
        ---help---
          Enables the display of the minimum amount of free stack which each
          task has ever had available in the sysrq-T and sysrq-P debug output.
Simple merge
index 84b7abc602a01c93c5bde5ca8bdaa0ef5bb0edfb,cea7b74963e9758e9338c7c739c02bb53e9d63e2..4750303ff44206c4f1a50cc376cb974f64171d5f
@@@ -6,8 -6,8 +6,9 @@@
   * Documentation/DMA-API.txt for documentation.
   */
  
 +#include <linux/kmemcheck.h>
  #include <linux/scatterlist.h>
+ #include <linux/dma-debug.h>
  #include <linux/dma-attrs.h>
  #include <asm/io.h>
  #include <asm/swiotlb.h>
@@@ -57,12 -57,16 +58,17 @@@ dma_map_single(struct device *hwdev, vo
               enum dma_data_direction dir)
  {
        struct dma_map_ops *ops = get_dma_ops(hwdev);
+       dma_addr_t addr;
  
 +      kmemcheck_mark_initialized(ptr, size);
        BUG_ON(!valid_dma_direction(dir));
-       return ops->map_page(hwdev, virt_to_page(ptr),
+       addr = ops->map_page(hwdev, virt_to_page(ptr),
                             (unsigned long)ptr & ~PAGE_MASK, size,
                             dir, NULL);
+       debug_dma_map_page(hwdev, virt_to_page(ptr),
+                          (unsigned long)ptr & ~PAGE_MASK, size,
+                          dir, addr, true);
+       return addr;
  }
  
  static inline void
@@@ -81,14 -86,13 +88,18 @@@ dma_map_sg(struct device *hwdev, struc
           int nents, enum dma_data_direction dir)
  {
        struct dma_map_ops *ops = get_dma_ops(hwdev);
+       int ents;
  
 +      struct scatterlist *s;
 +      int i;
 +
 +      for_each_sg(sg, s, nents, i)
 +              kmemcheck_mark_initialized(sg_virt(s), s->length);
        BUG_ON(!valid_dma_direction(dir));
-       return ops->map_sg(hwdev, sg, nents, dir, NULL);
+       ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
+       debug_dma_map_sg(hwdev, sg, nents, ents, dir);
+       return ents;
  }
  
  static inline void
@@@ -184,10 -197,13 +204,14 @@@ static inline dma_addr_t dma_map_page(s
                                      enum dma_data_direction dir)
  {
        struct dma_map_ops *ops = get_dma_ops(dev);
+       dma_addr_t addr;
  
 +      kmemcheck_mark_initialized(page_address(page) + offset, size);
        BUG_ON(!valid_dma_direction(dir));
-       return ops->map_page(dev, page, offset, size, dir, NULL);
+       addr = ops->map_page(dev, page, offset, size, dir, NULL);
+       debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+       return addr;
  }
  
  static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc crypto/xor.c
Simple merge
Simple merge
Simple merge
Simple merge
index 72e90270e0bf2cafa4ef8f313baf021b455e3c8f,0e80e26ecf21220104d8d2abbeb9cca6a1215e6e..0042090a4d70cd839a97c6b436ea91f8ddf51d40
@@@ -94,14 -95,9 +95,17 @@@ struct page 
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, ie. highmem) */
  #endif /* WANT_PAGE_VIRTUAL */
+ #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
+       unsigned long debug_flags;      /* Use atomic bitops on this */
+ #endif
 +
 +#ifdef CONFIG_KMEMCHECK
 +      /*
 +       * kmemcheck wants to track the status of each byte in a page; this
 +       * is a pointer to such a status block. NULL if not tracked.
 +       */
 +      void *shadow;
 +#endif
  };
  
  /*
index 1f49883438aae3b1756f98f357eed1677b308c8b,e1b7b2173885f8f14f4a8da3979d0a6eff08e186..ae5b2100f26c13fbaf6fdf5c91783be2207a21a8
@@@ -9,13 -8,10 +9,13 @@@ struct ring_buffer
  struct ring_buffer_iter;
  
  /*
-  * Don't reference this struct directly, use functions below.
+  * Don't refer to this struct directly, use functions below.
   */
  struct ring_buffer_event {
 -      u32             type:2, len:3, time_delta:27;
 +      kmemcheck_define_bitfield(bitfield, {
 +              u32             type:2, len:3, time_delta:27;
 +      });
 +
        u32             array[];
  };
  
Simple merge
index 96135f4b9318aae01c6f33365b9224881f98c058,5ac9b0bcaf9adef1fdfddebd2aff6290a44f7b9f..1a78b7ab62a226e391811f9efb9776637aa31337
  #include <asm/page.h>         /* kmalloc_sizes.h needs PAGE_SIZE */
  #include <asm/cache.h>                /* kmalloc_sizes.h needs L1_CACHE_BYTES */
  #include <linux/compiler.h>
+ #include <trace/kmemtrace.h>
  
 +/*
 + * struct kmem_cache
 + *
 + * manages a cache.
 + */
 +
 +struct kmem_cache {
 +/* 1) per-cpu data, touched during every alloc/free */
 +      struct array_cache *array[NR_CPUS];
 +/* 2) Cache tunables. Protected by cache_chain_mutex */
 +      unsigned int batchcount;
 +      unsigned int limit;
 +      unsigned int shared;
 +
 +      unsigned int buffer_size;
 +      u32 reciprocal_buffer_size;
 +/* 3) touched by every alloc & free from the backend */
 +
 +      unsigned int flags;             /* constant flags */
 +      unsigned int num;               /* # of objs per slab */
 +
 +/* 4) cache_grow/shrink */
 +      /* order of pgs per slab (2^n) */
 +      unsigned int gfporder;
 +
 +      /* force GFP flags, e.g. GFP_DMA */
 +      gfp_t gfpflags;
 +
 +      size_t colour;                  /* cache colouring range */
 +      unsigned int colour_off;        /* colour offset */
 +      struct kmem_cache *slabp_cache;
 +      unsigned int slab_size;
 +      unsigned int dflags;            /* dynamic flags */
 +
 +      /* constructor func */
 +      void (*ctor)(void *obj);
 +
 +/* 5) cache creation/removal */
 +      const char *name;
 +      struct list_head next;
 +
 +/* 6) statistics */
 +#ifdef CONFIG_DEBUG_SLAB
 +      unsigned long num_active;
 +      unsigned long num_allocations;
 +      unsigned long high_mark;
 +      unsigned long grown;
 +      unsigned long reaped;
 +      unsigned long errors;
 +      unsigned long max_freeable;
 +      unsigned long node_allocs;
 +      unsigned long node_frees;
 +      unsigned long node_overflow;
 +      atomic_t allochit;
 +      atomic_t allocmiss;
 +      atomic_t freehit;
 +      atomic_t freemiss;
 +
 +      /*
 +       * If debugging is enabled, then the allocator can add additional
 +       * fields and/or padding to every object. buffer_size contains the total
 +       * object size including these internal fields, the following two
 +       * variables contain the offset to the user object and its size.
 +       */
 +      int obj_offset;
 +      int obj_size;
 +#endif /* CONFIG_DEBUG_SLAB */
 +
 +      /*
 +       * We put nodelists[] at the end of kmem_cache, because we want to size
 +       * this array to nr_node_ids slots instead of MAX_NUMNODES
 +       * (see kmem_cache_init())
 +       * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
 +       * is statically defined, so we reserve the max number of nodes.
 +       */
 +      struct kmem_list3 *nodelists[MAX_NUMNODES];
 +      /*
 +       * Do not add fields after nodelists[]
 +       */
 +};
 +
  /* Size description struct for general caches. */
  struct cache_sizes {
        size_t                  cs_size;
Simple merge
diff --cc init/main.c
Simple merge
diff --cc kernel/fork.c
index 06e87f36b1a5ac4cdd5b14514cb0eb403093ab5b,660c2b8765bce0e112c26460fe037714b6ea3ac5..e08f1c8b27175e848c54089aab4f827e663722d9
@@@ -1475,20 -1458,21 +1458,21 @@@ void __init proc_caches_init(void
  {
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
 -                      SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
 -                      sighand_ctor);
 +                      SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
 +                      SLAB_NOTRACK, sighand_ctor);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
 -                      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 +                      SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        files_cachep = kmem_cache_create("files_cache",
                        sizeof(struct files_struct), 0,
 -                      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 +                      SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        fs_cachep = kmem_cache_create("fs_cache",
                        sizeof(struct fs_struct), 0,
 -                      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 +                      SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 -                      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 +                      SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
+       vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
        mmap_init();
  }
  
Simple merge
diff --cc kernel/sysctl.c
Simple merge
index b1f2f602918647ae28b30c4805f137da916cb9cd,960cbf44c844a17dd156b25927d6e98c89840600..d26c74af951b99155c823cf436c0d46a5fe420f2
@@@ -7,7 -9,7 +9,8 @@@
  #include <linux/spinlock.h>
  #include <linux/debugfs.h>
  #include <linux/uaccess.h>
+ #include <linux/hardirq.h>
 +#include <linux/kmemcheck.h>
  #include <linux/module.h>
  #include <linux/percpu.h>
  #include <linux/mutex.h>
@@@ -1061,8 -1258,7 +1259,8 @@@ __rb_reserve_next(struct ring_buffer_pe
                if (tail < BUF_PAGE_SIZE) {
                        /* Mark the rest of the page with padding */
                        event = __rb_page_index(tail_page, tail);
-                       event->type = RINGBUF_TYPE_PADDING;
 +                      kmemcheck_annotate_bitfield(event->bitfield);
+                       rb_event_set_padding(event);
                }
  
                if (tail <= BUF_PAGE_SIZE)
Simple merge
diff --cc mm/Makefile
index b64972bee9fe3dabe9115a8d93763da41a2654b2,ec73c68b601547680e6b1f49df5462b093a7e278..d19c9e759bdf3ec580c4f9613c17d6f5f729fb96
@@@ -24,9 -24,9 +24,10 @@@ obj-$(CONFIG_SPARSEMEM_VMEMMAP) += spar
  obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o
  obj-$(CONFIG_SLOB) += slob.o
  obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
+ obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
  obj-$(CONFIG_SLAB) += slab.o
  obj-$(CONFIG_SLUB) += slub.o
 +obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
  obj-$(CONFIG_FAILSLAB) += failslab.o
  obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
  obj-$(CONFIG_FS_XIP) += filemap_xip.o
diff --cc mm/page_alloc.c
Simple merge
diff --cc mm/slab.c
Simple merge
diff --cc mm/slub.c
index 0302da61a683aac9f538cdd73d708d4c27873ac8,7ab54ecbd3f3a5abe50eba2c0a5dc035bf4d5efc..4ac5e3e6bb0c8ce2ff6af62003ebb4c8ee5f9677
+++ b/mm/slub.c
@@@ -1746,10 -1745,9 +1769,10 @@@ static __always_inline void slab_free(s
  
        local_irq_save(flags);
        c = get_cpu_slab(s, smp_processor_id());
 +      kmemcheck_slab_free(s, object, c->objsize);
        debug_check_no_locks_freed(object, c->objsize);
        if (!(s->flags & SLAB_DEBUG_OBJECTS))
-               debug_check_no_obj_freed(object, s->objsize);
+               debug_check_no_obj_freed(object, c->objsize);
        if (likely(page == c->page && c->node >= 0)) {
                object[c->offset] = c->freelist;
                c->freelist = object;
Simple merge
diff --cc net/core/sock.c
Simple merge