ia64/xen-unstable

changeset 3616:552dd1f1c64c

bitkeeper revision 1.1159.238.5 (4200df8a5A2pMKPYmS5iOV3Q8d2zIw)

Cset exclude: iap10@labyrinth.cl.cam.ac.uk|ChangeSet|20050202130109|29824
author iap10@labyrinth.cl.cam.ac.uk
date Wed Feb 02 14:11:22 2005 +0000 (2005-02-02)
parents 49103eca5edb
children f3e117cf73c2
files xen/arch/x86/setup.c xen/common/malloc.c xen/common/page_alloc.c xen/include/xen/domain.h xen/include/xen/lib.h xen/include/xen/list.h xen/include/xen/slab.h
line diff
     1.1 --- a/xen/arch/x86/setup.c	Wed Feb 02 13:01:09 2005 +0000
     1.2 +++ b/xen/arch/x86/setup.c	Wed Feb 02 14:11:22 2005 +0000
     1.3 @@ -598,6 +598,10 @@ void __init __start_xen(multiboot_info_t
     1.4  
     1.5      early_boot = 0;
     1.6  
     1.7 +    /* Initialise the slab allocator. */
     1.8 +    xmem_cache_init();
     1.9 +    xmem_cache_sizes_init(max_page);
    1.10 +
    1.11      start_of_day();
    1.12  
    1.13      grant_table_init();
     2.1 --- a/xen/common/malloc.c	Wed Feb 02 13:01:09 2005 +0000
     2.2 +++ b/xen/common/malloc.c	Wed Feb 02 14:11:22 2005 +0000
     2.3 @@ -1,164 +0,0 @@
     2.4 -/* Simple allocator for Xen.  If larger than a page, simply use the
     2.5 - * page-order allocator.
     2.6 - *
     2.7 - * Copyright (C) 2005 Rusty Russell IBM Corporation
     2.8 - *
     2.9 - *  This program is free software; you can redistribute it and/or modify
    2.10 - *  it under the terms of the GNU General Public License as published by
    2.11 - *  the Free Software Foundation; either version 2 of the License, or
    2.12 - *  (at your option) any later version.
    2.13 - *
    2.14 - *  This program is distributed in the hope that it will be useful,
    2.15 - *  but WITHOUT ANY WARRANTY; without even the implied warranty of
    2.16 - *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    2.17 - *  GNU General Public License for more details.
    2.18 - *
    2.19 - *  You should have received a copy of the GNU General Public License
    2.20 - *  along with this program; if not, write to the Free Software
    2.21 - *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    2.22 - */
    2.23 -#include <xen/mm.h>
    2.24 -#include <xen/spinlock.h>
    2.25 -#include <xen/ac_timer.h>
    2.26 -
    2.27 -#define BUG_ON(x) do { if (x) BUG(); }while(0)
    2.28 -
    2.29 -static LIST_HEAD(freelist);
    2.30 -static spinlock_t freelist_lock = SPIN_LOCK_UNLOCKED;
    2.31 -
    2.32 -struct xmalloc_hdr
    2.33 -{
    2.34 -	/* Total including this hdr: negative means allocated. */
    2.35 -	long size;
    2.36 -	union {
    2.37 -		struct list_head freelist;
    2.38 -		char data[0];
    2.39 -	} u;
    2.40 -};
    2.41 -
    2.42 -static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block)
    2.43 -{
    2.44 -	size_t leftover = block - size;
    2.45 -
    2.46 -	/* If enough left to make a block, put it on free list. */
    2.47 -	if (leftover >= sizeof(struct xmalloc_hdr)) {
    2.48 -		struct xmalloc_hdr *extra;
    2.49 -
    2.50 -		extra = (void *)hdr + size;
    2.51 -		extra->size = leftover;
    2.52 -		list_add(&extra->u.freelist, &freelist);
    2.53 -	} else
    2.54 -		size = block;
    2.55 -
    2.56 -	hdr->size = -size;
    2.57 -}
    2.58 -
    2.59 -static void *xmalloc_new_page(size_t size)
    2.60 -{
    2.61 -	struct xmalloc_hdr *hdr;
    2.62 -	unsigned long flags;
    2.63 -
    2.64 -	hdr = (void *)alloc_xenheap_pages(0);
    2.65 -	if (!hdr)
    2.66 -		return NULL;
    2.67 -
    2.68 -	spin_lock_irqsave(&freelist_lock, flags);
    2.69 -	maybe_split(hdr, size, PAGE_SIZE);
    2.70 -	spin_unlock_irqrestore(&freelist_lock, flags);
    2.71 -	return hdr->u.data;
    2.72 -}
    2.73 -
    2.74 -/* Big object?  Just use page allocator. */
    2.75 -static void *xmalloc_whole_pages(size_t size)
    2.76 -{
    2.77 -	struct xmalloc_hdr *hdr;
    2.78 -	unsigned int pageorder = get_order(size);
    2.79 -
    2.80 -	hdr = (void *)alloc_xenheap_pages(pageorder);
    2.81 -	if (!hdr)
    2.82 -		return NULL;
    2.83 -
    2.84 -	hdr->size = -(1 << (pageorder + PAGE_SHIFT));
    2.85 -	return hdr->u.data;
    2.86 -}
    2.87 -
    2.88 -void *__xmalloc(size_t size, const char *file, unsigned int line)
    2.89 -{
    2.90 -	struct xmalloc_hdr *i;
    2.91 -	unsigned long flags;
    2.92 -
    2.93 -	/* Add room for header, align to unsigned long. */
    2.94 -	size += offsetof(struct xmalloc_hdr, u.data);
    2.95 -	size = ((size + sizeof(unsigned long)-1)&~(sizeof(unsigned long)-1));
    2.96 -
    2.97 -	/* Minimum size is size of freelist entry. */
    2.98 -	if (size < sizeof(*i))
    2.99 -		size = sizeof(*i);
   2.100 -
   2.101 -	/* For big allocs, give them whole pages. */
   2.102 -	if (size >= PAGE_SIZE)
   2.103 -		return xmalloc_whole_pages(size);
   2.104 -
   2.105 -	/* Search free list */
   2.106 -	spin_lock_irqsave(&freelist_lock, flags);
   2.107 -	list_for_each_entry(i, &freelist, u.freelist) {
   2.108 -		if (i->size >= size) {
   2.109 -			list_del(&i->u.freelist);
   2.110 -			maybe_split(i, size, i->size);
   2.111 -			spin_unlock_irqrestore(&freelist_lock, flags);
   2.112 -			return i->u.data;
   2.113 -		}
   2.114 -	}
   2.115 -	spin_unlock_irqrestore(&freelist_lock, flags);
   2.116 -
   2.117 -	/* Alloc a new page and return from that. */
   2.118 -	return xmalloc_new_page(size);
   2.119 -}
   2.120 -
   2.121 -void __xfree(const void *p, const char *file, unsigned int line)
   2.122 -{
   2.123 -	unsigned long flags;
   2.124 -	struct xmalloc_hdr *i, *tmp, *hdr;
   2.125 -
   2.126 -	if (!p)
   2.127 -		return;
   2.128 -
   2.129 -	hdr = container_of((void *)p, struct xmalloc_hdr, u.data);
   2.130 -
   2.131 -	/* We know hdr will be on same page. */
   2.132 -	BUG_ON(((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK));
   2.133 -
   2.134 -	/* Not previously freed. */
   2.135 -	BUG_ON(hdr->size > 0);
   2.136 -	hdr->size = -hdr->size;
   2.137 -
   2.138 -	/* Big allocs free directly. */
   2.139 -	if (hdr->size >= PAGE_SIZE) {
   2.140 -		free_xenheap_pages((unsigned long)hdr, get_order(hdr->size));
   2.141 -		return;
   2.142 -	}
   2.143 -
   2.144 -	/* Merge with other free block, or put in list. */
   2.145 -	spin_lock_irqsave(&freelist_lock, flags);
   2.146 -	list_for_each_entry_safe(i, tmp, &freelist, u.freelist) {
   2.147 -		/* We follow this block?  Swallow it. */
   2.148 -		if ((void *)i + i->size == (void *)hdr) {
   2.149 -			list_del(&i->u.freelist);
   2.150 -			i->size += hdr->size;
   2.151 -			hdr = i;
   2.152 -		}
   2.153 -		/* It follows us?  Delete it and add it to us. */
   2.154 -		if ((void *)hdr + hdr->size == (void *)i) {
   2.155 -			list_del(&i->u.freelist);
   2.156 -			hdr->size += i->size;
   2.157 -		}
   2.158 -	}
   2.159 -
   2.160 -	/* Did we free entire page? */
   2.161 -	if (hdr->size == PAGE_SIZE) {
   2.162 -		BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0);
   2.163 -		free_xenheap_pages((unsigned long)hdr, 0);
   2.164 -	} else
   2.165 -		list_add(&hdr->u.freelist, &freelist);
   2.166 -	spin_unlock_irqrestore(&freelist_lock, flags);
   2.167 -}
     3.1 --- a/xen/common/page_alloc.c	Wed Feb 02 13:01:09 2005 +0000
     3.2 +++ b/xen/common/page_alloc.c	Wed Feb 02 14:11:22 2005 +0000
     3.3 @@ -403,8 +403,9 @@ unsigned long alloc_xenheap_pages(unsign
     3.4  {
     3.5      unsigned long flags;
     3.6      struct pfn_info *pg;
     3.7 -    int i;
     3.8 +    int i, attempts = 0;
     3.9  
    3.10 + retry:
    3.11      local_irq_save(flags);
    3.12      pg = alloc_heap_pages(MEMZONE_XEN, order);
    3.13      local_irq_restore(flags);
    3.14 @@ -424,7 +425,14 @@ unsigned long alloc_xenheap_pages(unsign
    3.15      return (unsigned long)page_to_virt(pg);
    3.16  
    3.17   no_memory:
    3.18 +    if ( attempts++ < 8 )
    3.19 +    {
    3.20 +        xmem_cache_reap();
    3.21 +        goto retry;
    3.22 +    }
    3.23 +
    3.24      printk("Cannot handle page request order %d!\n", order);
    3.25 +    dump_slabinfo();
    3.26      return 0;
    3.27  }
    3.28  
     4.1 --- a/xen/include/xen/domain.h	Wed Feb 02 13:01:09 2005 +0000
     4.2 +++ b/xen/include/xen/domain.h	Wed Feb 02 14:11:22 2005 +0000
     4.3 @@ -6,6 +6,8 @@
     4.4   * Arch-specifics.
     4.5   */
     4.6  
     4.7 +extern void domain_startofday(void);
     4.8 +
     4.9  extern struct domain *arch_alloc_domain_struct(void);
    4.10  
    4.11  extern void arch_free_domain_struct(struct domain *d);
     5.1 --- a/xen/include/xen/lib.h	Wed Feb 02 13:01:09 2005 +0000
     5.2 +++ b/xen/include/xen/lib.h	Wed Feb 02 14:11:22 2005 +0000
     5.3 @@ -20,18 +20,6 @@ struct domain;
     5.4  
     5.5  void cmdline_parse(char *cmdline);
     5.6  
     5.7 -/**
     5.8 - * container_of - cast a member of a structure out to the containing structure
     5.9 - *
    5.10 - * @ptr:	the pointer to the member.
    5.11 - * @type:	the type of the container struct this is embedded in.
    5.12 - * @member:	the name of the member within the struct.
    5.13 - *
    5.14 - */
    5.15 -#define container_of(ptr, type, member) ({			\
    5.16 -        const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
    5.17 -        (type *)( (char *)__mptr - offsetof(type,member) );})
    5.18 -
    5.19  #define printk printf
    5.20  void printf(const char *format, ...);
    5.21  void panic(const char *format, ...);
     6.1 --- a/xen/include/xen/list.h	Wed Feb 02 13:01:09 2005 +0000
     6.2 +++ b/xen/include/xen/list.h	Wed Feb 02 14:11:22 2005 +0000
     6.3 @@ -174,17 +174,5 @@ static __inline__ void list_splice(struc
     6.4  	     pos = list_entry(pos->member.next, typeof(*pos), member),	\
     6.5  		     prefetch(pos->member.next))
     6.6  
     6.7 -/**
     6.8 - * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
     6.9 - * @pos:	the type * to use as a loop counter.
    6.10 - * @n:		another type * to use as temporary storage
    6.11 - * @head:	the head for your list.
    6.12 - * @member:	the name of the list_struct within the struct.
    6.13 - */
    6.14 -#define list_for_each_entry_safe(pos, n, head, member)			\
    6.15 -	for (pos = list_entry((head)->next, typeof(*pos), member),	\
    6.16 -		n = list_entry(pos->member.next, typeof(*pos), member);	\
    6.17 -	     &pos->member != (head); 					\
    6.18 -	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
    6.19  #endif /* _LINUX_LIST_H */
    6.20  
     7.1 --- a/xen/include/xen/slab.h	Wed Feb 02 13:01:09 2005 +0000
     7.2 +++ b/xen/include/xen/slab.h	Wed Feb 02 14:11:22 2005 +0000
     7.3 @@ -13,14 +13,45 @@
     7.4  #include <asm/slab.h>
     7.5  
     7.6  #else
     7.7 +
     7.8 +typedef struct xmem_cache_s xmem_cache_t;
     7.9 +
    7.10  #include <xen/mm.h>
    7.11  #include <xen/cache.h>
    7.12  #include <xen/types.h>
    7.13  
    7.14 -#define _xmalloc(size) __xmalloc(size, __FILE__, __LINE__)
    7.15 -#define xfree(ptr) __xfree(ptr, __FILE__, __LINE__)
    7.16 -extern void *__xmalloc(size_t size, const char *file, unsigned int line);
    7.17 -extern void __xfree(const void *p, const char *file, unsigned int line);
    7.18 +/* Flags to pass to xmem_cache_create(). */
    7.19 +/* NB. The first 3 are only valid when built with SLAB_DEBUG_SUPPORT. */
    7.20 +#define SLAB_DEBUG_INITIAL      0x00000200UL    /* Call constructor */
    7.21 +#define SLAB_RED_ZONE           0x00000400UL    /* Red zone objs in a cache */
    7.22 +#define SLAB_POISON             0x00000800UL    /* Poison objects */
    7.23 +#define SLAB_NO_REAP            0x00001000UL    /* never reap from the cache */
    7.24 +#define SLAB_HWCACHE_ALIGN      0x00002000UL    /* align obj on a cache line */
    7.25 +
    7.26 +/* Flags passed to a constructor function. */
     7.27 +#define SLAB_CTOR_CONSTRUCTOR   0x001UL /* if not set, then destructor */
    7.28 +#define SLAB_CTOR_ATOMIC        0x002UL /* tell cons. it can't sleep */
    7.29 +#define SLAB_CTOR_VERIFY        0x004UL /* tell cons. it's a verify call */
    7.30 +
    7.31 +extern void xmem_cache_init(void);
    7.32 +extern void xmem_cache_sizes_init(unsigned long);
    7.33 +
    7.34 +extern xmem_cache_t *xmem_find_general_cachep(size_t);
    7.35 +extern xmem_cache_t *xmem_cache_create(
    7.36 +    const char *, size_t, size_t, unsigned long,
    7.37 +    void (*)(void *, xmem_cache_t *, unsigned long),
    7.38 +    void (*)(void *, xmem_cache_t *, unsigned long));
    7.39 +extern int xmem_cache_destroy(xmem_cache_t *);
    7.40 +extern int xmem_cache_shrink(xmem_cache_t *);
    7.41 +extern void *xmem_cache_alloc(xmem_cache_t *);
    7.42 +extern void xmem_cache_free(xmem_cache_t *, void *);
    7.43 +
    7.44 +extern void *_xmalloc(size_t);
    7.45 +extern void xfree(const void *);
    7.46 +
    7.47 +extern int xmem_cache_reap(void);
    7.48 +
     7.49 +extern void dump_slabinfo(void);
    7.50  
    7.51  /* Nicely typesafe for you. */
    7.52  #define xmalloc(type) ((type *)_xmalloc(sizeof(type)))