ia64/xen-unstable

changeset 6446:581be7c5e9e4

This patch removes the broken and very complicated malloc in
favour of Xen's much simpler (and working) allocator
(xmalloc, by Rusty).

Signed-off-by: Grzegorz Milos <gm281@cam.ac.uk>
author kaf24@firebug.cl.cam.ac.uk
date Fri Aug 26 10:35:36 2005 +0000 (2005-08-26)
parents 83c73802f02a
children a43cc4e06814
files extras/mini-os/include/lib.h extras/mini-os/include/mm.h extras/mini-os/include/types.h extras/mini-os/include/xmalloc.h extras/mini-os/lib/xmalloc.c extras/mini-os/mm.c
line diff
     1.1 --- a/extras/mini-os/include/lib.h	Fri Aug 26 09:29:54 2005 +0000
     1.2 +++ b/extras/mini-os/include/lib.h	Fri Aug 26 10:35:36 2005 +0000
     1.3 @@ -79,36 +79,4 @@ char  *strchr(const char *s, int c);
     1.4  char  *strstr(const char *s1, const char *s2);
     1.5  
     1.6  
     1.7 -/* dlmalloc functions */
     1.8 -struct mallinfo {
     1.9 -  int arena;    /* non-mmapped space allocated from system */
    1.10 -  int ordblks;  /* number of free chunks */
    1.11 -  int smblks;   /* number of fastbin blocks */
    1.12 -  int hblks;    /* number of mmapped regions */
    1.13 -  int hblkhd;   /* space in mmapped regions */
    1.14 -  int usmblks;  /* maximum total allocated space */
    1.15 -  int fsmblks;  /* space available in freed fastbin blocks */
    1.16 -  int uordblks; /* total allocated space */
    1.17 -  int fordblks; /* total free space */
    1.18 -  int keepcost; /* top-most, releasable (via malloc_trim) space */
    1.19 -};
    1.20 -
    1.21 -void *malloc(size_t n);
    1.22 -void *calloc(size_t n_elements, size_t element_size);
    1.23 -void  free(void* p);
    1.24 -void *realloc(void* p, size_t n);
    1.25 -void *memalign(size_t alignment, size_t n);
    1.26 -void *valloc(size_t n);
    1.27 -struct mallinfo mallinfo(void);
    1.28 -int  mallopt(int parameter_number, int parameter_value);
    1.29 -
    1.30 -void **independent_calloc(size_t n_elements, size_t size, void* chunks[]);
    1.31 -void **independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
    1.32 -void *pvalloc(size_t n);
    1.33 -void cfree(void* p);
    1.34 -int malloc_trim(size_t pad);
    1.35 -size_t malloc_usable_size(void* p);
    1.36 -void malloc_stats(void);
    1.37 -
    1.38 -
    1.39  #endif /* _LIB_H_ */
     2.1 --- a/extras/mini-os/include/mm.h	Fri Aug 26 09:29:54 2005 +0000
     2.2 +++ b/extras/mini-os/include/mm.h	Fri Aug 26 10:35:36 2005 +0000
     2.3 @@ -126,6 +126,18 @@ static __inline__ unsigned long machine_
     2.4  
     2.5  void init_mm(void);
     2.6  unsigned long alloc_pages(int order);
     2.7 -int is_mfn_mapped(unsigned long mfn);
     2.8 +#define alloc_page()    alloc_pages(0);
     2.9 +void free_pages(void *pointer, int order);
    2.10 +//int is_mfn_mapped(unsigned long mfn);
    2.11 +
    2.12 +static __inline__ int get_order(unsigned long size)
    2.13 +{
    2.14 +    int order;
    2.15 +    size = (size-1) >> PAGE_SHIFT;
    2.16 +    for ( order = 0; size; order++ )
    2.17 +        size >>= 1;
    2.18 +    return order;
    2.19 +}
    2.20 +
    2.21  
    2.22  #endif /* _MM_H_ */
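
The get_order() helper added above maps a byte count to the smallest
buddy order whose 2^order pages cover it. A minimal sketch of a caller,
assuming PAGE_SHIFT is 12 (4 KiB pages) and a non-zero size (size == 0
underflows the (size-1) step); alloc_bytes() is hypothetical and not
part of this changeset:

    /* Round a byte count up to an allocation order, then grab pages. */
    void *alloc_bytes(unsigned long size)
    {
        int order = get_order(size);  /* 1..4096 -> 0, 4097..8192 -> 1, ... */
        return (void *)alloc_pages(order);
    }
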
     3.1 --- a/extras/mini-os/include/types.h	Fri Aug 26 09:29:54 2005 +0000
     3.2 +++ b/extras/mini-os/include/types.h	Fri Aug 26 10:35:36 2005 +0000
     3.3 @@ -49,4 +49,6 @@ typedef long                quad_t;
     3.4  typedef unsigned long       u_quad_t;
     3.5  typedef unsigned long       uintptr_t;
     3.6  #endif
     3.7 +
     3.8 +#define UINT_MAX            (~0U)
     3.9  #endif /* _TYPES_H_ */
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/extras/mini-os/include/xmalloc.h	Fri Aug 26 10:35:36 2005 +0000
     4.3 @@ -0,0 +1,23 @@
     4.4 +#ifndef __XMALLOC_H__
     4.5 +#define __XMALLOC_H__
     4.6 +
     4.7 +/* Allocate space for typed object. */
     4.8 +#define xmalloc(_type) ((_type *)_xmalloc(sizeof(_type), __alignof__(_type)))
     4.9 +
    4.10 +/* Allocate space for array of typed objects. */
    4.11 +#define xmalloc_array(_type, _num) ((_type *)_xmalloc_array(sizeof(_type), __alignof__(_type), _num))
    4.12 +
    4.13 +/* Free any of the above. */
    4.14 +extern void xfree(const void *);
    4.15 +
    4.16 +/* Underlying functions */
    4.17 +extern void *_xmalloc(size_t size, size_t align);
    4.18 +static inline void *_xmalloc_array(size_t size, size_t align, size_t num)
    4.19 +{
    4.20 +	/* Check for overflow. */
    4.21 +	if (size && num > UINT_MAX / size)
    4.22 +		return NULL;
    4.23 + 	return _xmalloc(size * num, align);
    4.24 +}
    4.25 +
    4.26 +#endif /* __XMALLOC_H__ */
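
The interface above is typed: callers name a type rather than a byte
count, and _xmalloc_array() rejects element counts whose total size
would overflow (using the UINT_MAX added to types.h below). A minimal
usage sketch under those definitions; struct conn and example() are
hypothetical, not part of this changeset:

    #include <types.h>
    #include <xmalloc.h>

    struct conn { int id; void *buf; };

    void example(void)
    {
        /* One object, aligned via __alignof__; NULL on failure. */
        struct conn *c = xmalloc(struct conn);

        /* 32 objects; NULL if 32 * sizeof(struct conn) would overflow. */
        struct conn *tab = xmalloc_array(struct conn, 32);

        if (c)
            xfree(c);
        if (tab)
            xfree(tab);
    }
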
     5.1 --- a/extras/mini-os/lib/malloc.c	Fri Aug 26 09:29:54 2005 +0000
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,5697 +0,0 @@
     5.4 -/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
     5.5 - ****************************************************************************
     5.6 - * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
     5.7 - ****************************************************************************
     5.8 - *
     5.9 - *        File: malloc.c
    5.10 - *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
    5.11 - *     Changes: 
    5.12 - *              
    5.13 - *        Date: Aug 2003
    5.14 - * 
    5.15 - * Environment: Xen Minimal OS
    5.16 - * Description: Library functions, maloc at al
    5.17 - *
    5.18 - ****************************************************************************
    5.19 - * $Id: c-insert.c,v 1.7 2002/11/08 16:04:34 rn Exp $
    5.20 - ****************************************************************************
    5.21 - */
    5.22 -
    5.23 -#include <os.h>
    5.24 -#include <mm.h>
    5.25 -#include <types.h>
    5.26 -#include <lib.h>
    5.27 -
    5.28 -/* standard compile option */
    5.29 -#define HAVE_MEMCOPY                1
    5.30 -#define USE_MEMCPY                  1
    5.31 -#undef  HAVE_MMAP
    5.32 -#undef  MMAP_CLEARS
    5.33 -#undef  HAVE_MREMAP
    5.34 -#define malloc_getpagesize          PAGE_SIZE
    5.35 -#undef  HAVE_USR_INCLUDE_MALLOC_H   
    5.36 -#define LACKS_UNISTD_H              1
    5.37 -#define LACKS_SYS_PARAM_H           1
    5.38 -#define LACKS_SYS_MMAN_H            1
    5.39 -#define LACKS_FCNTL_H               1
    5.40 -
    5.41 -
    5.42 -/* page allocator interface */
    5.43 -#define MORECORE             more_core
    5.44 -#define MORECORE_CONTIGUOUS  0
    5.45 -#define MORECORE_FAILURE     0
    5.46 -#define MORECORE_CANNOT_TRIM 1
    5.47 -
    5.48 -static void *more_core(size_t n)
    5.49 -{
    5.50 -    static void *last;
    5.51 -    unsigned long order, num_pages;
    5.52 -    void *ret;
    5.53 -
    5.54 -    if (n == 0)
    5.55 -        return last;
    5.56 -    
    5.57 -    n = PFN_UP(n);
    5.58 -    for ( order = 0; n > 1; order++ )
    5.59 -        n >>= 1;
    5.60 -    ret = (void *)alloc_pages(order);
    5.61 -
    5.62 -    /* work out pointer to end of chunk */
    5.63 -    if ( ret )
    5.64 -    {
    5.65 -        num_pages = 1 << order;
    5.66 -        last = (char *)ret + (num_pages * PAGE_SIZE);
    5.67 -    }
    5.68 -
    5.69 -    return ret;      
    5.70 -}
    5.71 -
    5.72 -/* other options commented out below */
    5.73 -#define __STD_C     1
    5.74 -#define Void_t      void
    5.75 -#define assert(x) ((void)0)
    5.76 -
    5.77 -#define CHUNK_SIZE_T unsigned long
    5.78 -#define PTR_UINT unsigned long
    5.79 -#define INTERNAL_SIZE_T size_t
    5.80 -#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
    5.81 -#define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
    5.82 -#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
    5.83 -#define TRIM_FASTBINS  0
    5.84 -
    5.85 -#define M_MXFAST            1    
    5.86 -#define DEFAULT_MXFAST     64
    5.87 -#define M_TRIM_THRESHOLD       -1
    5.88 -#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
    5.89 -#define M_TOP_PAD              -2
    5.90 -#define DEFAULT_TOP_PAD        (0)
    5.91 -#define M_MMAP_THRESHOLD      -3
    5.92 -#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
    5.93 -#define M_MMAP_MAX             -4
    5.94 -#define DEFAULT_MMAP_MAX       (0)
    5.95 -#define MALLOC_FAILURE_ACTION   printf("malloc failure\n")
    5.96 -
    5.97 -#define cALLOc      public_cALLOc
    5.98 -#define fREe        public_fREe
    5.99 -#define cFREe       public_cFREe
   5.100 -#define mALLOc      public_mALLOc
   5.101 -#define mEMALIGn    public_mEMALIGn
   5.102 -#define rEALLOc     public_rEALLOc
   5.103 -#define vALLOc      public_vALLOc
   5.104 -#define pVALLOc     public_pVALLOc
   5.105 -#define mALLINFo    public_mALLINFo
   5.106 -#define mALLOPt     public_mALLOPt
   5.107 -#define mTRIm       public_mTRIm
   5.108 -#define mSTATs      public_mSTATs
   5.109 -#define mUSABLe     public_mUSABLe
   5.110 -#define iCALLOc     public_iCALLOc
   5.111 -#define iCOMALLOc   public_iCOMALLOc
   5.112 -
   5.113 -#define public_cALLOc    calloc
   5.114 -#define public_fREe      free
   5.115 -#define public_cFREe     cfree
   5.116 -#define public_mALLOc    malloc
   5.117 -#define public_mEMALIGn  memalign
   5.118 -#define public_rEALLOc   realloc
   5.119 -#define public_vALLOc    valloc
   5.120 -#define public_pVALLOc   pvalloc
   5.121 -#define public_mALLINFo  mallinfo
   5.122 -#define public_mALLOPt   mallopt
   5.123 -#define public_mTRIm     malloc_trim
   5.124 -#define public_mSTATs    malloc_stats
   5.125 -#define public_mUSABLe   malloc_usable_size
   5.126 -#define public_iCALLOc   independent_calloc
   5.127 -#define public_iCOMALLOc independent_comalloc
   5.128 -
   5.129 -
   5.130 -/*
   5.131 -  This is a version (aka dlmalloc) of malloc/free/realloc written by
   5.132 -  Doug Lea and released to the public domain.  Use, modify, and
   5.133 -  redistribute this code without permission or acknowledgement in any
   5.134 -  way you wish.  Send questions, comments, complaints, performance
   5.135 -  data, etc to dl@cs.oswego.edu
   5.136 -
   5.137 -* VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
   5.138 -
   5.139 -   Note: There may be an updated version of this malloc obtainable at
   5.140 -           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
   5.141 -         Check before installing!
   5.142 -
   5.143 -* Quickstart
   5.144 -
   5.145 -  This library is all in one file to simplify the most common usage:
   5.146 -  ftp it, compile it (-O), and link it into another program. All
   5.147 -  of the compile-time options default to reasonable values for use on
   5.148 -  most unix platforms. Compile -DWIN32 for reasonable defaults on windows.
   5.149 -  You might later want to step through various compile-time and dynamic
   5.150 -  tuning options.
   5.151 -
   5.152 -  For convenience, an include file for code using this malloc is at:
   5.153 -     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.7.1.h
   5.154 -  You don't really need this .h file unless you call functions not
   5.155 -  defined in your system include files.  The .h file contains only the
   5.156 -  excerpts from this file needed for using this malloc on ANSI C/C++
   5.157 -  systems, so long as you haven't changed compile-time options about
   5.158 -  naming and tuning parameters.  If you do, then you can create your
   5.159 -  own malloc.h that does include all settings by cutting at the point
   5.160 -  indicated below.
   5.161 -
   5.162 -* Why use this malloc?
   5.163 -
   5.164 -  This is not the fastest, most space-conserving, most portable, or
   5.165 -  most tunable malloc ever written. However it is among the fastest
   5.166 -  while also being among the most space-conserving, portable and tunable.
   5.167 -  Consistent balance across these factors results in a good general-purpose
   5.168 -  allocator for malloc-intensive programs.
   5.169 -
   5.170 -  The main properties of the algorithms are:
   5.171 -  * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
   5.172 -    with ties normally decided via FIFO (i.e. least recently used).
   5.173 -  * For small (<= 64 bytes by default) requests, it is a caching
   5.174 -    allocator, that maintains pools of quickly recycled chunks.
   5.175 -  * In between, and for combinations of large and small requests, it does
   5.176 -    the best it can trying to meet both goals at once.
   5.177 -  * For very large requests (>= 128KB by default), it relies on system
   5.178 -    memory mapping facilities, if supported.
   5.179 -
   5.180 -  For a longer but slightly out of date high-level description, see
   5.181 -     http://gee.cs.oswego.edu/dl/html/malloc.html
   5.182 -
   5.183 -  You may already by default be using a C library containing a malloc
   5.184 -  that is  based on some version of this malloc (for example in
   5.185 -  linux). You might still want to use the one in this file in order to
   5.186 -  customize settings or to avoid overheads associated with library
   5.187 -  versions.
   5.188 -
   5.189 -* Contents, described in more detail in "description of public routines" below.
   5.190 -
   5.191 -  Standard (ANSI/SVID/...)  functions:
   5.192 -    malloc(size_t n);
   5.193 -    calloc(size_t n_elements, size_t element_size);
   5.194 -    free(Void_t* p);
   5.195 -    realloc(Void_t* p, size_t n);
   5.196 -    memalign(size_t alignment, size_t n);
   5.197 -    valloc(size_t n);
   5.198 -    mallinfo()
   5.199 -    mallopt(int parameter_number, int parameter_value)
   5.200 -
   5.201 -  Additional functions:
   5.202 -    independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
   5.203 -    independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
   5.204 -    pvalloc(size_t n);
   5.205 -    cfree(Void_t* p);
   5.206 -    malloc_trim(size_t pad);
   5.207 -    malloc_usable_size(Void_t* p);
   5.208 -    malloc_stats();
   5.209 -
   5.210 -* Vital statistics:
   5.211 -
   5.212 -  Supported pointer representation:       4 or 8 bytes
   5.213 -  Supported size_t  representation:       4 or 8 bytes 
   5.214 -       Note that size_t is allowed to be 4 bytes even if pointers are 8.
   5.215 -       You can adjust this by defining INTERNAL_SIZE_T
   5.216 -
   5.217 -  Alignment:                              2 * sizeof(size_t) (default)
   5.218 -       (i.e., 8 byte alignment with 4byte size_t). This suffices for
   5.219 -       nearly all current machines and C compilers. However, you can
   5.220 -       define MALLOC_ALIGNMENT to be wider than this if necessary.
   5.221 -
   5.222 -  Minimum overhead per allocated chunk:   4 or 8 bytes
   5.223 -       Each malloced chunk has a hidden word of overhead holding size
   5.224 -       and status information.
   5.225 -
   5.226 -  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
   5.227 -                          8-byte ptrs:  24/32 bytes (including, 4/8 overhead)
   5.228 -
   5.229 -       When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
   5.230 -       ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
   5.231 -       needed; 4 (8) for a trailing size field and 8 (16) bytes for
   5.232 -       free list pointers. Thus, the minimum allocatable size is
   5.233 -       16/24/32 bytes.
   5.234 -
   5.235 -       Even a request for zero bytes (i.e., malloc(0)) returns a
   5.236 -       pointer to something of the minimum allocatable size.
   5.237 -
   5.238 -       The maximum overhead wastage (i.e., number of extra bytes
   5.239 -       allocated than were requested in malloc) is less than or equal
   5.240 -       to the minimum size, except for requests >= mmap_threshold that
   5.241 -       are serviced via mmap(), where the worst case wastage is 2 *
   5.242 -       sizeof(size_t) bytes plus the remainder from a system page (the
   5.243 -       minimal mmap unit); typically 4096 or 8192 bytes.
   5.244 -
   5.245 -  Maximum allocated size:  4-byte size_t: 2^32 minus about two pages 
   5.246 -                           8-byte size_t: 2^64 minus about two pages
   5.247 -
   5.248 -       It is assumed that (possibly signed) size_t values suffice to
   5.249 -       represent chunk sizes. `Possibly signed' is due to the fact
   5.250 -       that `size_t' may be defined on a system as either a signed or
   5.251 -       an unsigned type. The ISO C standard says that it must be
   5.252 -       unsigned, but a few systems are known not to adhere to this.
   5.253 -       Additionally, even when size_t is unsigned, sbrk (which is by
   5.254 -       default used to obtain memory from system) accepts signed
   5.255 -       arguments, and may not be able to handle size_t-wide arguments
   5.256 -       with negative sign bit.  Generally, values that would
   5.257 -       appear as negative after accounting for overhead and alignment
   5.258 -       are supported only via mmap(), which does not have this
   5.259 -       limitation.
   5.260 -
   5.261 -       Requests for sizes outside the allowed range will perform an optional
   5.262 -       failure action and then return null. (Requests may also
   5.263 -       also fail because a system is out of memory.)
   5.264 -
   5.265 -  Thread-safety: NOT thread-safe unless USE_MALLOC_LOCK defined
   5.266 -
   5.267 -       When USE_MALLOC_LOCK is defined, wrappers are created to
   5.268 -       surround every public call with either a pthread mutex or
   5.269 -       a win32 spinlock (depending on WIN32). This is not
   5.270 -       especially fast, and can be a major bottleneck.
   5.271 -       It is designed only to provide minimal protection
   5.272 -       in concurrent environments, and to provide a basis for
   5.273 -       extensions.  If you are using malloc in a concurrent program,
   5.274 -       you would be far better off obtaining ptmalloc, which is
   5.275 -       derived from a version of this malloc, and is well-tuned for
   5.276 -       concurrent programs. (See http://www.malloc.de) Note that
   5.277 -       even when USE_MALLOC_LOCK is defined, you can can guarantee
   5.278 -       full thread-safety only if no threads acquire memory through 
   5.279 -       direct calls to MORECORE or other system-level allocators.
   5.280 -
   5.281 -  Compliance: I believe it is compliant with the 1997 Single Unix Specification
   5.282 -       (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably 
   5.283 -       others as well.
   5.284 -
   5.285 -* Synopsis of compile-time options:
   5.286 -
   5.287 -    People have reported using previous versions of this malloc on all
   5.288 -    versions of Unix, sometimes by tweaking some of the defines
   5.289 -    below. It has been tested most extensively on Solaris and
   5.290 -    Linux. It is also reported to work on WIN32 platforms.
   5.291 -    People also report using it in stand-alone embedded systems.
   5.292 -
   5.293 -    The implementation is in straight, hand-tuned ANSI C.  It is not
   5.294 -    at all modular. (Sorry!)  It uses a lot of macros.  To be at all
   5.295 -    usable, this code should be compiled using an optimizing compiler
   5.296 -    (for example gcc -O3) that can simplify expressions and control
   5.297 -    paths. (FAQ: some macros import variables as arguments rather than
   5.298 -    declare locals because people reported that some debuggers
   5.299 -    otherwise get confused.)
   5.300 -
   5.301 -    OPTION                     DEFAULT VALUE
   5.302 -
   5.303 -    Compilation Environment options:
   5.304 -
   5.305 -    __STD_C                    derived from C compiler defines
   5.306 -    WIN32                      NOT defined
   5.307 -    HAVE_MEMCPY                defined
   5.308 -    USE_MEMCPY                 1 if HAVE_MEMCPY is defined
   5.309 -    HAVE_MMAP                  defined as 1 
   5.310 -    MMAP_CLEARS                1
   5.311 -    HAVE_MREMAP                0 unless linux defined
   5.312 -    malloc_getpagesize         derived from system #includes, or 4096 if not
   5.313 -    HAVE_USR_INCLUDE_MALLOC_H  NOT defined
   5.314 -    LACKS_UNISTD_H             NOT defined unless WIN32
   5.315 -    LACKS_SYS_PARAM_H          NOT defined unless WIN32
   5.316 -    LACKS_SYS_MMAN_H           NOT defined unless WIN32
   5.317 -    LACKS_FCNTL_H              NOT defined
   5.318 -
   5.319 -    Changing default word sizes:
   5.320 -
   5.321 -    INTERNAL_SIZE_T            size_t
   5.322 -    MALLOC_ALIGNMENT           2 * sizeof(INTERNAL_SIZE_T)
   5.323 -    PTR_UINT                   unsigned long
   5.324 -    CHUNK_SIZE_T               unsigned long
   5.325 -
   5.326 -    Configuration and functionality options:
   5.327 -
   5.328 -    USE_DL_PREFIX              NOT defined
   5.329 -    USE_PUBLIC_MALLOC_WRAPPERS NOT defined
   5.330 -    USE_MALLOC_LOCK            NOT defined
   5.331 -    DEBUG                      NOT defined
   5.332 -    REALLOC_ZERO_BYTES_FREES   NOT defined
   5.333 -    MALLOC_FAILURE_ACTION      errno = ENOMEM, if __STD_C defined, else no-op
   5.334 -    TRIM_FASTBINS              0
   5.335 -    FIRST_SORTED_BIN_SIZE      512
   5.336 -
   5.337 -    Options for customizing MORECORE:
   5.338 -
   5.339 -    MORECORE                   sbrk
   5.340 -    MORECORE_CONTIGUOUS        1 
   5.341 -    MORECORE_CANNOT_TRIM       NOT defined
   5.342 -    MMAP_AS_MORECORE_SIZE      (1024 * 1024) 
   5.343 -
   5.344 -    Tuning options that are also dynamically changeable via mallopt:
   5.345 -
   5.346 -    DEFAULT_MXFAST             64
   5.347 -    DEFAULT_TRIM_THRESHOLD     256 * 1024
   5.348 -    DEFAULT_TOP_PAD            0
   5.349 -    DEFAULT_MMAP_THRESHOLD     256 * 1024
   5.350 -    DEFAULT_MMAP_MAX           65536
   5.351 -
   5.352 -    There are several other #defined constants and macros that you
   5.353 -    probably don't want to touch unless you are extending or adapting malloc.
   5.354 -*/
   5.355 -
   5.356 -/* RN: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
   5.357 -#if 0
   5.358 -
   5.359 -/*
   5.360 -  WIN32 sets up defaults for MS environment and compilers.
   5.361 -  Otherwise defaults are for unix.
   5.362 -*/
   5.363 -
   5.364 -/* #define WIN32 */
   5.365 -
   5.366 -#ifdef WIN32
   5.367 -
   5.368 -#define WIN32_LEAN_AND_MEAN
   5.369 -#include <windows.h>
   5.370 -
   5.371 -/* Win32 doesn't supply or need the following headers */
   5.372 -#define LACKS_UNISTD_H
   5.373 -#define LACKS_SYS_PARAM_H
   5.374 -#define LACKS_SYS_MMAN_H
   5.375 -
   5.376 -/* Use the supplied emulation of sbrk */
   5.377 -#define MORECORE sbrk
   5.378 -#define MORECORE_CONTIGUOUS 1
   5.379 -#define MORECORE_FAILURE    ((void*)(-1))
   5.380 -
   5.381 -/* Use the supplied emulation of mmap and munmap */
   5.382 -#define HAVE_MMAP 1
   5.383 -#define MUNMAP_FAILURE  (-1)
   5.384 -#define MMAP_CLEARS 1
   5.385 -
   5.386 -/* These values don't really matter in windows mmap emulation */
   5.387 -#define MAP_PRIVATE 1
   5.388 -#define MAP_ANONYMOUS 2
   5.389 -#define PROT_READ 1
   5.390 -#define PROT_WRITE 2
   5.391 -
   5.392 -/* Emulation functions defined at the end of this file */
   5.393 -
   5.394 -/* If USE_MALLOC_LOCK, use supplied critical-section-based lock functions */
   5.395 -#ifdef USE_MALLOC_LOCK
   5.396 -static int slwait(int *sl);
   5.397 -static int slrelease(int *sl);
   5.398 -#endif
   5.399 -
   5.400 -static long getpagesize(void);
   5.401 -static long getregionsize(void);
   5.402 -static void *sbrk(long size);
   5.403 -static void *mmap(void *ptr, long size, long prot, long type, long handle, long arg);
   5.404 -static long munmap(void *ptr, long size);
   5.405 -
   5.406 -static void vminfo (unsigned long*free, unsigned long*reserved, unsigned long*committed);
   5.407 -static int cpuinfo (int whole, unsigned long*kernel, unsigned long*user);
   5.408 -
   5.409 -#endif
   5.410 -
   5.411 -/*
   5.412 -  __STD_C should be nonzero if using ANSI-standard C compiler, a C++
   5.413 -  compiler, or a C compiler sufficiently close to ANSI to get away
   5.414 -  with it.
   5.415 -*/
   5.416 -
   5.417 -#ifndef __STD_C
   5.418 -#if defined(__STDC__) || defined(_cplusplus)
   5.419 -#define __STD_C     1
   5.420 -#else
   5.421 -#define __STD_C     0
   5.422 -#endif 
   5.423 -#endif /*__STD_C*/
   5.424 -
   5.425 -
   5.426 -/*
   5.427 -  Void_t* is the pointer type that malloc should say it returns
   5.428 -*/
   5.429 -
   5.430 -#ifndef Void_t
   5.431 -#if (__STD_C || defined(WIN32))
   5.432 -#define Void_t      void
   5.433 -#else
   5.434 -#define Void_t      char
   5.435 -#endif
   5.436 -#endif /*Void_t*/
   5.437 -
   5.438 -#if __STD_C
   5.439 -#include <stddef.h>   /* for size_t */
   5.440 -#else
   5.441 -#include <sys/types.h>
   5.442 -#endif
   5.443 -
   5.444 -#ifdef __cplusplus
   5.445 -extern "C" {
   5.446 -#endif
   5.447 -
   5.448 -/* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
   5.449 -
   5.450 -/* #define  LACKS_UNISTD_H */
   5.451 -
   5.452 -#ifndef LACKS_UNISTD_H
   5.453 -#include <unistd.h>
   5.454 -#endif
   5.455 -
   5.456 -/* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
   5.457 -
   5.458 -/* #define  LACKS_SYS_PARAM_H */
   5.459 -
   5.460 -
   5.461 -#include <stdio.h>    /* needed for malloc_stats */
   5.462 -#include <errno.h>    /* needed for optional MALLOC_FAILURE_ACTION */
   5.463 -
   5.464 -
   5.465 -/*
   5.466 -  Debugging:
   5.467 -
   5.468 -  Because freed chunks may be overwritten with bookkeeping fields, this
   5.469 -  malloc will often die when freed memory is overwritten by user
   5.470 -  programs.  This can be very effective (albeit in an annoying way)
   5.471 -  in helping track down dangling pointers.
   5.472 -
   5.473 -  If you compile with -DDEBUG, a number of assertion checks are
   5.474 -  enabled that will catch more memory errors. You probably won't be
   5.475 -  able to make much sense of the actual assertion errors, but they
   5.476 -  should help you locate incorrectly overwritten memory.  The
   5.477 -  checking is fairly extensive, and will slow down execution
   5.478 -  noticeably. Calling malloc_stats or mallinfo with DEBUG set will
   5.479 -  attempt to check every non-mmapped allocated and free chunk in the
   5.480 -  course of computing the summmaries. (By nature, mmapped regions
   5.481 -  cannot be checked very much automatically.)
   5.482 -
   5.483 -  Setting DEBUG may also be helpful if you are trying to modify
   5.484 -  this code. The assertions in the check routines spell out in more
   5.485 -  detail the assumptions and invariants underlying the algorithms.
   5.486 -
   5.487 -  Setting DEBUG does NOT provide an automated mechanism for checking
   5.488 -  that all accesses to malloced memory stay within their
   5.489 -  bounds. However, there are several add-ons and adaptations of this
   5.490 -  or other mallocs available that do this.
   5.491 -*/
   5.492 -
   5.493 -#if DEBUG
   5.494 -#include <assert.h>
   5.495 -#else
   5.496 -#define assert(x) ((void)0)
   5.497 -#endif
   5.498 -
   5.499 -/*
   5.500 -  The unsigned integer type used for comparing any two chunk sizes.
   5.501 -  This should be at least as wide as size_t, but should not be signed.
   5.502 -*/
   5.503 -
   5.504 -#ifndef CHUNK_SIZE_T
   5.505 -#define CHUNK_SIZE_T unsigned long
   5.506 -#endif
   5.507 -
   5.508 -/* 
   5.509 -  The unsigned integer type used to hold addresses when they are are
   5.510 -  manipulated as integers. Except that it is not defined on all
   5.511 -  systems, intptr_t would suffice.
   5.512 -*/
   5.513 -#ifndef PTR_UINT
   5.514 -#define PTR_UINT unsigned long
   5.515 -#endif
   5.516 -
   5.517 -
   5.518 -/*
   5.519 -  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
   5.520 -  of chunk sizes.
   5.521 -
   5.522 -  The default version is the same as size_t.
   5.523 -
   5.524 -  While not strictly necessary, it is best to define this as an
   5.525 -  unsigned type, even if size_t is a signed type. This may avoid some
   5.526 -  artificial size limitations on some systems.
   5.527 -
   5.528 -  On a 64-bit machine, you may be able to reduce malloc overhead by
   5.529 -  defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
   5.530 -  expense of not being able to handle more than 2^32 of malloced
   5.531 -  space. If this limitation is acceptable, you are encouraged to set
   5.532 -  this unless you are on a platform requiring 16byte alignments. In
   5.533 -  this case the alignment requirements turn out to negate any
   5.534 -  potential advantages of decreasing size_t word size.
   5.535 -
   5.536 -  Implementors: Beware of the possible combinations of:
   5.537 -     - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
   5.538 -       and might be the same width as int or as long
   5.539 -     - size_t might have different width and signedness as INTERNAL_SIZE_T
   5.540 -     - int and long might be 32 or 64 bits, and might be the same width
   5.541 -  To deal with this, most comparisons and difference computations
   5.542 -  among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being
   5.543 -  aware of the fact that casting an unsigned int to a wider long does
   5.544 -  not sign-extend. (This also makes checking for negative numbers
   5.545 -  awkward.) Some of these casts result in harmless compiler warnings
   5.546 -  on some systems.
   5.547 -*/
   5.548 -
   5.549 -#ifndef INTERNAL_SIZE_T
   5.550 -#define INTERNAL_SIZE_T size_t
   5.551 -#endif
   5.552 -
   5.553 -/* The corresponding word size */
   5.554 -#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
   5.555 -
   5.556 -
   5.557 -
   5.558 -/*
   5.559 -  MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
   5.560 -  It must be a power of two at least 2 * SIZE_SZ, even on machines
   5.561 -  for which smaller alignments would suffice. It may be defined as
   5.562 -  larger than this though. Note however that code and data structures
   5.563 -  are optimized for the case of 8-byte alignment.
   5.564 -*/
   5.565 -
   5.566 -
   5.567 -#ifndef MALLOC_ALIGNMENT
   5.568 -#define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
   5.569 -#endif
   5.570 -
   5.571 -/* The corresponding bit mask value */
   5.572 -#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
   5.573 -
   5.574 -
   5.575 -
   5.576 -/*
   5.577 -  REALLOC_ZERO_BYTES_FREES should be set if a call to
   5.578 -  realloc with zero bytes should be the same as a call to free.
   5.579 -  Some people think it should. Otherwise, since this malloc
   5.580 -  returns a unique pointer for malloc(0), so does realloc(p, 0).
   5.581 -*/
   5.582 -
   5.583 -/*   #define REALLOC_ZERO_BYTES_FREES */
   5.584 -
   5.585 -/*
   5.586 -  TRIM_FASTBINS controls whether free() of a very small chunk can
   5.587 -  immediately lead to trimming. Setting to true (1) can reduce memory
   5.588 -  footprint, but will almost always slow down programs that use a lot
   5.589 -  of small chunks.
   5.590 -
   5.591 -  Define this only if you are willing to give up some speed to more
   5.592 -  aggressively reduce system-level memory footprint when releasing
   5.593 -  memory in programs that use many small chunks.  You can get
   5.594 -  essentially the same effect by setting MXFAST to 0, but this can
   5.595 -  lead to even greater slowdowns in programs using many small chunks.
   5.596 -  TRIM_FASTBINS is an in-between compile-time option, that disables
   5.597 -  only those chunks bordering topmost memory from being placed in
   5.598 -  fastbins.
   5.599 -*/
   5.600 -
   5.601 -#ifndef TRIM_FASTBINS
   5.602 -#define TRIM_FASTBINS  0
   5.603 -#endif
   5.604 -
   5.605 -
   5.606 -/*
   5.607 -  USE_DL_PREFIX will prefix all public routines with the string 'dl'.
   5.608 -  This is necessary when you only want to use this malloc in one part 
   5.609 -  of a program, using your regular system malloc elsewhere.
   5.610 -*/
   5.611 -
   5.612 -/* #define USE_DL_PREFIX */
   5.613 -
   5.614 -
   5.615 -/*
   5.616 -  USE_MALLOC_LOCK causes wrapper functions to surround each
   5.617 -  callable routine with pthread mutex lock/unlock.
   5.618 -
   5.619 -  USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
   5.620 -*/
   5.621 -
   5.622 -
   5.623 -/* #define USE_MALLOC_LOCK */
   5.624 -
   5.625 -
   5.626 -/*
   5.627 -  If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
   5.628 -  actually a wrapper function that first calls MALLOC_PREACTION, then
   5.629 -  calls the internal routine, and follows it with
   5.630 -  MALLOC_POSTACTION. This is needed for locking, but you can also use
   5.631 -  this, without USE_MALLOC_LOCK, for purposes of interception,
   5.632 -  instrumentation, etc. It is a sad fact that using wrappers often
   5.633 -  noticeably degrades performance of malloc-intensive programs.
   5.634 -*/
   5.635 -
   5.636 -#ifdef USE_MALLOC_LOCK
   5.637 -#define USE_PUBLIC_MALLOC_WRAPPERS
   5.638 -#else
   5.639 -/* #define USE_PUBLIC_MALLOC_WRAPPERS */
   5.640 -#endif
   5.641 -
   5.642 -
   5.643 -/* 
   5.644 -   Two-phase name translation.
   5.645 -   All of the actual routines are given mangled names.
   5.646 -   When wrappers are used, they become the public callable versions.
   5.647 -   When DL_PREFIX is used, the callable names are prefixed.
   5.648 -*/
   5.649 -
   5.650 -#ifndef USE_PUBLIC_MALLOC_WRAPPERS
   5.651 -#define cALLOc      public_cALLOc
   5.652 -#define fREe        public_fREe
   5.653 -#define cFREe       public_cFREe
   5.654 -#define mALLOc      public_mALLOc
   5.655 -#define mEMALIGn    public_mEMALIGn
   5.656 -#define rEALLOc     public_rEALLOc
   5.657 -#define vALLOc      public_vALLOc
   5.658 -#define pVALLOc     public_pVALLOc
   5.659 -#define mALLINFo    public_mALLINFo
   5.660 -#define mALLOPt     public_mALLOPt
   5.661 -#define mTRIm       public_mTRIm
   5.662 -#define mSTATs      public_mSTATs
   5.663 -#define mUSABLe     public_mUSABLe
   5.664 -#define iCALLOc     public_iCALLOc
   5.665 -#define iCOMALLOc   public_iCOMALLOc
   5.666 -#endif
   5.667 -
   5.668 -#ifdef USE_DL_PREFIX
   5.669 -#define public_cALLOc    dlcalloc
   5.670 -#define public_fREe      dlfree
   5.671 -#define public_cFREe     dlcfree
   5.672 -#define public_mALLOc    dlmalloc
   5.673 -#define public_mEMALIGn  dlmemalign
   5.674 -#define public_rEALLOc   dlrealloc
   5.675 -#define public_vALLOc    dlvalloc
   5.676 -#define public_pVALLOc   dlpvalloc
   5.677 -#define public_mALLINFo  dlmallinfo
   5.678 -#define public_mALLOPt   dlmallopt
   5.679 -#define public_mTRIm     dlmalloc_trim
   5.680 -#define public_mSTATs    dlmalloc_stats
   5.681 -#define public_mUSABLe   dlmalloc_usable_size
   5.682 -#define public_iCALLOc   dlindependent_calloc
   5.683 -#define public_iCOMALLOc dlindependent_comalloc
   5.684 -#else /* USE_DL_PREFIX */
   5.685 -#define public_cALLOc    calloc
   5.686 -#define public_fREe      free
   5.687 -#define public_cFREe     cfree
   5.688 -#define public_mALLOc    malloc
   5.689 -#define public_mEMALIGn  memalign
   5.690 -#define public_rEALLOc   realloc
   5.691 -#define public_vALLOc    valloc
   5.692 -#define public_pVALLOc   pvalloc
   5.693 -#define public_mALLINFo  mallinfo
   5.694 -#define public_mALLOPt   mallopt
   5.695 -#define public_mTRIm     malloc_trim
   5.696 -#define public_mSTATs    malloc_stats
   5.697 -#define public_mUSABLe   malloc_usable_size
   5.698 -#define public_iCALLOc   independent_calloc
   5.699 -#define public_iCOMALLOc independent_comalloc
   5.700 -#endif /* USE_DL_PREFIX */
   5.701 -
   5.702 -
   5.703 -/*
   5.704 -  HAVE_MEMCPY should be defined if you are not otherwise using
   5.705 -  ANSI STD C, but still have memcpy and memset in your C library
   5.706 -  and want to use them in calloc and realloc. Otherwise simple
   5.707 -  macro versions are defined below.
   5.708 -
   5.709 -  USE_MEMCPY should be defined as 1 if you actually want to
   5.710 -  have memset and memcpy called. People report that the macro
   5.711 -  versions are faster than libc versions on some systems.
   5.712 -  
   5.713 -  Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
   5.714 -  (of <= 36 bytes) are manually unrolled in realloc and calloc.
   5.715 -*/
   5.716 -
   5.717 -#define HAVE_MEMCPY
   5.718 -
   5.719 -#ifndef USE_MEMCPY
   5.720 -#ifdef HAVE_MEMCPY
   5.721 -#define USE_MEMCPY 1
   5.722 -#else
   5.723 -#define USE_MEMCPY 0
   5.724 -#endif
   5.725 -#endif
   5.726 -
   5.727 -
   5.728 -#if (__STD_C || defined(HAVE_MEMCPY))
   5.729 -
   5.730 -#ifdef WIN32
   5.731 -/* On Win32 memset and memcpy are already declared in windows.h */
   5.732 -#else
   5.733 -#if __STD_C
   5.734 -void* memset(void*, int, size_t);
   5.735 -void* memcpy(void*, const void*, size_t);
   5.736 -#else
   5.737 -Void_t* memset();
   5.738 -Void_t* memcpy();
   5.739 -#endif
   5.740 -#endif
   5.741 -#endif
   5.742 -
   5.743 -/*
   5.744 -  MALLOC_FAILURE_ACTION is the action to take before "return 0" when
   5.745 -  malloc fails to be able to return memory, either because memory is
   5.746 -  exhausted or because of illegal arguments.
   5.747 -  
   5.748 -  By default, sets errno if running on STD_C platform, else does nothing.  
   5.749 -*/
   5.750 -
   5.751 -#ifndef MALLOC_FAILURE_ACTION
   5.752 -#if __STD_C
   5.753 -#define MALLOC_FAILURE_ACTION \
   5.754 -   errno = ENOMEM;
   5.755 -
   5.756 -#else
   5.757 -#define MALLOC_FAILURE_ACTION
   5.758 -#endif
   5.759 -#endif
   5.760 -
   5.761 -/*
   5.762 -  MORECORE-related declarations. By default, rely on sbrk
   5.763 -*/
   5.764 -
   5.765 -
   5.766 -#ifdef LACKS_UNISTD_H
   5.767 -#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
   5.768 -#if __STD_C
   5.769 -extern Void_t*     sbrk(ptrdiff_t);
   5.770 -#else
   5.771 -extern Void_t*     sbrk();
   5.772 -#endif
   5.773 -#endif
   5.774 -#endif
   5.775 -
   5.776 -/*
   5.777 -  MORECORE is the name of the routine to call to obtain more memory
   5.778 -  from the system.  See below for general guidance on writing
   5.779 -  alternative MORECORE functions, as well as a version for WIN32 and a
   5.780 -  sample version for pre-OSX macos.
   5.781 -*/
   5.782 -
   5.783 -#ifndef MORECORE
   5.784 -#define MORECORE sbrk
   5.785 -#endif
   5.786 -
   5.787 -/*
   5.788 -  MORECORE_FAILURE is the value returned upon failure of MORECORE
   5.789 -  as well as mmap. Since it cannot be an otherwise valid memory address,
   5.790 -  and must reflect values of standard sys calls, you probably ought not
   5.791 -  try to redefine it.
   5.792 -*/
   5.793 -
   5.794 -#ifndef MORECORE_FAILURE
   5.795 -#define MORECORE_FAILURE (-1)
   5.796 -#endif
   5.797 -
   5.798 -/*
   5.799 -  If MORECORE_CONTIGUOUS is true, take advantage of fact that
   5.800 -  consecutive calls to MORECORE with positive arguments always return
   5.801 -  contiguous increasing addresses.  This is true of unix sbrk.  Even
   5.802 -  if not defined, when regions happen to be contiguous, malloc will
   5.803 -  permit allocations spanning regions obtained from different
   5.804 -  calls. But defining this when applicable enables some stronger
   5.805 -  consistency checks and space efficiencies. 
   5.806 -*/
   5.807 -
   5.808 -#ifndef MORECORE_CONTIGUOUS
   5.809 -#define MORECORE_CONTIGUOUS 1
   5.810 -#endif
   5.811 -
   5.812 -/*
   5.813 -  Define MORECORE_CANNOT_TRIM if your version of MORECORE
   5.814 -  cannot release space back to the system when given negative
   5.815 -  arguments. This is generally necessary only if you are using
   5.816 -  a hand-crafted MORECORE function that cannot handle negative arguments.
   5.817 -*/
   5.818 -
   5.819 -/* #define MORECORE_CANNOT_TRIM */
   5.820 -
   5.821 -
   5.822 -/*
   5.823 -  Define HAVE_MMAP as true to optionally make malloc() use mmap() to
   5.824 -  allocate very large blocks.  These will be returned to the
   5.825 -  operating system immediately after a free(). Also, if mmap
   5.826 -  is available, it is used as a backup strategy in cases where
   5.827 -  MORECORE fails to provide space from system.
   5.828 -
   5.829 -  This malloc is best tuned to work with mmap for large requests.
   5.830 -  If you do not have mmap, operations involving very large chunks (1MB
   5.831 -  or so) may be slower than you'd like.
   5.832 -*/
   5.833 -
   5.834 -#ifndef HAVE_MMAP
   5.835 -#define HAVE_MMAP 1
   5.836 -#endif
   5.837 -
   5.838 -#if HAVE_MMAP
   5.839 -/* 
   5.840 -   Standard unix mmap using /dev/zero clears memory so calloc doesn't
   5.841 -   need to.
   5.842 -*/
   5.843 -
   5.844 -#ifndef MMAP_CLEARS
   5.845 -#define MMAP_CLEARS 1
   5.846 -#endif
   5.847 -
   5.848 -#else /* no mmap */
   5.849 -#ifndef MMAP_CLEARS
   5.850 -#define MMAP_CLEARS 0
   5.851 -#endif
   5.852 -#endif
   5.853 -
   5.854 -
   5.855 -/* 
   5.856 -   MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
   5.857 -   sbrk fails, and mmap is used as a backup (which is done only if
   5.858 -   HAVE_MMAP).  The value must be a multiple of page size.  This
   5.859 -   backup strategy generally applies only when systems have "holes" in
   5.860 -   address space, so sbrk cannot perform contiguous expansion, but
   5.861 -   there is still space available on system.  On systems for which
   5.862 -   this is known to be useful (i.e. most linux kernels), this occurs
   5.863 -   only when programs allocate huge amounts of memory.  Between this,
   5.864 -   and the fact that mmap regions tend to be limited, the size should
   5.865 -   be large, to avoid too many mmap calls and thus avoid running out
   5.866 -   of kernel resources.
   5.867 -*/
   5.868 -
   5.869 -#ifndef MMAP_AS_MORECORE_SIZE
   5.870 -#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
   5.871 -#endif
   5.872 -
   5.873 -/*
   5.874 -  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
   5.875 -  large blocks.  This is currently only possible on Linux with
   5.876 -  kernel versions newer than 1.3.77.
   5.877 -*/
   5.878 -
   5.879 -#ifndef HAVE_MREMAP
   5.880 -#ifdef linux
   5.881 -#define HAVE_MREMAP 1
   5.882 -#else
   5.883 -#define HAVE_MREMAP 0
   5.884 -#endif
   5.885 -
   5.886 -#endif /* HAVE_MMAP */
   5.887 -
   5.888 -
   5.889 -/*
   5.890 -  The system page size. To the extent possible, this malloc manages
   5.891 -  memory from the system in page-size units.  Note that this value is
   5.892 -  cached during initialization into a field of malloc_state. So even
   5.893 -  if malloc_getpagesize is a function, it is only called once.
   5.894 -
   5.895 -  The following mechanics for getpagesize were adapted from bsd/gnu
   5.896 -  getpagesize.h. If none of the system-probes here apply, a value of
   5.897 -  4096 is used, which should be OK: If they don't apply, then using
   5.898 -  the actual value probably doesn't impact performance.
   5.899 -*/
   5.900 -
   5.901 -
   5.902 -#ifndef malloc_getpagesize
   5.903 -
   5.904 -#ifndef LACKS_UNISTD_H
   5.905 -#  include <unistd.h>
   5.906 -#endif
   5.907 -
   5.908 -#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
   5.909 -#    ifndef _SC_PAGE_SIZE
   5.910 -#      define _SC_PAGE_SIZE _SC_PAGESIZE
   5.911 -#    endif
   5.912 -#  endif
   5.913 -
   5.914 -#  ifdef _SC_PAGE_SIZE
   5.915 -#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
   5.916 -#  else
   5.917 -#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
   5.918 -       extern size_t getpagesize();
   5.919 -#      define malloc_getpagesize getpagesize()
   5.920 -#    else
   5.921 -#      ifdef WIN32 /* use supplied emulation of getpagesize */
   5.922 -#        define malloc_getpagesize getpagesize() 
   5.923 -#      else
   5.924 -#        ifndef LACKS_SYS_PARAM_H
   5.925 -#          include <sys/param.h>
   5.926 -#        endif
   5.927 -#        ifdef EXEC_PAGESIZE
   5.928 -#          define malloc_getpagesize EXEC_PAGESIZE
   5.929 -#        else
   5.930 -#          ifdef NBPG
   5.931 -#            ifndef CLSIZE
   5.932 -#              define malloc_getpagesize NBPG
   5.933 -#            else
   5.934 -#              define malloc_getpagesize (NBPG * CLSIZE)
   5.935 -#            endif
   5.936 -#          else
   5.937 -#            ifdef NBPC
   5.938 -#              define malloc_getpagesize NBPC
   5.939 -#            else
   5.940 -#              ifdef PAGESIZE
   5.941 -#                define malloc_getpagesize PAGESIZE
   5.942 -#              else /* just guess */
   5.943 -#                define malloc_getpagesize (4096) 
   5.944 -#              endif
   5.945 -#            endif
   5.946 -#          endif
   5.947 -#        endif
   5.948 -#      endif
   5.949 -#    endif
   5.950 -#  endif
   5.951 -#endif
   5.952 -
   5.953 -/*
   5.954 -  This version of malloc supports the standard SVID/XPG mallinfo
   5.955 -  routine that returns a struct containing usage properties and
   5.956 -  statistics. It should work on any SVID/XPG compliant system that has
   5.957 -  a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
   5.958 -  install such a thing yourself, cut out the preliminary declarations
   5.959 -  as described above and below and save them in a malloc.h file. But
   5.960 -  there's no compelling reason to bother to do this.)
   5.961 -
   5.962 -  The main declaration needed is the mallinfo struct that is returned
   5.963 -  (by-copy) by mallinfo().  The SVID/XPG malloinfo struct contains a
   5.964 -  bunch of fields that are not even meaningful in this version of
   5.965 -  malloc.  These fields are are instead filled by mallinfo() with
   5.966 -  other numbers that might be of interest.
   5.967 -
   5.968 -  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
   5.969 -  /usr/include/malloc.h file that includes a declaration of struct
   5.970 -  mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
   5.971 -  version is declared below.  These must be precisely the same for
   5.972 -  mallinfo() to work.  The original SVID version of this struct,
   5.973 -  defined on most systems with mallinfo, declares all fields as
   5.974 -  ints. But some others define as unsigned long. If your system
   5.975 -  defines the fields using a type of different width than listed here,
   5.976 -  you must #include your system version and #define
   5.977 -  HAVE_USR_INCLUDE_MALLOC_H.
   5.978 -*/
   5.979 -
   5.980 -/* #define HAVE_USR_INCLUDE_MALLOC_H */
   5.981 -
   5.982 -#ifdef HAVE_USR_INCLUDE_MALLOC_H
   5.983 -#include "/usr/include/malloc.h"
   5.984 -#else
   5.985 -
   5.986 -/* SVID2/XPG mallinfo structure */
   5.987 -
   5.988 -struct mallinfo {
   5.989 -  int arena;    /* non-mmapped space allocated from system */
   5.990 -  int ordblks;  /* number of free chunks */
   5.991 -  int smblks;   /* number of fastbin blocks */
   5.992 -  int hblks;    /* number of mmapped regions */
   5.993 -  int hblkhd;   /* space in mmapped regions */
   5.994 -  int usmblks;  /* maximum total allocated space */
   5.995 -  int fsmblks;  /* space available in freed fastbin blocks */
   5.996 -  int uordblks; /* total allocated space */
   5.997 -  int fordblks; /* total free space */
   5.998 -  int keepcost; /* top-most, releasable (via malloc_trim) space */
   5.999 -};
  5.1000 -
  5.1001 -/*
  5.1002 -  SVID/XPG defines four standard parameter numbers for mallopt,
  5.1003 -  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
  5.1004 -  in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
  5.1005 -  so setting them has no effect. But this malloc also supports other
  5.1006 -  options in mallopt described below.
  5.1007 -*/
  5.1008 -#endif
  5.1009 -
  5.1010 -
  5.1011 -/* ---------- description of public routines ------------ */
  5.1012 -
  5.1013 -/*
  5.1014 -  malloc(size_t n)
  5.1015 -  Returns a pointer to a newly allocated chunk of at least n bytes, or null
  5.1016 -  if no space is available. Additionally, on failure, errno is
  5.1017 -  set to ENOMEM on ANSI C systems.
  5.1018 -
  5.1019 -  If n is zero, malloc returns a minumum-sized chunk. (The minimum
  5.1020 -  size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
  5.1021 -  systems.)  On most systems, size_t is an unsigned type, so calls
  5.1022 -  with negative arguments are interpreted as requests for huge amounts
  5.1023 -  of space, which will often fail. The maximum supported value of n
  5.1024 -  differs across systems, but is in all cases less than the maximum
  5.1025 -  representable value of a size_t.
  5.1026 -*/
  5.1027 -#if __STD_C
  5.1028 -Void_t*  public_mALLOc(size_t);
  5.1029 -#else
  5.1030 -Void_t*  public_mALLOc();
  5.1031 -#endif
  5.1032 -
  5.1033 -/*
  5.1034 -  free(Void_t* p)
  5.1035 -  Releases the chunk of memory pointed to by p, that had been previously
  5.1036 -  allocated using malloc or a related routine such as realloc.
  5.1037 -  It has no effect if p is null. It can have arbitrary (i.e., bad!)
  5.1038 -  effects if p has already been freed.
  5.1039 -
  5.1040 -  Unless disabled (using mallopt), freeing very large spaces will
  5.1041 -  when possible, automatically trigger operations that give
  5.1042 -  back unused memory to the system, thus reducing program footprint.
  5.1043 -*/
  5.1044 -#if __STD_C
  5.1045 -void     public_fREe(Void_t*);
  5.1046 -#else
  5.1047 -void     public_fREe();
  5.1048 -#endif
  5.1049 -
  5.1050 -/*
  5.1051 -  calloc(size_t n_elements, size_t element_size);
  5.1052 -  Returns a pointer to n_elements * element_size bytes, with all locations
  5.1053 -  set to zero.
  5.1054 -*/
  5.1055 -#if __STD_C
  5.1056 -Void_t*  public_cALLOc(size_t, size_t);
  5.1057 -#else
  5.1058 -Void_t*  public_cALLOc();
  5.1059 -#endif
  5.1060 -
  5.1061 -/*
  5.1062 -  realloc(Void_t* p, size_t n)
  5.1063 -  Returns a pointer to a chunk of size n that contains the same data
  5.1064 -  as does chunk p up to the minimum of (n, p's size) bytes, or null
  5.1065 -  if no space is available. 
  5.1066 -
  5.1067 -  The returned pointer may or may not be the same as p. The algorithm
  5.1068 -  prefers extending p when possible, otherwise it employs the
  5.1069 -  equivalent of a malloc-copy-free sequence.
  5.1070 -
  5.1071 -  If p is null, realloc is equivalent to malloc.  
  5.1072 -
  5.1073 -  If space is not available, realloc returns null, errno is set (if on
  5.1074 -  ANSI) and p is NOT freed.
  5.1075 -
  5.1076 -  if n is for fewer bytes than already held by p, the newly unused
  5.1077 -  space is lopped off and freed if possible.  Unless the #define
  5.1078 -  REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
  5.1079 -  zero (re)allocates a minimum-sized chunk.
  5.1080 -
  5.1081 -  Large chunks that were internally obtained via mmap will always
  5.1082 -  be reallocated using malloc-copy-free sequences unless
  5.1083 -  the system supports MREMAP (currently only linux).
  5.1084 -
  5.1085 -  The old unix realloc convention of allowing the last-free'd chunk
  5.1086 -  to be used as an argument to realloc is not supported.
  5.1087 -*/
  5.1088 -#if __STD_C
  5.1089 -Void_t*  public_rEALLOc(Void_t*, size_t);
  5.1090 -#else
  5.1091 -Void_t*  public_rEALLOc();
  5.1092 -#endif
  5.1093 -
  5.1094 -/*
  5.1095 -  memalign(size_t alignment, size_t n);
  5.1096 -  Returns a pointer to a newly allocated chunk of n bytes, aligned
  5.1097 -  in accord with the alignment argument.
  5.1098 -
  5.1099 -  The alignment argument should be a power of two. If the argument is
  5.1100 -  not a power of two, the nearest greater power is used.
  5.1101 -  8-byte alignment is guaranteed by normal malloc calls, so don't
  5.1102 -  bother calling memalign with an argument of 8 or less.
  5.1103 -
  5.1104 -  Overreliance on memalign is a sure way to fragment space.
  5.1105 -*/
  5.1106 -#if __STD_C
  5.1107 -Void_t*  public_mEMALIGn(size_t, size_t);
  5.1108 -#else
  5.1109 -Void_t*  public_mEMALIGn();
  5.1110 -#endif
  5.1111 -
  5.1112 -/*
  5.1113 -  valloc(size_t n);
  5.1114 -  Equivalent to memalign(pagesize, n), where pagesize is the page
  5.1115 -  size of the system. If the pagesize is unknown, 4096 is used.
  5.1116 -*/
  5.1117 -#if __STD_C
  5.1118 -Void_t*  public_vALLOc(size_t);
  5.1119 -#else
  5.1120 -Void_t*  public_vALLOc();
  5.1121 -#endif
  5.1122 -
  5.1123 -
  5.1124 -
  5.1125 -/*
  5.1126 -  mallopt(int parameter_number, int parameter_value)
  5.1127 -  Sets tunable parameters The format is to provide a
  5.1128 -  (parameter-number, parameter-value) pair.  mallopt then sets the
  5.1129 -  corresponding parameter to the argument value if it can (i.e., so
  5.1130 -  long as the value is meaningful), and returns 1 if successful else
  5.1131 -  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  5.1132 -  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
  5.1133 -  in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
  5.1134 -  so setting them has no effect. But this malloc also supports four
  5.1135 -  other options in mallopt. See below for details.  Briefly, supported
  5.1136 -  parameters are as follows (listed defaults are for "typical"
  5.1137 -  configurations).
  5.1138 -
  5.1139 -  Symbol            param #   default    allowed param values
  5.1140 -  M_MXFAST          1         64         0-80  (0 disables fastbins)
  5.1141 -  M_TRIM_THRESHOLD -1         256*1024   any   (-1U disables trimming)
  5.1142 -  M_TOP_PAD        -2         0          any  
  5.1143 -  M_MMAP_THRESHOLD -3         256*1024   any   (or 0 if no MMAP support)
  5.1144 -  M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
  5.1145 -*/
  5.1146 -#if __STD_C
  5.1147 -int      public_mALLOPt(int, int);
  5.1148 -#else
  5.1149 -int      public_mALLOPt();
  5.1150 -#endif
  5.1151 -
  5.1152 -
  5.1153 -/*
  5.1154 -  mallinfo()
  5.1155 -  Returns (by copy) a struct containing various summary statistics:
  5.1156 -
  5.1157 -  arena:     current total non-mmapped bytes allocated from system 
  5.1158 -  ordblks:   the number of free chunks 
  5.1159 -  smblks:    the number of fastbin blocks (i.e., small chunks that
  5.1160 -               have been freed but not use resused or consolidated)
  5.1161 -  hblks:     current number of mmapped regions 
  5.1162 -  hblkhd:    total bytes held in mmapped regions 
  5.1163 -  usmblks:   the maximum total allocated space. This will be greater
  5.1164 -                than current total if trimming has occurred.
  5.1165 -  fsmblks:   total bytes held in fastbin blocks 
  5.1166 -  uordblks:  current total allocated space (normal or mmapped)
  5.1167 -  fordblks:  total free space 
  5.1168 -  keepcost:  the maximum number of bytes that could ideally be released
  5.1169 -               back to system via malloc_trim. ("ideally" means that
  5.1170 -               it ignores page restrictions etc.)
  5.1171 -
  5.1172 -  Because these fields are ints, but internal bookkeeping may
  5.1173 -  be kept as longs, the reported values may wrap around zero and 
  5.1174 -  thus be inaccurate.
  5.1175 -*/
  5.1176 -#if __STD_C
  5.1177 -struct mallinfo public_mALLINFo(void);
  5.1178 -#else
  5.1179 -struct mallinfo public_mALLINFo();
  5.1180 -#endif
  5.1181 -
  5.1182 -/*
  5.1183 -  independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
  5.1184 -
  5.1185 -  independent_calloc is similar to calloc, but instead of returning a
  5.1186 -  single cleared space, it returns an array of pointers to n_elements
  5.1187 -  independent elements that can hold contents of size elem_size, each
  5.1188 -  of which starts out cleared, and can be independently freed,
  5.1189 -  realloc'ed etc. The elements are guaranteed to be adjacently
  5.1190 -  allocated (this is not guaranteed to occur with multiple callocs or
  5.1191 -  mallocs), which may also improve cache locality in some
  5.1192 -  applications.
  5.1193 -
  5.1194 -  The "chunks" argument is optional (i.e., may be null, which is
  5.1195 -  probably the most typical usage). If it is null, the returned array
  5.1196 -  is itself dynamically allocated and should also be freed when it is
  5.1197 -  no longer needed. Otherwise, the chunks array must be of at least
  5.1198 -  n_elements in length. It is filled in with the pointers to the
  5.1199 -  chunks.
  5.1200 -
  5.1201 -  In either case, independent_calloc returns this pointer array, or
  5.1202 -  null if the allocation failed.  If n_elements is zero and "chunks"
  5.1203 -  is null, it returns a chunk representing an array with zero elements
  5.1204 -  (which should be freed if not wanted).
  5.1205 -
  5.1206 -  Each element must be individually freed when it is no longer
  5.1207 -  needed. If you'd like to instead be able to free all at once, you
  5.1208 -  should instead use regular calloc and assign pointers into this
  5.1209 -  space to represent elements.  (In this case though, you cannot
  5.1210 -  independently free elements.)
  5.1211 -  
  5.1212 -  independent_calloc simplifies and speeds up implementations of many
  5.1213 -  kinds of pools.  It may also be useful when constructing large data
  5.1214 -  structures that initially have a fixed number of fixed-sized nodes,
  5.1215 -  but the number is not known at compile time, and some of the nodes
  5.1216 -  may later need to be freed. For example:
  5.1217 -
  5.1218 -  struct Node { int item; struct Node* next; };
  5.1219 -  
  5.1220 -  struct Node* build_list() {
  5.1221 -    struct Node** pool;
  5.1222 -    int n = read_number_of_nodes_needed();
  5.1223 -    if (n <= 0) return 0;
  5.1224 -    pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
  5.1225 -    if (pool == 0) die(); 
  5.1226 -    // organize into a linked list... 
  5.1227 -    struct Node* first = pool[0];
  5.1228 -    for (int i = 0; i < n-1; ++i) 
  5.1229 -      pool[i]->next = pool[i+1];
  5.1230 -    free(pool);     // Can now free the array (or not, if it is needed later)
  5.1231 -    return first;
  5.1232 -  }
  5.1233 -*/
  5.1234 -#if __STD_C
  5.1235 -Void_t** public_iCALLOc(size_t, size_t, Void_t**);
  5.1236 -#else
  5.1237 -Void_t** public_iCALLOc();
  5.1238 -#endif
  5.1239 -
  5.1240 -/*
  5.1241 -  independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
  5.1242 -
  5.1243 -  independent_comalloc allocates, all at once, a set of n_elements
  5.1244 -  chunks with sizes indicated in the "sizes" array.    It returns
  5.1245 -  an array of pointers to these elements, each of which can be
  5.1246 -  independently freed, realloc'ed etc. The elements are guaranteed to
  5.1247 -  be adjacently allocated (this is not guaranteed to occur with
  5.1248 -  multiple callocs or mallocs), which may also improve cache locality
  5.1249 -  in some applications.
  5.1250 -
  5.1251 -  The "chunks" argument is optional (i.e., may be null). If it is null
  5.1252 -  the returned array is itself dynamically allocated and should also
  5.1253 -  be freed when it is no longer needed. Otherwise, the chunks array
  5.1254 -  must be of at least n_elements in length. It is filled in with the
  5.1255 -  pointers to the chunks.
  5.1256 -
  5.1257 -  In either case, independent_comalloc returns this pointer array, or
  5.1258 -  null if the allocation failed.  If n_elements is zero and chunks is
  5.1259 -  null, it returns a chunk representing an array with zero elements
  5.1260 -  (which should be freed if not wanted).
  5.1261 -  
  5.1262 -  Each element must be individually freed when it is no longer
  5.1263 -  needed. If you'd like to instead be able to free all at once, you
  5.1264 -  should instead use a single regular malloc, and assign pointers at
  5.1265 -  particular offsets in the aggregate space. (In this case though, you 
  5.1266 -  cannot independently free elements.)
  5.1267 -
  5.1268 -  independent_comalloc differs from independent_calloc in that each
  5.1269 -  element may have a different size, and also that it does not
  5.1270 -  automatically clear elements.
  5.1271 -
  5.1272 -  independent_comalloc can be used to speed up allocation in cases
  5.1273 -  where several structs or objects must always be allocated at the
  5.1274 -  same time.  For example:
  5.1275 -
  5.1276 -  struct Head { ... }
  5.1277 -  struct Foot { ... }
  5.1278 -
  5.1279 -  void send_message(char* msg) {
  5.1280 -    int msglen = strlen(msg);
  5.1281 -    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
  5.1282 -    void* chunks[3];
  5.1283 -    if (independent_comalloc(3, sizes, chunks) == 0)
  5.1284 -      die();
  5.1285 -    struct Head* head = (struct Head*)(chunks[0]);
  5.1286 -    char*        body = (char*)(chunks[1]);
  5.1287 -    struct Foot* foot = (struct Foot*)(chunks[2]);
  5.1288 -    // ...
  5.1289 -  }
  5.1290 -
  5.1291 -  In general though, independent_comalloc is worth using only for
  5.1292 -  larger values of n_elements. For small values, you probably won't
  5.1293 -  detect enough difference from a series of malloc calls to bother.
  5.1294 -
  5.1295 -  Overuse of independent_comalloc can increase overall memory usage,
  5.1296 -  since it cannot reuse existing noncontiguous small chunks that
  5.1297 -  might be available for some of the elements.
  5.1298 -*/
  5.1299 -#if __STD_C
  5.1300 -Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
  5.1301 -#else
  5.1302 -Void_t** public_iCOMALLOc();
  5.1303 -#endif
  5.1304 -
  5.1305 -
  5.1306 -/*
  5.1307 -  pvalloc(size_t n);
  5.1308 -  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  5.1309 -  round up n to nearest pagesize.
  5.1310 - */
  5.1311 -#if __STD_C
  5.1312 -Void_t*  public_pVALLOc(size_t);
  5.1313 -#else
  5.1314 -Void_t*  public_pVALLOc();
  5.1315 -#endif
  5.1316 -
  5.1317 -/*
  5.1318 -  cfree(Void_t* p);
  5.1319 -  Equivalent to free(p).
  5.1320 -
  5.1321 -  cfree is needed/defined on some systems that pair it with calloc,
  5.1322 -  for odd historical reasons (such as: cfree is used in example 
  5.1323 -  code in the first edition of K&R).
  5.1324 -*/
  5.1325 -#if __STD_C
  5.1326 -void     public_cFREe(Void_t*);
  5.1327 -#else
  5.1328 -void     public_cFREe();
  5.1329 -#endif
  5.1330 -
  5.1331 -/*
  5.1332 -  malloc_trim(size_t pad);
  5.1333 -
  5.1334 -  If possible, gives memory back to the system (via negative
  5.1335 -  arguments to sbrk) if there is unused memory at the `high' end of
  5.1336 -  the malloc pool. You can call this after freeing large blocks of
  5.1337 -  memory to potentially reduce the system-level memory requirements
  5.1338 -  of a program. However, it cannot guarantee to reduce memory. Under
  5.1339 -  some allocation patterns, some large free blocks of memory will be
  5.1340 -  locked between two used chunks, so they cannot be given back to
  5.1341 -  the system.
  5.1342 -  
  5.1343 -  The `pad' argument to malloc_trim represents the amount of free
  5.1344 -  trailing space to leave untrimmed. If this argument is zero,
  5.1345 -  only the minimum amount of memory to maintain internal data
  5.1346 -  structures will be left (one page or less). Non-zero arguments
  5.1347 -  can be supplied to maintain enough trailing space to service
  5.1348 -  future expected allocations without having to re-obtain memory
  5.1349 -  from the system.
  5.1350 -  
  5.1351 -  Malloc_trim returns 1 if it actually released any memory, else 0.
  5.1352 -  On systems that do not support "negative sbrks", it will always
  5.1353 -  return 0.
  5.1354 -*/
  5.1355 -#if __STD_C
  5.1356 -int      public_mTRIm(size_t);
  5.1357 -#else
  5.1358 -int      public_mTRIm();
  5.1359 -#endif
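
A minimal sketch of the pattern described above: after freeing a large
block, ask the allocator to hand memory back, keeping an illustrative
64KB of trailing slack for expected future requests:

    void drop_large_buffer(void *big_buffer)
    {
        free(big_buffer);
        /* returns 1 only if memory was actually released via sbrk */
        (void)malloc_trim(64 * 1024);
    }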
  5.1360 -
  5.1361 -/*
  5.1362 -  malloc_usable_size(Void_t* p);
  5.1363 -
  5.1364 -  Returns the number of bytes you can actually use in
  5.1365 -  an allocated chunk, which may be more than you requested (although
  5.1366 -  often not) due to alignment and minimum size constraints.
  5.1367 -  You can use this many bytes without worrying about
  5.1368 -  overwriting other allocated objects. This is not a particularly great
  5.1369 -  programming practice. malloc_usable_size can be more useful in
  5.1370 -  debugging and assertions, for example:
  5.1371 -
  5.1372 -  p = malloc(n);
  5.1373 -  assert(malloc_usable_size(p) >= 256);
  5.1374 -
  5.1375 -*/
  5.1376 -#if __STD_C
  5.1377 -size_t   public_mUSABLe(Void_t*);
  5.1378 -#else
  5.1379 -size_t   public_mUSABLe();
  5.1380 -#endif
  5.1381 -
  5.1382 -/*
  5.1383 -  malloc_stats();
  5.1384 -  Prints on stderr the amount of space obtained from the system (both
  5.1385 -  via sbrk and mmap), the maximum amount (which may be more than
  5.1386 -  current if malloc_trim and/or munmap got called), and the current
  5.1387 -  number of bytes allocated via malloc (or realloc, etc) but not yet
  5.1388 -  freed. Note that this is the number of bytes allocated, not the
  5.1389 -  number requested. It will be larger than the number requested
  5.1390 -  because of alignment and bookkeeping overhead. Because it includes
  5.1391 -  alignment wastage as being in use, this figure may be greater than
  5.1392 -  zero even when no user-level chunks are allocated.
  5.1393 -
  5.1394 -  The reported current and maximum system memory can be inaccurate if
  5.1395 -  a program makes other calls to system memory allocation functions
  5.1396 -  (normally sbrk) outside of malloc.
  5.1397 -
  5.1398 -  malloc_stats prints only the most commonly interesting statistics.
  5.1399 -  More information can be obtained by calling mallinfo.
  5.1400 -
  5.1401 -*/
  5.1402 -#if __STD_C
  5.1403 -void     public_mSTATs(void);
  5.1404 -#else
  5.1405 -void     public_mSTATs();
  5.1406 -#endif
  5.1407 -
  5.1408 -/* mallopt tuning options */
  5.1409 -
  5.1410 -/*
  5.1411 -  M_MXFAST is the maximum request size used for "fastbins", special bins
  5.1412 -  that hold returned chunks without consolidating their spaces. This
  5.1413 -  enables future requests for chunks of the same size to be handled
  5.1414 -  very quickly, but can increase fragmentation, and thus increase the
  5.1415 -  overall memory footprint of a program.
  5.1416 -
  5.1417 -  This malloc manages fastbins very conservatively yet still
  5.1418 -  efficiently, so fragmentation is rarely a problem for values less
  5.1419 -  than or equal to the default.  The maximum supported value of MXFAST
  5.1420 -  is 80. You wouldn't want it any higher than this anyway.  Fastbins
  5.1421 -  are designed especially for use with many small structs, objects or
  5.1422 -  strings -- the default handles structs/objects/arrays with sizes up
  5.1423 -  to 16 4-byte fields, or small strings representing words, tokens,
  5.1424 -  etc. Using fastbins for larger objects normally worsens
  5.1425 -  fragmentation without improving speed.
  5.1426 -
  5.1427 -  M_MXFAST is set in REQUEST size units. It is internally used in
  5.1428 -  chunksize units, which adds padding and alignment.  You can reduce
  5.1429 -  M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
  5.1430 -  algorithm to be a closer approximation of fifo-best-fit in all cases,
  5.1431 -  not just for larger requests, but will generally cause it to be
  5.1432 -  slower.
  5.1433 -*/
  5.1434 -
  5.1435 -
  5.1436 -/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
  5.1437 -#ifndef M_MXFAST
  5.1438 -#define M_MXFAST            1    
  5.1439 -#endif
  5.1440 -
  5.1441 -#ifndef DEFAULT_MXFAST
  5.1442 -#define DEFAULT_MXFAST     64
  5.1443 -#endif
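
A minimal usage sketch of this parameter through mallopt(), using the
M_MXFAST symbol defined above:

    void tune_fastbins(void)
    {
        mallopt(M_MXFAST, 32);  /* cache only requests up to 32 bytes */
        mallopt(M_MXFAST, 0);   /* or: disable fastbins, approximating
                                   fifo-best-fit for all request sizes */
    }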
  5.1444 -
  5.1445 -
  5.1446 -/*
  5.1447 -  M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
  5.1448 -  to keep before releasing via malloc_trim in free().
  5.1449 -
  5.1450 -  Automatic trimming is mainly useful in long-lived programs.
  5.1451 -  Because trimming via sbrk can be slow on some systems, and can
  5.1452 -  sometimes be wasteful (in cases where programs immediately
  5.1453 -  afterward allocate more large chunks) the value should be high
  5.1454 -  enough so that your overall system performance would improve by
  5.1455 -  releasing this much memory.
  5.1456 -
  5.1457 -  The trim threshold and the mmap control parameters (see below)
  5.1458 -  can be traded off with one another. Trimming and mmapping are
  5.1459 -  two different ways of releasing unused memory back to the
  5.1460 -  system. Between these two, it is often possible to keep
  5.1461 -  system-level demands of a long-lived program down to a bare
  5.1462 -  minimum. For example, in one test suite of sessions measuring
  5.1463 -  the XF86 X server on Linux, using a trim threshold of 128K and a
  5.1464 -  mmap threshold of 192K led to near-minimal long term resource
  5.1465 -  consumption.
  5.1466 -
  5.1467 -  If you are using this malloc in a long-lived program, it should
  5.1468 -  pay to experiment with these values.  As a rough guide, you
  5.1469 -  might set the trim threshold close to the average size of a process
  5.1470 -  (program) running on your system.  Releasing this much memory
  5.1471 -  would allow such a process to run in memory.  Generally, it's
  5.1472 -  worth it to tune for trimming rather than memory mapping when a
  5.1473 -  program undergoes phases where several large chunks are
  5.1474 -  allocated and released in ways that can reuse each other's
  5.1475 -  storage, perhaps mixed with phases where there are no such
  5.1476 -  chunks at all.  And in well-behaved long-lived programs,
  5.1477 -  controlling release of large blocks via trimming versus mapping
  5.1478 -  is usually faster.
  5.1479 -
  5.1480 -  However, in most programs, these parameters serve mainly as
  5.1481 -  protection against the system-level effects of carrying around
  5.1482 -  massive amounts of unneeded memory. Since frequent calls to
  5.1483 -  sbrk, mmap, and munmap otherwise degrade performance, the default
  5.1484 -  parameters are set to relatively high values that serve only as
  5.1485 -  safeguards.
  5.1486 -
  5.1487 -  The trim value must be greater than page size to have any useful
  5.1488 -  effect.  To disable trimming completely, you can set it to
  5.1489 -  (unsigned long)(-1).
  5.1490 -
  5.1491 -  Trim settings interact with fastbin (MXFAST) settings: Unless
  5.1492 -  TRIM_FASTBINS is defined, automatic trimming never takes place upon
  5.1493 -  freeing a chunk with size less than or equal to MXFAST. Trimming is
  5.1494 -  instead delayed until subsequent freeing of larger chunks. However,
  5.1495 -  you can still force an attempted trim by calling malloc_trim.
  5.1496 -
  5.1497 -  Also, trimming is not generally possible in cases where
  5.1498 -  the main arena is obtained via mmap.
  5.1499 -
  5.1500 -  Note that the trick some people use of mallocing a huge space and
  5.1501 -  then freeing it at program startup, in an attempt to reserve system
  5.1502 -  memory, doesn't have the intended effect under automatic trimming,
  5.1503 -  since that memory will immediately be returned to the system.
  5.1504 -*/
  5.1505 -
  5.1506 -#define M_TRIM_THRESHOLD       -1
  5.1507 -
  5.1508 -#ifndef DEFAULT_TRIM_THRESHOLD
  5.1509 -#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
  5.1510 -#endif
  5.1511 -
  5.1512 -/*
  5.1513 -  M_TOP_PAD is the amount of extra `padding' space to allocate or
  5.1514 -  retain whenever sbrk is called. It is used in two ways internally:
  5.1515 -
  5.1516 -  * When sbrk is called to extend the top of the arena to satisfy
  5.1517 -  a new malloc request, this much padding is added to the sbrk
  5.1518 -  request.
  5.1519 -
  5.1520 -  * When malloc_trim is called automatically from free(),
  5.1521 -  it is used as the `pad' argument.
  5.1522 -
  5.1523 -  In both cases, the actual amount of padding is rounded
  5.1524 -  so that the end of the arena is always a system page boundary.
  5.1525 -
  5.1526 -  The main reason for using padding is to avoid calling sbrk so
  5.1527 -  often. Having even a small pad greatly reduces the likelihood
  5.1528 -  that nearly every malloc request during program start-up (or
  5.1529 -  after trimming) will invoke sbrk, which needlessly wastes
  5.1530 -  time.
  5.1531 -
  5.1532 -  Automatic rounding-up to page-size units is normally sufficient
  5.1533 -  to avoid measurable overhead, so the default is 0.  However, in
  5.1534 -  systems where sbrk is relatively slow, it can pay to increase
  5.1535 -  this value, at the expense of carrying around more memory than
  5.1536 -  the program needs.
  5.1537 -*/
  5.1538 -
  5.1539 -#define M_TOP_PAD              -2
  5.1540 -
  5.1541 -#ifndef DEFAULT_TOP_PAD
  5.1542 -#define DEFAULT_TOP_PAD        (0)
  5.1543 -#endif
  5.1544 -
  5.1545 -/*
  5.1546 -  M_MMAP_THRESHOLD is the request size threshold for using mmap()
  5.1547 -  to service a request. Requests of at least this size that cannot
  5.1548 -  be allocated using already-existing space will be serviced via mmap.
  5.1549 -  (If enough normal freed space already exists it is used instead.)
  5.1550 -
  5.1551 -  Using mmap segregates relatively large chunks of memory so that
  5.1552 -  they can be individually obtained and released from the host
  5.1553 -  system. A request serviced through mmap is never reused by any
  5.1554 -  other request (at least not directly; the system may just so
  5.1555 -  happen to remap successive requests to the same locations).
  5.1556 -
  5.1557 -  Segregating space in this way has the benefits that:
  5.1558 -
  5.1559 -   1. Mmapped space can ALWAYS be individually released back 
  5.1560 -      to the system, which helps keep the system level memory 
  5.1561 -      demands of a long-lived program low. 
  5.1562 -   2. Mapped memory can never become `locked' between
  5.1563 -      other chunks, as can happen with normally allocated chunks, which
  5.1564 -      means that even trimming via malloc_trim would not release them.
  5.1565 -   3. On some systems with "holes" in address spaces, mmap can obtain
  5.1566 -      memory that sbrk cannot.
  5.1567 -
  5.1568 -  However, it has the disadvantages that:
  5.1569 -
  5.1570 -   1. The space cannot be reclaimed, consolidated, and then
  5.1571 -      used to service later requests, as happens with normal chunks.
  5.1572 -   2. It can lead to more wastage because of mmap page alignment
  5.1573 -      requirements.
  5.1574 -   3. It causes malloc performance to be more dependent on host
  5.1575 -      system memory management support routines which may vary in
  5.1576 -      implementation quality and may impose arbitrary
  5.1577 -      limitations. Generally, servicing a request via normal
  5.1578 -      malloc steps is faster than going through a system's mmap.
  5.1579 -
  5.1580 -  The advantages of mmap nearly always outweigh disadvantages for
  5.1581 -  "large" chunks, but the value of "large" varies across systems.  The
  5.1582 -  default is an empirically derived value that works well in most
  5.1583 -  systems.
  5.1584 -*/
  5.1585 -
  5.1586 -#define M_MMAP_THRESHOLD      -3
  5.1587 -
  5.1588 -#ifndef DEFAULT_MMAP_THRESHOLD
  5.1589 -#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
  5.1590 -#endif
  5.1591 -
  5.1592 -/*
  5.1593 -  M_MMAP_MAX is the maximum number of requests to simultaneously
  5.1594 -  service using mmap. This parameter exists because some
  5.1595 -  systems have a limited number of internal tables for use
  5.1596 -  by mmap, and using more than a few of them may degrade
  5.1597 -  performance.
  5.1598 -
  5.1599 -  The default is set to a value that serves only as a safeguard.
  5.1600 -  Setting to 0 disables use of mmap for servicing large requests.  If
  5.1601 -  HAVE_MMAP is not set, the default value is 0, and attempts to set it
  5.1602 -  to non-zero values in mallopt will fail.
  5.1603 -*/
  5.1604 -
  5.1605 -#define M_MMAP_MAX             -4
  5.1606 -
  5.1607 -#ifndef DEFAULT_MMAP_MAX
  5.1608 -#if HAVE_MMAP
  5.1609 -#define DEFAULT_MMAP_MAX       (65536)
  5.1610 -#else
  5.1611 -#define DEFAULT_MMAP_MAX       (0)
  5.1612 -#endif
  5.1613 -#endif
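
Taken together, a minimal tuning sketch for a long-lived process, using
the 128K/192K trim/mmap pairing from the XF86 measurements cited
earlier (the M_TOP_PAD and M_MMAP_MAX values are merely illustrative):

    void tune_long_lived(void)
    {
        mallopt(M_TRIM_THRESHOLD, 128 * 1024);
        mallopt(M_TOP_PAD,         64 * 1024);
        mallopt(M_MMAP_THRESHOLD, 192 * 1024);
        mallopt(M_MMAP_MAX,       1024);
    }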
  5.1614 -
  5.1615 -#ifdef __cplusplus
  5.1616 -};  /* end of extern "C" */
  5.1617 -#endif
  5.1618 -
  5.1619 -
  5.1620 -/* RN XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
  5.1621 -#endif 
  5.1622 -
  5.1623 -/* 
  5.1624 -  ========================================================================
  5.1625 -  To make a fully customizable malloc.h header file, cut everything
  5.1626 -  above this line, put into file malloc.h, edit to suit, and #include it 
  5.1627 -  on the next line, as well as in programs that use this malloc.
  5.1628 -  ========================================================================
  5.1629 -*/
  5.1630 -
  5.1631 -/* #include "malloc.h" */
  5.1632 -
  5.1633 -/* --------------------- public wrappers ---------------------- */
  5.1634 -
  5.1635 -#ifdef USE_PUBLIC_MALLOC_WRAPPERS
  5.1636 -
  5.1637 -/* Declare all routines as internal */
  5.1638 -#if __STD_C
  5.1639 -static Void_t*  mALLOc(size_t);
  5.1640 -static void     fREe(Void_t*);
  5.1641 -static Void_t*  rEALLOc(Void_t*, size_t);
  5.1642 -static Void_t*  mEMALIGn(size_t, size_t);
  5.1643 -static Void_t*  vALLOc(size_t);
  5.1644 -static Void_t*  pVALLOc(size_t);
  5.1645 -static Void_t*  cALLOc(size_t, size_t);
  5.1646 -static Void_t** iCALLOc(size_t, size_t, Void_t**);
  5.1647 -static Void_t** iCOMALLOc(size_t, size_t*, Void_t**);
  5.1648 -static void     cFREe(Void_t*);
  5.1649 -static int      mTRIm(size_t);
  5.1650 -static size_t   mUSABLe(Void_t*);
  5.1651 -static void     mSTATs();
  5.1652 -static int      mALLOPt(int, int);
  5.1653 -static struct mallinfo mALLINFo(void);
  5.1654 -#else
  5.1655 -static Void_t*  mALLOc();
  5.1656 -static void     fREe();
  5.1657 -static Void_t*  rEALLOc();
  5.1658 -static Void_t*  mEMALIGn();
  5.1659 -static Void_t*  vALLOc();
  5.1660 -static Void_t*  pVALLOc();
  5.1661 -static Void_t*  cALLOc();
  5.1662 -static Void_t** iCALLOc();
  5.1663 -static Void_t** iCOMALLOc();
  5.1664 -static void     cFREe();
  5.1665 -static int      mTRIm();
  5.1666 -static size_t   mUSABLe();
  5.1667 -static void     mSTATs();
  5.1668 -static int      mALLOPt();
  5.1669 -static struct mallinfo mALLINFo();
  5.1670 -#endif
  5.1671 -
  5.1672 -/*
  5.1673 -  MALLOC_PREACTION and MALLOC_POSTACTION should be
  5.1674 -  defined to return 0 on success, and nonzero on failure.
  5.1675 -  The return value of MALLOC_POSTACTION is currently ignored
  5.1676 -  in wrapper functions since there is no reasonable default
  5.1677 -  action to take on failure.
  5.1678 -*/
  5.1679 -
  5.1680 -
  5.1681 -#ifdef USE_MALLOC_LOCK
  5.1682 -
  5.1683 -#ifdef WIN32
  5.1684 -
  5.1685 -static int mALLOC_MUTEx;
  5.1686 -#define MALLOC_PREACTION   slwait(&mALLOC_MUTEx)
  5.1687 -#define MALLOC_POSTACTION  slrelease(&mALLOC_MUTEx)
  5.1688 -
  5.1689 -#else
  5.1690 -
  5.1691 -#include <pthread.h>
  5.1692 -
  5.1693 -static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
  5.1694 -
  5.1695 -#define MALLOC_PREACTION   pthread_mutex_lock(&mALLOC_MUTEx)
  5.1696 -#define MALLOC_POSTACTION  pthread_mutex_unlock(&mALLOC_MUTEx)
  5.1697 -
  5.1698 -#endif /* WIN32 */
  5.1699 -
  5.1700 -#else
  5.1701 -
  5.1702 -/* Substitute anything you like for these */
  5.1703 -
  5.1704 -#define MALLOC_PREACTION   (0)
  5.1705 -#define MALLOC_POSTACTION  (0)
  5.1706 -
  5.1707 -#endif /* USE_MALLOC_LOCK */
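
For example, a sketch of substituting user-supplied locking, assuming
hypothetical my_lock()/my_unlock() primitives that return 0 on success
as the contract above requires:

    #define MALLOC_PREACTION   my_lock(&my_malloc_mutex)
    #define MALLOC_POSTACTION  my_unlock(&my_malloc_mutex)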
  5.1708 -
  5.1709 -Void_t* public_mALLOc(size_t bytes) {
  5.1710 -  Void_t* m;
  5.1711 -  if (MALLOC_PREACTION != 0) {
  5.1712 -    return 0;
  5.1713 -  }
  5.1714 -  m = mALLOc(bytes);
  5.1715 -  if (MALLOC_POSTACTION != 0) {
  5.1716 -  }
  5.1717 -  return m;
  5.1718 -}
  5.1719 -
  5.1720 -void public_fREe(Void_t* m) {
  5.1721 -  if (MALLOC_PREACTION != 0) {
  5.1722 -    return;
  5.1723 -  }
  5.1724 -  fREe(m);
  5.1725 -  if (MALLOC_POSTACTION != 0) {
  5.1726 -  }
  5.1727 -}
  5.1728 -
  5.1729 -Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
  5.1730 -  if (MALLOC_PREACTION != 0) {
  5.1731 -    return 0;
  5.1732 -  }
  5.1733 -  m = rEALLOc(m, bytes);
  5.1734 -  if (MALLOC_POSTACTION != 0) {
  5.1735 -  }
  5.1736 -  return m;
  5.1737 -}
  5.1738 -
  5.1739 -Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
  5.1740 -  Void_t* m;
  5.1741 -  if (MALLOC_PREACTION != 0) {
  5.1742 -    return 0;
  5.1743 -  }
  5.1744 -  m = mEMALIGn(alignment, bytes);
  5.1745 -  if (MALLOC_POSTACTION != 0) {
  5.1746 -  }
  5.1747 -  return m;
  5.1748 -}
  5.1749 -
  5.1750 -Void_t* public_vALLOc(size_t bytes) {
  5.1751 -  Void_t* m;
  5.1752 -  if (MALLOC_PREACTION != 0) {
  5.1753 -    return 0;
  5.1754 -  }
  5.1755 -  m = vALLOc(bytes);
  5.1756 -  if (MALLOC_POSTACTION != 0) {
  5.1757 -  }
  5.1758 -  return m;
  5.1759 -}
  5.1760 -
  5.1761 -Void_t* public_pVALLOc(size_t bytes) {
  5.1762 -  Void_t* m;
  5.1763 -  if (MALLOC_PREACTION != 0) {
  5.1764 -    return 0;
  5.1765 -  }
  5.1766 -  m = pVALLOc(bytes);
  5.1767 -  if (MALLOC_POSTACTION != 0) {
  5.1768 -  }
  5.1769 -  return m;
  5.1770 -}
  5.1771 -
  5.1772 -Void_t* public_cALLOc(size_t n, size_t elem_size) {
  5.1773 -  Void_t* m;
  5.1774 -  if (MALLOC_PREACTION != 0) {
  5.1775 -    return 0;
  5.1776 -  }
  5.1777 -  m = cALLOc(n, elem_size);
  5.1778 -  if (MALLOC_POSTACTION != 0) {
  5.1779 -  }
  5.1780 -  return m;
  5.1781 -}
  5.1782 -
  5.1783 -
  5.1784 -Void_t** public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks) {
  5.1785 -  Void_t** m;
  5.1786 -  if (MALLOC_PREACTION != 0) {
  5.1787 -    return 0;
  5.1788 -  }
  5.1789 -  m = iCALLOc(n, elem_size, chunks);
  5.1790 -  if (MALLOC_POSTACTION != 0) {
  5.1791 -  }
  5.1792 -  return m;
  5.1793 -}
  5.1794 -
  5.1795 -Void_t** public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks) {
  5.1796 -  Void_t** m;
  5.1797 -  if (MALLOC_PREACTION != 0) {
  5.1798 -    return 0;
  5.1799 -  }
  5.1800 -  m = iCOMALLOc(n, sizes, chunks);
  5.1801 -  if (MALLOC_POSTACTION != 0) {
  5.1802 -  }
  5.1803 -  return m;
  5.1804 -}
  5.1805 -
  5.1806 -void public_cFREe(Void_t* m) {
  5.1807 -  if (MALLOC_PREACTION != 0) {
  5.1808 -    return;
  5.1809 -  }
  5.1810 -  cFREe(m);
  5.1811 -  if (MALLOC_POSTACTION != 0) {
  5.1812 -  }
  5.1813 -}
  5.1814 -
  5.1815 -int public_mTRIm(size_t s) {
  5.1816 -  int result;
  5.1817 -  if (MALLOC_PREACTION != 0) {
  5.1818 -    return 0;
  5.1819 -  }
  5.1820 -  result = mTRIm(s);
  5.1821 -  if (MALLOC_POSTACTION != 0) {
  5.1822 -  }
  5.1823 -  return result;
  5.1824 -}
  5.1825 -
  5.1826 -size_t public_mUSABLe(Void_t* m) {
  5.1827 -  size_t result;
  5.1828 -  if (MALLOC_PREACTION != 0) {
  5.1829 -    return 0;
  5.1830 -  }
  5.1831 -  result = mUSABLe(m);
  5.1832 -  if (MALLOC_POSTACTION != 0) {
  5.1833 -  }
  5.1834 -  return result;
  5.1835 -}
  5.1836 -
  5.1837 -void public_mSTATs() {
  5.1838 -  if (MALLOC_PREACTION != 0) {
  5.1839 -    return;
  5.1840 -  }
  5.1841 -  mSTATs();
  5.1842 -  if (MALLOC_POSTACTION != 0) {
  5.1843 -  }
  5.1844 -}
  5.1845 -
  5.1846 -struct mallinfo public_mALLINFo() {
  5.1847 -  struct mallinfo m;
  5.1848 -  if (MALLOC_PREACTION != 0) {
  5.1849 -    struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  5.1850 -    return nm;
  5.1851 -  }
  5.1852 -  m = mALLINFo();
  5.1853 -  if (MALLOC_POSTACTION != 0) {
  5.1854 -  }
  5.1855 -  return m;
  5.1856 -}
  5.1857 -
  5.1858 -int public_mALLOPt(int p, int v) {
  5.1859 -  int result;
  5.1860 -  if (MALLOC_PREACTION != 0) {
  5.1861 -    return 0;
  5.1862 -  }
  5.1863 -  result = mALLOPt(p, v);
  5.1864 -  if (MALLOC_POSTACTION != 0) {
  5.1865 -  }
  5.1866 -  return result;
  5.1867 -}
  5.1868 -
  5.1869 -#endif
  5.1870 -
  5.1871 -
  5.1872 -
  5.1873 -/* ------------- Optional versions of memcopy ---------------- */
  5.1874 -
  5.1875 -
  5.1876 -#if USE_MEMCPY
  5.1877 -
  5.1878 -/* 
  5.1879 -  Note: memcpy is ONLY invoked with non-overlapping regions,
  5.1880 -  so the (usually slower) memmove is not needed.
  5.1881 -*/
  5.1882 -
  5.1883 -#define MALLOC_COPY(dest, src, nbytes)  memcpy(dest, src, nbytes)
  5.1884 -#define MALLOC_ZERO(dest, nbytes)       memset(dest, 0,   nbytes)
  5.1885 -
  5.1886 -#else /* !USE_MEMCPY */
  5.1887 -
  5.1888 -/* Use Duff's device for good zeroing/copying performance. */
  5.1889 -
  5.1890 -#define MALLOC_ZERO(charp, nbytes)                                            \
  5.1891 -do {                                                                          \
  5.1892 -  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
  5.1893 -  CHUNK_SIZE_T  mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                     \
  5.1894 -  long mcn;                                                                   \
  5.1895 -  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  5.1896 -  switch (mctmp) {                                                            \
  5.1897 -    case 0: for(;;) { *mzp++ = 0;                                             \
  5.1898 -    case 7:           *mzp++ = 0;                                             \
  5.1899 -    case 6:           *mzp++ = 0;                                             \
  5.1900 -    case 5:           *mzp++ = 0;                                             \
  5.1901 -    case 4:           *mzp++ = 0;                                             \
  5.1902 -    case 3:           *mzp++ = 0;                                             \
  5.1903 -    case 2:           *mzp++ = 0;                                             \
  5.1904 -    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
  5.1905 -  }                                                                           \
  5.1906 -} while(0)
  5.1907 -
  5.1908 -#define MALLOC_COPY(dest,src,nbytes)                                          \
  5.1909 -do {                                                                          \
  5.1910 -  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
  5.1911 -  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
  5.1912 -  CHUNK_SIZE_T  mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                     \
  5.1913 -  long mcn;                                                                   \
  5.1914 -  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  5.1915 -  switch (mctmp) {                                                            \
  5.1916 -    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
  5.1917 -    case 7:           *mcdst++ = *mcsrc++;                                    \
  5.1918 -    case 6:           *mcdst++ = *mcsrc++;                                    \
  5.1919 -    case 5:           *mcdst++ = *mcsrc++;                                    \
  5.1920 -    case 4:           *mcdst++ = *mcsrc++;                                    \
  5.1921 -    case 3:           *mcdst++ = *mcsrc++;                                    \
  5.1922 -    case 2:           *mcdst++ = *mcsrc++;                                    \
  5.1923 -    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
  5.1924 -  }                                                                           \
  5.1925 -} while(0)
  5.1926 -
  5.1927 -#endif
  5.1928 -
  5.1929 -/* ------------------ MMAP support ------------------  */
  5.1930 -
  5.1931 -
  5.1932 -#if HAVE_MMAP
  5.1933 -
  5.1934 -#ifndef LACKS_FCNTL_H
  5.1935 -#include <fcntl.h>
  5.1936 -#endif
  5.1937 -
  5.1938 -#ifndef LACKS_SYS_MMAN_H
  5.1939 -#include <sys/mman.h>
  5.1940 -#endif
  5.1941 -
  5.1942 -#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
  5.1943 -#define MAP_ANONYMOUS MAP_ANON
  5.1944 -#endif
  5.1945 -
  5.1946 -/* 
  5.1947 -   Nearly all versions of mmap support MAP_ANONYMOUS, 
  5.1948 -   so the following is unlikely to be needed, but is
  5.1949 -   supplied just in case.
  5.1950 -*/
  5.1951 -
  5.1952 -#ifndef MAP_ANONYMOUS
  5.1953 -
  5.1954 -static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
  5.1955 -
  5.1956 -#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
  5.1957 - (dev_zero_fd = open("/dev/zero", O_RDWR), \
  5.1958 -  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
  5.1959 -   mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
  5.1960 -
  5.1961 -#else
  5.1962 -
  5.1963 -#define MMAP(addr, size, prot, flags) \
  5.1964 - (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
  5.1965 -
  5.1966 -#endif
  5.1967 -
  5.1968 -
  5.1969 -#endif /* HAVE_MMAP */
  5.1970 -
  5.1971 -
  5.1972 -/*
  5.1973 -  -----------------------  Chunk representations -----------------------
  5.1974 -*/
  5.1975 -
  5.1976 -
  5.1977 -/*
  5.1978 -  This struct declaration is misleading (but accurate and necessary).
  5.1979 -  It declares a "view" into memory allowing access to necessary
  5.1980 -  fields at known offsets from a given base. See explanation below.
  5.1981 -*/
  5.1982 -
  5.1983 -struct malloc_chunk {
  5.1984 -
  5.1985 -  INTERNAL_SIZE_T      prev_size;  /* Size of previous chunk (if free).  */
  5.1986 -  INTERNAL_SIZE_T      size;       /* Size in bytes, including overhead. */
  5.1987 -
  5.1988 -  struct malloc_chunk* fd;         /* double links -- used only if free. */
  5.1989 -  struct malloc_chunk* bk;
  5.1990 -};
  5.1991 -
  5.1992 -
  5.1993 -typedef struct malloc_chunk* mchunkptr;
  5.1994 -
  5.1995 -/*
  5.1996 -   malloc_chunk details:
  5.1997 -
  5.1998 -    (The following includes lightly edited explanations by Colin Plumb.)
  5.1999 -
  5.2000 -    Chunks of memory are maintained using a `boundary tag' method as
  5.2001 -    described in e.g., Knuth or Standish.  (See the paper by Paul
  5.2002 -    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
  5.2003 -    survey of such techniques.)  Sizes of free chunks are stored both
  5.2004 -    in the front of each chunk and at the end.  This makes
  5.2005 -    consolidating fragmented chunks into bigger chunks very fast.  The
  5.2006 -    size fields also hold bits representing whether chunks are free or
  5.2007 -    in use.
  5.2008 -
  5.2009 -    An allocated chunk looks like this:
  5.2010 -
  5.2011 -
  5.2012 -    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  5.2013 -            |             Size of previous chunk, if allocated            | |
  5.2014 -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  5.2015 -            |             Size of chunk, in bytes                         |P|
  5.2016 -      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  5.2017 -            |             User data starts here...                          .
  5.2018 -            .                                                               .
  5.2019 -            .             (malloc_usable_space() bytes)                     .
  5.2020 -            .                                                               |
  5.2021 -nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  5.2022 -            |             Size of chunk                                     |
  5.2023 -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  5.2024 -
  5.2025 -
  5.2026 -    Where "chunk" is the front of the chunk for the purpose of most of
  5.2027 -    the malloc code, but "mem" is the pointer that is returned to the
  5.2028 -    user.  "Nextchunk" is the beginning of the next contiguous chunk.
  5.2029 -
  5.2030 -    Chunks always begin on even word boundaries, so the mem portion
  5.2031 -    (which is returned to the user) is also on an even word boundary, and
  5.2032 -    thus at least double-word aligned.
  5.2033 -
  5.2034 -    Free chunks are stored in circular doubly-linked lists, and look like this:
  5.2035 -
  5.2036 -    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  5.2037 -            |             Size of previous chunk                            |
  5.2038 -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  5.2039 -    `head:' |             Size of chunk, in bytes                         |P|
  5.2040 -      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  5.2041 -            |             Forward pointer to next chunk in list             |
  5.2042 -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  5.2043 -            |             Back pointer to previous chunk in list            |
  5.2044 -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  5.2045 -            |             Unused space (may be 0 bytes long)                .
  5.2046 -            .                                                               .
  5.2047 -            .                                                               |
  5.2048 -nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  5.2049 -    `foot:' |             Size of chunk, in bytes                           |
  5.2050 -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  5.2051 -
  5.2052 -    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
  5.2053 -    chunk size (which is always a multiple of two words), is an in-use
  5.2054 -    bit for the *previous* chunk.  If that bit is *clear*, then the
  5.2055 -    word before the current chunk size contains the previous chunk
  5.2056 -    size, and can be used to find the front of the previous chunk.
  5.2057 -    The very first chunk allocated always has this bit set,
  5.2058 -    preventing access to non-existent (or non-owned) memory. If
  5.2059 -    prev_inuse is set for any given chunk, then you CANNOT determine
  5.2060 -    the size of the previous chunk, and might even get a memory
  5.2061 -    addressing fault when trying to do so.
  5.2062 -
  5.2063 -    Note that the `foot' of the current chunk is actually represented
  5.2064 -    as the prev_size of the NEXT chunk. This makes it easier to
  5.2065 -    deal with alignments etc but can be very confusing when trying
  5.2066 -    to extend or adapt this code.
  5.2067 -
  5.2068 -    The two exceptions to all this are
  5.2069 -
  5.2070 -     1. The special chunk `top' doesn't bother using the
  5.2071 -        trailing size field since there is no next contiguous chunk
  5.2072 -        that would have to index off it. After initialization, `top'
  5.2073 -        is forced to always exist.  If it would become less than
  5.2074 -        MINSIZE bytes long, it is replenished.
  5.2075 -
  5.2076 -     2. Chunks allocated via mmap, which have the second-lowest-order
  5.2077 -        bit (IS_MMAPPED) set in their size fields.  Because they are
  5.2078 -        allocated one-by-one, each must contain its own trailing size field.
  5.2079 -
  5.2080 -*/
  5.2081 -
  5.2082 -/*
  5.2083 -  ---------- Size and alignment checks and conversions ----------
  5.2084 -*/
  5.2085 -
  5.2086 -/* conversion from malloc headers to user pointers, and back */
  5.2087 -
  5.2088 -#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
  5.2089 -#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
  5.2090 -
  5.2091 -/* The smallest possible chunk */
  5.2092 -#define MIN_CHUNK_SIZE        (sizeof(struct malloc_chunk))
  5.2093 -
  5.2094 -/* The smallest size we can malloc is an aligned minimal chunk */
  5.2095 -
  5.2096 -#define MINSIZE  \
  5.2097 -  (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
  5.2098 -
  5.2099 -/* Check if m has acceptable alignment */
  5.2100 -
  5.2101 -#define aligned_OK(m)  (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0)
  5.2102 -
  5.2103 -
  5.2104 -/* 
  5.2105 -   Check if a request is so large that it would wrap around zero when
  5.2106 -   padded and aligned. To simplify some other code, the bound is made
  5.2107 -   low enough so that adding MINSIZE will also not wrap around zero.
  5.2108 -*/
  5.2109 -
  5.2110 -#define REQUEST_OUT_OF_RANGE(req)                                 \
  5.2111 -  ((CHUNK_SIZE_T)(req) >=                                        \
  5.2112 -   (CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-2 * MINSIZE))    
  5.2113 -
  5.2114 -/* pad request bytes into a usable size -- internal version */
  5.2115 -
  5.2116 -#define request2size(req)                                         \
  5.2117 -  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
  5.2118 -   MINSIZE :                                                      \
  5.2119 -   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
  5.2120 -
  5.2121 -/*  Same, except also perform argument check */
  5.2122 -
  5.2123 -#define checked_request2size(req, sz)                             \
  5.2124 -  if (REQUEST_OUT_OF_RANGE(req)) {                                \
  5.2125 -    MALLOC_FAILURE_ACTION;                                        \
  5.2126 -    return 0;                                                     \
  5.2127 -  }                                                               \
  5.2128 -  (sz) = request2size(req);                                              
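
Worked through on a system with 4-byte SIZE_SZ and 8-byte alignment,
so MINSIZE == 16:

    request2size(1)  == 16   /* 1+4+7 < MINSIZE, bumped up to MINSIZE */
    request2size(13) == 24   /* (13+4+7) & ~7                         */
    request2size(24) == 32   /* one size word plus alignment padding  */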
  5.2129 -
  5.2130 -/*
  5.2131 -  --------------- Physical chunk operations ---------------
  5.2132 -*/
  5.2133 -
  5.2134 -
  5.2135 -/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
  5.2136 -#define PREV_INUSE 0x1
  5.2137 -
  5.2138 -/* extract inuse bit of previous chunk */
  5.2139 -#define prev_inuse(p)       ((p)->size & PREV_INUSE)
  5.2140 -
  5.2141 -
  5.2142 -/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
  5.2143 -#define IS_MMAPPED 0x2
  5.2144 -
  5.2145 -/* check for mmap()'ed chunk */
  5.2146 -#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
  5.2147 -
  5.2148 -/* 
  5.2149 -  Bits to mask off when extracting size 
  5.2150 -
  5.2151 -  Note: IS_MMAPPED is intentionally not masked off from size field in
  5.2152 -  macros for which mmapped chunks should never be seen. This should
  5.2153 -  cause helpful core dumps to occur if it is tried by accident by
  5.2154 -  people extending or adapting this malloc.
  5.2155 -*/
  5.2156 -#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
  5.2157 -
  5.2158 -/* Get size, ignoring use bits */
  5.2159 -#define chunksize(p)         ((p)->size & ~(SIZE_BITS))
  5.2160 -
  5.2161 -
  5.2162 -/* Ptr to next physical malloc_chunk. */
  5.2163 -#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
  5.2164 -
  5.2165 -/* Ptr to previous physical malloc_chunk */
  5.2166 -#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
  5.2167 -
  5.2168 -/* Treat space at ptr + offset as a chunk */
  5.2169 -#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
  5.2170 -
  5.2171 -/* extract p's inuse bit */
  5.2172 -#define inuse(p)\
  5.2173 -((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
  5.2174 -
  5.2175 -/* set/clear chunk as being inuse without otherwise disturbing */
  5.2176 -#define set_inuse(p)\
  5.2177 -((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
  5.2178 -
  5.2179 -#define clear_inuse(p)\
  5.2180 -((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
  5.2181 -
  5.2182 -
  5.2183 -/* check/set/clear inuse bits in known places */
  5.2184 -#define inuse_bit_at_offset(p, s)\
  5.2185 - (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
  5.2186 -
  5.2187 -#define set_inuse_bit_at_offset(p, s)\
  5.2188 - (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
  5.2189 -
  5.2190 -#define clear_inuse_bit_at_offset(p, s)\
  5.2191 - (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
  5.2192 -
  5.2193 -
  5.2194 -/* Set size at head, without disturbing its use bit */
  5.2195 -#define set_head_size(p, s)  ((p)->size = (((p)->size & PREV_INUSE) | (s)))
  5.2196 -
  5.2197 -/* Set size/use field */
  5.2198 -#define set_head(p, s)       ((p)->size = (s))
  5.2199 -
  5.2200 -/* Set size at footer (only when chunk is not in use) */
  5.2201 -#define set_foot(p, s)       (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
  5.2202 -
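
A minimal sketch of how these macros compose when walking a run of
contiguous chunks (walk_chunks is illustrative; p is assumed to point
at a valid chunk header below end):

    static void walk_chunks(mchunkptr p, mchunkptr end)
    {
        while (p < end) {
            size_t sz = chunksize(p);  /* size with flag bits masked off  */
            int used  = inuse(p);      /* PREV_INUSE bit of the successor */
            (void)sz; (void)used;
            p = next_chunk(p);         /* advance by the stored size      */
        }
    }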
  5.2203 -
  5.2204 -/*
  5.2205 -  -------------------- Internal data structures --------------------
  5.2206 -
  5.2207 -   All internal state is held in an instance of malloc_state defined
  5.2208 -   below. There are no other static variables, except in two optional
  5.2209 -   cases: 
  5.2210 -   * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above. 
  5.2211 -   * If HAVE_MMAP is true, but mmap doesn't support
  5.2212 -     MAP_ANONYMOUS, a dummy file descriptor for mmap.
  5.2213 -
  5.2214 -   Beware of lots of tricks that minimize the total bookkeeping space
  5.2215 -   requirements. The result is a little over 1K bytes (for 4byte
  5.2216 -   requirements. The result is a little over 1K bytes (for 4-byte
  5.2217 -   pointers and size_t).
  5.2218 -
  5.2219 -/*
  5.2220 -  Bins
  5.2221 -
  5.2222 -    An array of bin headers for free chunks. Each bin is doubly
  5.2223 -    linked.  The bins are approximately proportionally (log) spaced.
  5.2224 -    There are a lot of these bins (128). This may look excessive, but
  5.2225 -    works very well in practice.  Most bins hold sizes that are
  5.2226 -    unusual as malloc request sizes, but are more usual for fragments
  5.2227 -    and consolidated sets of chunks, which is what these bins hold, so
  5.2228 -    they can be found quickly.  All procedures maintain the invariant
  5.2229 -    that no consolidated chunk physically borders another one, so each
  5.2230 -    chunk in a list is known to be preceded and followed by either
  5.2231 -    inuse chunks or the ends of memory.
  5.2232 -
  5.2233 -    Chunks in bins are kept in size order, with ties going to the
  5.2234 -    approximately least recently used chunk. Ordering isn't needed
  5.2235 -    for the small bins, which all contain the same-sized chunks, but
  5.2236 -    facilitates best-fit allocation for larger chunks. These lists
  5.2237 -    are just sequential. Keeping them in order almost never requires
  5.2238 -    enough traversal to warrant using fancier ordered data
  5.2239 -    structures.  
  5.2240 -
  5.2241 -    Chunks of the same size are linked with the most
  5.2242 -    recently freed at the front, and allocations are taken from the
  5.2243 -    back.  This results in LRU (FIFO) allocation order, which tends
  5.2244 -    to give each chunk an equal opportunity to be consolidated with
  5.2245 -    adjacent freed chunks, resulting in larger free chunks and less
  5.2246 -    fragmentation.
  5.2247 -
  5.2248 -    To simplify use in double-linked lists, each bin header acts
  5.2249 -    as a malloc_chunk. This avoids special-casing for headers.
  5.2250 -    But to conserve space and improve locality, we allocate
  5.2251 -    only the fd/bk pointers of bins, and then use repositioning tricks
  5.2252 -    to treat these as the fields of a malloc_chunk*.  
  5.2253 -*/
  5.2254 -
  5.2255 -typedef struct malloc_chunk* mbinptr;
  5.2256 -
  5.2257 -/* addressing -- note that bin_at(0) does not exist */
  5.2258 -#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
  5.2259 -
  5.2260 -/* analog of ++bin */
  5.2261 -#define next_bin(b)  ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
  5.2262 -
  5.2263 -/* Reminders about list directionality within bins */
  5.2264 -#define first(b)     ((b)->fd)
  5.2265 -#define last(b)      ((b)->bk)
  5.2266 -
  5.2267 -/* Take a chunk off a bin list */
  5.2268 -#define unlink(P, BK, FD) {                                            \
  5.2269 -  FD = P->fd;                                                          \
  5.2270 -  BK = P->bk;                                                          \
  5.2271 -  FD->bk = BK;                                                         \
  5.2272 -  BK->fd = FD;                                                         \
  5.2273 -}
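
A minimal usage sketch: detaching a chunk from whichever bin list
currently holds it, with its list neighbors captured in bck/fwd:

    static void take_off_list(mchunkptr p)
    {
        mchunkptr bck, fwd;
        unlink(p, bck, fwd);  /* fwd->bk and bck->fd now bypass p */
    }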
  5.2274 -
  5.2275 -/*
  5.2276 -  Indexing
  5.2277 -
  5.2278 -    Bins for sizes below MIN_LARGE_SIZE (256 bytes) contain chunks of
  5.2279 -    all the same size, spaced 8 bytes apart. The 64 larger bins are
  5.2280 -    approximately logarithmically spaced, four bins per power of two
  5.2281 -    of the chunk size:
  5.2282 -
  5.2283 -    32 bins spaced    8 bytes apart   (sizes below 256)
  5.2284 -     4 bins spaced   64 bytes apart   (sizes 256-511)
  5.2285 -     4 bins spaced  128 bytes apart   (sizes 512-1023)
  5.2286 -     ... (spacing doubling with each power of two) ...
  5.2287 -     1 bin  of size what's left
  5.2288 -
  5.2289 -    The bins top out around 1MB because we expect to service large
  5.2290 -    requests via mmap.
  5.2291 -*/
  5.2292 -
  5.2293 -#define NBINS              96
  5.2294 -#define NSMALLBINS         32
  5.2295 -#define SMALLBIN_WIDTH      8
  5.2296 -#define MIN_LARGE_SIZE    256
  5.2297 -
  5.2298 -#define in_smallbin_range(sz)  \
  5.2299 -  ((CHUNK_SIZE_T)(sz) < (CHUNK_SIZE_T)MIN_LARGE_SIZE)
  5.2300 -
  5.2301 -#define smallbin_index(sz)     (((unsigned)(sz)) >> 3)
  5.2302 -
  5.2303 -/*
  5.2304 -  Compute index for size. We expect this to be inlined when
  5.2305 -  compiled with optimization, else not, which works out well.
  5.2306 -*/
  5.2307 -static int largebin_index(unsigned int sz) {
  5.2308 -  unsigned int  x = sz >> SMALLBIN_WIDTH; 
  5.2309 -  unsigned int m;            /* bit position of highest set bit of x */
  5.2310 -
  5.2311 -  if (x >= 0x10000) return NBINS-1;
  5.2312 -
  5.2313 -  /* On intel, use BSRL instruction to find highest bit */
  5.2314 -#if defined(__GNUC__) && defined(i386)
  5.2315 -
  5.2316 -  __asm__("bsrl %1,%0\n\t"
  5.2317 -          : "=r" (m) 
  5.2318 -          : "g"  (x));
  5.2319 -
  5.2320 -#else
  5.2321 -  {
  5.2322 -    /*
  5.2323 -      Based on branch-free nlz algorithm in chapter 5 of Henry
  5.2324 -      S. Warren Jr's book "Hacker's Delight".
  5.2325 -    */
  5.2326 -
  5.2327 -    unsigned int n = ((x - 0x100) >> 16) & 8;
  5.2328 -    x <<= n; 
  5.2329 -    m = ((x - 0x1000) >> 16) & 4;
  5.2330 -    n += m; 
  5.2331 -    x <<= m; 
  5.2332 -    m = ((x - 0x4000) >> 16) & 2;
  5.2333 -    n += m; 
  5.2334 -    x = (x << m) >> 14;
  5.2335 -    m = 13 - n + (x & ~(x>>1));
  5.2336 -  }
  5.2337 -#endif
  5.2338 -
  5.2339 -  /* Use next 2 bits to create finer-granularity bins */
  5.2340 -  return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
  5.2341 -}
  5.2342 -
  5.2343 -#define bin_index(sz) \
  5.2344 - ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
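
Worked examples of the mapping above: bin_index(16) == 2 and
bin_index(248) == 31 land in the 8-byte-spaced small bins, while sizes
of MIN_LARGE_SIZE (256) and up are routed through largebin_index().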
  5.2345 -
  5.2346 -/*
  5.2347 -  FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
  5.2348 -  first bin that is maintained in sorted order. This must
  5.2349 -  be the smallest size corresponding to a given bin.
  5.2350 -
  5.2351 -  Normally, this should be MIN_LARGE_SIZE. But you can weaken
  5.2352 -  best fit guarantees to sometimes speed up malloc by increasing this value.
  5.2353 -  Doing this means that malloc may choose a chunk that is 
  5.2354 -  non-best-fitting by up to the width of the bin.
  5.2355 -
  5.2356 -  Some useful cutoff values:
  5.2357 -      512 - all bins sorted
  5.2358 -     2560 - leaves bins <=     64 bytes wide unsorted  
  5.2359 -    12288 - leaves bins <=    512 bytes wide unsorted
  5.2360 -    65536 - leaves bins <=   4096 bytes wide unsorted
  5.2361 -   262144 - leaves bins <=  32768 bytes wide unsorted
  5.2362 -       -1 - no bins sorted (not recommended!)
  5.2363 -*/
  5.2364 -
  5.2365 -#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE 
  5.2366 -/* #define FIRST_SORTED_BIN_SIZE 65536 */
  5.2367 -
  5.2368 -/*
  5.2369 -  Unsorted chunks
  5.2370 -
  5.2371 -    All remainders from chunk splits, as well as all returned chunks,
  5.2372 -    are first placed in the "unsorted" bin. They are then placed
  5.2373 -    in regular bins after malloc gives them ONE chance to be used before
  5.2374 -    binning. So, basically, the unsorted_chunks list acts as a queue,
  5.2375 -    with chunks being placed on it in free (and malloc_consolidate),
  5.2376 -    and taken off (to be either used or placed in bins) in malloc.
  5.2377 -*/
  5.2378 -
  5.2379 -/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
  5.2380 -#define unsorted_chunks(M)          (bin_at(M, 1))
  5.2381 -
  5.2382 -/*
  5.2383 -  Top
  5.2384 -
  5.2385 -    The top-most available chunk (i.e., the one bordering the end of
  5.2386 -    available memory) is treated specially. It is never included in
  5.2387 -    any bin, is used only if no other chunk is available, and is
  5.2388 -    released back to the system if it is very large (see
  5.2389 -    M_TRIM_THRESHOLD).  Because top initially
  5.2390 -    points to its own bin with initial zero size, thus forcing
  5.2391 -    extension on the first malloc request, we avoid having any special
  5.2392 -    code in malloc to check whether it even exists yet. But we still
  5.2393 -    need to do so when getting memory from system, so we make
  5.2394 -    initial_top treat the bin as a legal but unusable chunk during the
  5.2395 -    interval between initialization and the first call to
  5.2396 -    sYSMALLOc. (This is somewhat delicate, since it relies on
  5.2397 -    the 2 preceding words to be zero during this interval as well.)
  5.2398 -*/
  5.2399 -
  5.2400 -/* Conveniently, the unsorted bin can be used as dummy top on first call */
  5.2401 -#define initial_top(M)              (unsorted_chunks(M))
  5.2402 -
  5.2403 -/*
  5.2404 -  Binmap
  5.2405 -
  5.2406 -    To help compensate for the large number of bins, a one-level index
  5.2407 -    structure is used for bin-by-bin searching.  `binmap' is a
  5.2408 -    bitvector recording whether bins are definitely empty so they can
  5.2409 -    be skipped over during traversals.  The bits are NOT always
  5.2410 -    cleared as soon as bins are empty, but instead only
  5.2411 -    when they are noticed to be empty during traversal in malloc.
  5.2412 -*/
  5.2413 -
  5.2414 -/* Conservatively use 32 bits per map word, even if on 64bit system */
  5.2415 -#define BINMAPSHIFT      5
  5.2416 -#define BITSPERMAP       (1U << BINMAPSHIFT)
  5.2417 -#define BINMAPSIZE       (NBINS / BITSPERMAP)
  5.2418 -
  5.2419 -#define idx2block(i)     ((i) >> BINMAPSHIFT)
  5.2420 -#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
  5.2421 -
  5.2422 -#define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
  5.2423 -#define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
  5.2424 -#define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
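
Worked example of the binmap arithmetic: bin 37 is tracked in map word
idx2block(37) == 37>>5 == 1, at bit idx2bit(37) == 1U << (37 & 31) ==
1U << 5.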
  5.2425 -
  5.2426 -/*
  5.2427 -  Fastbins
  5.2428 -
  5.2429 -    An array of lists holding recently freed small chunks.  Fastbins
  5.2430 -    are not doubly linked.  It is faster to single-link them, and
  5.2431 -    since chunks are never removed from the middles of these lists,
  5.2432 -    double linking is not necessary. Also, unlike regular bins, they
  5.2433 -    are not even processed in FIFO order (they use faster LIFO) since
  5.2434 -    ordering doesn't much matter in the transient contexts in which
  5.2435 -    fastbins are normally used.
  5.2436 -
  5.2437 -    Chunks in fastbins keep their inuse bit set, so they cannot
  5.2438 -    be consolidated with other free chunks. malloc_consolidate
  5.2439 -    releases all chunks in fastbins and consolidates them with
  5.2440 -    other free chunks. 
  5.2441 -*/
  5.2442 -
  5.2443 -typedef struct malloc_chunk* mfastbinptr;
  5.2444 -
  5.2445 -/* offset 2 to use otherwise unindexable first 2 bins */
  5.2446 -#define fastbin_index(sz)        ((((unsigned int)(sz)) >> 3) - 2)
  5.2447 -
  5.2448 -/* The maximum fastbin request size we support */
  5.2449 -#define MAX_FAST_SIZE     80
  5.2450 -
  5.2451 -#define NFASTBINS  (fastbin_index(request2size(MAX_FAST_SIZE))+1)
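
Worked through with 4-byte SIZE_SZ and 8-byte alignment: the smallest
chunk (16 bytes) maps to fastbin_index(16) == 0, request2size(80) == 88
maps to index 9, and so NFASTBINS == 10.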
  5.2452 -
  5.2453 -/*
  5.2454 -  FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
  5.2455 -  that triggers automatic consolidation of possibly-surrounding
  5.2456 -  fastbin chunks. This is a heuristic, so the exact value should not
  5.2457 -  matter too much. It is defined at half the default trim threshold as a
  5.2458 -  compromise heuristic to only attempt consolidation if it is likely
  5.2459 -  to lead to trimming. However, it is not dynamically tunable, since
  5.2460 -  consolidation reduces fragmentation surrounding large chunks even 
  5.2461 -  if trimming is not used.
  5.2462 -*/
  5.2463 -
  5.2464 -#define FASTBIN_CONSOLIDATION_THRESHOLD  \
  5.2465 -  ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
  5.2466 -
  5.2467 -/*
  5.2468 -  Since the lowest 2 bits in max_fast don't matter in size comparisons, 
  5.2469 -  they are used as flags.
  5.2470 -*/
  5.2471 -
  5.2472 -/*
  5.2473 -  ANYCHUNKS_BIT held in max_fast indicates that there may be any
  5.2474 -  freed chunks at all. It is set true when entering a chunk into any
  5.2475 -  bin.
  5.2476 -*/
  5.2477 -
  5.2478 -#define ANYCHUNKS_BIT        (1U)
  5.2479 -
  5.2480 -#define have_anychunks(M)     (((M)->max_fast &  ANYCHUNKS_BIT))
  5.2481 -#define set_anychunks(M)      ((M)->max_fast |=  ANYCHUNKS_BIT)
  5.2482 -#define clear_anychunks(M)    ((M)->max_fast &= ~ANYCHUNKS_BIT)
  5.2483 -
  5.2484 -/*
  5.2485 -  FASTCHUNKS_BIT held in max_fast indicates that there are probably
  5.2486 -  some fastbin chunks. It is set true on entering a chunk into any
  5.2487 -  fastbin, and cleared only in malloc_consolidate.
  5.2488 -*/
  5.2489 -
  5.2490 -#define FASTCHUNKS_BIT        (2U)
  5.2491 -
  5.2492 -#define have_fastchunks(M)   (((M)->max_fast &  FASTCHUNKS_BIT))
  5.2493 -#define set_fastchunks(M)    ((M)->max_fast |=  (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
  5.2494 -#define clear_fastchunks(M)  ((M)->max_fast &= ~(FASTCHUNKS_BIT))
  5.2495 -
  5.2496 -/* 
  5.2497 -   Set value of max_fast. 
  5.2498 -   Use impossibly small value if 0.
  5.2499 -*/
  5.2500 -
  5.2501 -#define set_max_fast(M, s) \
  5.2502 -  (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
  5.2503 -  ((M)->max_fast &  (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
  5.2504 -
  5.2505 -#define get_max_fast(M) \
  5.2506 -  ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
  5.2507 -
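A sketch of the flag packing: the size bound is always a multiple of the
alignment, so its two low bits are free to carry state:

    #include <assert.h>

    #define ANYCHUNKS_BIT   (1U)
    #define FASTCHUNKS_BIT  (2U)

    int main(void)
    {
        unsigned long max_fast = 64;                  /* bound, flags clear */

        max_fast |= (FASTCHUNKS_BIT | ANYCHUNKS_BIT); /* set_fastchunks     */
        assert((max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT)) == 64);

        max_fast &= ~FASTCHUNKS_BIT;                  /* clear_fastchunks   */
        assert(max_fast & ANYCHUNKS_BIT);             /* ANYCHUNKS survives */
        return 0;
    }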
  5.2508 -
  5.2509 -/*
  5.2510 -  morecore_properties is a status word holding dynamically discovered
  5.2511 -  or controlled properties of the morecore function
  5.2512 -*/
  5.2513 -
  5.2514 -#define MORECORE_CONTIGUOUS_BIT  (1U)
  5.2515 -
  5.2516 -#define contiguous(M) \
  5.2517 -        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT))
  5.2518 -#define noncontiguous(M) \
  5.2519 -        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT) == 0)
  5.2520 -#define set_contiguous(M) \
  5.2521 -        ((M)->morecore_properties |=  MORECORE_CONTIGUOUS_BIT)
  5.2522 -#define set_noncontiguous(M) \
  5.2523 -        ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
  5.2524 -
  5.2525 -
  5.2526 -/*
  5.2527 -   ----------- Internal state representation and initialization -----------
  5.2528 -*/
  5.2529 -
  5.2530 -struct malloc_state {
  5.2531 -
  5.2532 -  /* The maximum chunk size to be eligible for fastbin */
  5.2533 -  INTERNAL_SIZE_T  max_fast;   /* low 2 bits used as flags */
  5.2534 -
  5.2535 -  /* Fastbins */
  5.2536 -  mfastbinptr      fastbins[NFASTBINS];
  5.2537 -
  5.2538 -  /* Base of the topmost chunk -- not otherwise kept in a bin */
  5.2539 -  mchunkptr        top;
  5.2540 -
  5.2541 -  /* The remainder from the most recent split of a small request */
  5.2542 -  mchunkptr        last_remainder;
  5.2543 -
  5.2544 -  /* Normal bins packed as described above */
  5.2545 -  mchunkptr        bins[NBINS * 2];
  5.2546 -
  5.2547 -  /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
  5.2548 -  unsigned int     binmap[BINMAPSIZE+1];
  5.2549 -
  5.2550 -  /* Tunable parameters */
  5.2551 -  CHUNK_SIZE_T     trim_threshold;
  5.2552 -  INTERNAL_SIZE_T  top_pad;
  5.2553 -  INTERNAL_SIZE_T  mmap_threshold;
  5.2554 -
  5.2555 -  /* Memory map support */
  5.2556 -  int              n_mmaps;
  5.2557 -  int              n_mmaps_max;
  5.2558 -  int              max_n_mmaps;
  5.2559 -
  5.2560 -  /* Cache malloc_getpagesize */
  5.2561 -  unsigned int     pagesize;    
  5.2562 -
  5.2563 -  /* Track properties of MORECORE */
  5.2564 -  unsigned int     morecore_properties;
  5.2565 -
  5.2566 -  /* Statistics */
  5.2567 -  INTERNAL_SIZE_T  mmapped_mem;
  5.2568 -  INTERNAL_SIZE_T  sbrked_mem;
  5.2569 -  INTERNAL_SIZE_T  max_sbrked_mem;
  5.2570 -  INTERNAL_SIZE_T  max_mmapped_mem;
  5.2571 -  INTERNAL_SIZE_T  max_total_mem;
  5.2572 -};
  5.2573 -
  5.2574 -typedef struct malloc_state *mstate;
  5.2575 -
  5.2576 -/* 
  5.2577 -   There is exactly one instance of this struct in this malloc.
  5.2578 -   If you are adapting this malloc in a way that does NOT use a static
  5.2579 -   malloc_state, you MUST explicitly zero-fill it before using. This
  5.2580 -   malloc relies on the property that malloc_state is initialized to
  5.2581 -   all zeroes (as is true of C statics).
  5.2582 -*/
  5.2583 -
  5.2584 -static struct malloc_state av_;  /* never directly referenced */
  5.2585 -
  5.2586 -/*
  5.2587 -   All uses of av_ are via get_malloc_state().
  5.2588 -   At most one "call" to get_malloc_state is made per invocation of
  5.2589 -   the public versions of malloc and free, but other routines
  5.2590 -   that in turn invoke malloc and/or free may call more than once. 
  5.2591 -   Also, it is called in check* routines if DEBUG is set.
  5.2592 -*/
  5.2593 -
  5.2594 -#define get_malloc_state() (&(av_))
  5.2595 -
  5.2596 -/*
  5.2597 -  Initialize a malloc_state struct.
  5.2598 -
  5.2599 -  This is called only from within malloc_consolidate, which needs to
  5.2600 -  be called in the same contexts anyway.  It is never called directly
  5.2601 -  outside of malloc_consolidate because some optimizing compilers try
  5.2602 -  to inline it at all call points, which turns out not to be an
  5.2603 -  optimization at all. (Inlining it in malloc_consolidate is fine though.)
  5.2604 -*/
  5.2605 -
  5.2606 -#if __STD_C
  5.2607 -static void malloc_init_state(mstate av)
  5.2608 -#else
  5.2609 -static void malloc_init_state(av) mstate av;
  5.2610 -#endif
  5.2611 -{
  5.2612 -  int     i;
  5.2613 -  mbinptr bin;
  5.2614 -  
  5.2615 -  /* Establish circular links for normal bins */
  5.2616 -  for (i = 1; i < NBINS; ++i) { 
  5.2617 -    bin = bin_at(av,i);
  5.2618 -    bin->fd = bin->bk = bin;
  5.2619 -  }
  5.2620 -
  5.2621 -  av->top_pad        = DEFAULT_TOP_PAD;
  5.2622 -  av->n_mmaps_max    = DEFAULT_MMAP_MAX;
  5.2623 -  av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  5.2624 -  av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
  5.2625 -
  5.2626 -#if MORECORE_CONTIGUOUS
  5.2627 -  set_contiguous(av);
  5.2628 -#else
  5.2629 -  set_noncontiguous(av);
  5.2630 -#endif
  5.2631 -
  5.2632 -
  5.2633 -  set_max_fast(av, DEFAULT_MXFAST);
  5.2634 -
  5.2635 -  av->top            = initial_top(av);
  5.2636 -  av->pagesize       = malloc_getpagesize;
  5.2637 -}
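The loop above establishes the invariant that an empty bin header points
at itself in both directions; in isolation:

    struct bin { struct bin *fd, *bk; };

    static void bin_init(struct bin *b)  { b->fd = b->bk = b; }
    static int  bin_empty(struct bin *b) { return b->bk == b; } /* last(b)==b */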
  5.2638 -
  5.2639 -/* 
  5.2640 -   Other internal utilities operating on mstates
  5.2641 -*/
  5.2642 -
  5.2643 -static Void_t*  sYSMALLOc(INTERNAL_SIZE_T, mstate);
  5.2644 -#ifndef MORECORE_CANNOT_TRIM
  5.2645 -static int      sYSTRIm(size_t, mstate);
  5.2646 -#endif
  5.2647 -static void     malloc_consolidate(mstate);
  5.2648 -static Void_t** iALLOc(size_t, size_t*, int, Void_t**);
  5.2649 -
  5.2650 -/*
  5.2651 -  Debugging support
  5.2652 -
  5.2653 -  These routines make a number of assertions about the states
  5.2654 -  of data structures that should be true at all times. If any
  5.2655 -  are not true, it's very likely that a user program has somehow
  5.2656 -  trashed memory. (It's also possible that there is a coding error
  5.2657 -  in malloc. In which case, please report it!)
  5.2658 -*/
  5.2659 -
  5.2660 -#if ! DEBUG
  5.2661 -
  5.2662 -#define check_chunk(P)
  5.2663 -#define check_free_chunk(P)
  5.2664 -#define check_inuse_chunk(P)
  5.2665 -#define check_remalloced_chunk(P,N)
  5.2666 -#define check_malloced_chunk(P,N)
  5.2667 -#define check_malloc_state()
  5.2668 -
  5.2669 -#else
  5.2670 -#define check_chunk(P)              do_check_chunk(P)
  5.2671 -#define check_free_chunk(P)         do_check_free_chunk(P)
  5.2672 -#define check_inuse_chunk(P)        do_check_inuse_chunk(P)
  5.2673 -#define check_remalloced_chunk(P,N) do_check_remalloced_chunk(P,N)
  5.2674 -#define check_malloced_chunk(P,N)   do_check_malloced_chunk(P,N)
  5.2675 -#define check_malloc_state()        do_check_malloc_state()
  5.2676 -
  5.2677 -/*
  5.2678 -  Properties of all chunks
  5.2679 -*/
  5.2680 -
  5.2681 -#if __STD_C
  5.2682 -static void do_check_chunk(mchunkptr p)
  5.2683 -#else
  5.2684 -static void do_check_chunk(p) mchunkptr p;
  5.2685 -#endif
  5.2686 -{
  5.2687 -  mstate av = get_malloc_state();
  5.2688 -  CHUNK_SIZE_T  sz = chunksize(p);
  5.2689 -  /* min and max possible addresses assuming contiguous allocation */
  5.2690 -  char* max_address = (char*)(av->top) + chunksize(av->top);
  5.2691 -  char* min_address = max_address - av->sbrked_mem;
  5.2692 -
  5.2693 -  if (!chunk_is_mmapped(p)) {
  5.2694 -    
  5.2695 -    /* Has legal address ... */
  5.2696 -    if (p != av->top) {
  5.2697 -      if (contiguous(av)) {
  5.2698 -        assert(((char*)p) >= min_address);
  5.2699 -        assert(((char*)p + sz) <= ((char*)(av->top)));
  5.2700 -      }
  5.2701 -    }
  5.2702 -    else {
  5.2703 -      /* top size is always at least MINSIZE */
  5.2704 -      assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
  5.2705 -      /* top predecessor always marked inuse */
  5.2706 -      assert(prev_inuse(p));
  5.2707 -    }
  5.2708 -      
  5.2709 -  }
  5.2710 -  else {
  5.2711 -#if HAVE_MMAP
  5.2712 -    /* address is outside main heap  */
  5.2713 -    if (contiguous(av) && av->top != initial_top(av)) {
  5.2714 -      assert(((char*)p) < min_address || ((char*)p) > max_address);
  5.2715 -    }
  5.2716 -    /* chunk is page-aligned */
  5.2717 -    assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
  5.2718 -    /* mem is aligned */
  5.2719 -    assert(aligned_OK(chunk2mem(p)));
  5.2720 -#else
  5.2721 -    /* force an appropriate assert violation if debug set */
  5.2722 -    assert(!chunk_is_mmapped(p));
  5.2723 -#endif
  5.2724 -  }
  5.2725 -}
  5.2726 -
  5.2727 -/*
  5.2728 -  Properties of free chunks
  5.2729 -*/
  5.2730 -
  5.2731 -#if __STD_C
  5.2732 -static void do_check_free_chunk(mchunkptr p)
  5.2733 -#else
  5.2734 -static void do_check_free_chunk(p) mchunkptr p;
  5.2735 -#endif
  5.2736 -{
  5.2737 -  mstate av = get_malloc_state();
  5.2738 -
  5.2739 -  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  5.2740 -  mchunkptr next = chunk_at_offset(p, sz);
  5.2741 -
  5.2742 -  do_check_chunk(p);
  5.2743 -
  5.2744 -  /* Chunk must claim to be free ... */
  5.2745 -  assert(!inuse(p));
  5.2746 -  assert (!chunk_is_mmapped(p));
  5.2747 -
  5.2748 -  /* Unless a special marker, must have OK fields */
  5.2749 -  if ((CHUNK_SIZE_T)(sz) >= MINSIZE)
  5.2750 -  {
  5.2751 -    assert((sz & MALLOC_ALIGN_MASK) == 0);
  5.2752 -    assert(aligned_OK(chunk2mem(p)));
  5.2753 -    /* ... matching footer field */
  5.2754 -    assert(next->prev_size == sz);
  5.2755 -    /* ... and is fully consolidated */
  5.2756 -    assert(prev_inuse(p));
  5.2757 -    assert (next == av->top || inuse(next));
  5.2758 -
  5.2759 -    /* ... and has minimally sane links */
  5.2760 -    assert(p->fd->bk == p);
  5.2761 -    assert(p->bk->fd == p);
  5.2762 -  }
  5.2763 -  else /* markers are always of size SIZE_SZ */
  5.2764 -    assert(sz == SIZE_SZ);
  5.2765 -}
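The `matching footer' assertion relies on the boundary-tag scheme: a free
chunk replicates its size into the prev_size field of the chunk that
follows it. A hypothetical flat-buffer illustration:

    #include <assert.h>
    #include <stddef.h>

    struct tag { size_t prev_size, size; };

    int main(void)
    {
        size_t arena[16] = {0};                /* stand-in for heap memory */
        struct tag *p    = (struct tag *)arena;
        struct tag *next;

        p->size = 32;                          /* a 32-byte free chunk     */
        next    = (struct tag *)((char *)arena + p->size);
        next->prev_size = p->size;             /* what set_foot() records  */

        assert(next->prev_size == p->size);    /* the footer check above   */
        return 0;
    }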
  5.2766 -
  5.2767 -/*
  5.2768 -  Properties of inuse chunks
  5.2769 -*/
  5.2770 -
  5.2771 -#if __STD_C
  5.2772 -static void do_check_inuse_chunk(mchunkptr p)
  5.2773 -#else
  5.2774 -static void do_check_inuse_chunk(p) mchunkptr p;
  5.2775 -#endif
  5.2776 -{
  5.2777 -  mstate av = get_malloc_state();
  5.2778 -  mchunkptr next;
  5.2779 -  do_check_chunk(p);
  5.2780 -
  5.2781 -  if (chunk_is_mmapped(p))
  5.2782 -    return; /* mmapped chunks have no next/prev */
  5.2783 -
  5.2784 -  /* Check whether it claims to be in use ... */
  5.2785 -  assert(inuse(p));
  5.2786 -
  5.2787 -  next = next_chunk(p);
  5.2788 -
  5.2789 -  /* ... and is surrounded by OK chunks.
  5.2790 -    Since more things can be checked with free chunks than inuse ones,
  5.2791 -    if an inuse chunk borders them and debug is on, it's worth doing them.
  5.2792 -  */
  5.2793 -  if (!prev_inuse(p))  {
  5.2794 -    /* Note that we cannot even look at prev unless it is not inuse */
  5.2795 -    mchunkptr prv = prev_chunk(p);
  5.2796 -    assert(next_chunk(prv) == p);
  5.2797 -    do_check_free_chunk(prv);
  5.2798 -  }
  5.2799 -
  5.2800 -  if (next == av->top) {
  5.2801 -    assert(prev_inuse(next));
  5.2802 -    assert(chunksize(next) >= MINSIZE);
  5.2803 -  }
  5.2804 -  else if (!inuse(next))
  5.2805 -    do_check_free_chunk(next);
  5.2806 -}
  5.2807 -
  5.2808 -/*
  5.2809 -  Properties of chunks recycled from fastbins
  5.2810 -*/
  5.2811 -
  5.2812 -#if __STD_C
  5.2813 -static void do_check_remalloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
  5.2814 -#else
  5.2815 -static void do_check_remalloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
  5.2816 -#endif
  5.2817 -{
  5.2818 -  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  5.2819 -
  5.2820 -  do_check_inuse_chunk(p);
  5.2821 -
  5.2822 -  /* Legal size ... */
  5.2823 -  assert((sz & MALLOC_ALIGN_MASK) == 0);
  5.2824 -  assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
  5.2825 -  /* ... and alignment */
  5.2826 -  assert(aligned_OK(chunk2mem(p)));
  5.2827 -  /* chunk is less than MINSIZE more than request */
  5.2828 -  assert((long)(sz) - (long)(s) >= 0);
  5.2829 -  assert((long)(sz) - (long)(s + MINSIZE) < 0);
  5.2830 -}
  5.2831 -
  5.2832 -/*
  5.2833 -  Properties of nonrecycled chunks at the point they are malloced
  5.2834 -*/
  5.2835 -
  5.2836 -#if __STD_C
  5.2837 -static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
  5.2838 -#else
  5.2839 -static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
  5.2840 -#endif
  5.2841 -{
  5.2842 -  /* same as recycled case ... */
  5.2843 -  do_check_remalloced_chunk(p, s);
  5.2844 -
  5.2845 -  /*
  5.2846 -    ... plus,  must obey implementation invariant that prev_inuse is
  5.2847 -    always true of any allocated chunk; i.e., that each allocated
  5.2848 -    chunk borders either a previously allocated and still in-use
  5.2849 -    chunk, or the base of its memory arena. This is ensured
  5.2850 -    by making all allocations from the `lowest' part of any found
  5.2851 -    chunk.  This does not necessarily hold however for chunks
  5.2852 -    recycled via fastbins.
  5.2853 -  */
  5.2854 -
  5.2855 -  assert(prev_inuse(p));
  5.2856 -}
  5.2857 -
  5.2858 -
  5.2859 -/*
  5.2860 -  Properties of malloc_state.
  5.2861 -
  5.2862 -  This may be useful for debugging malloc, as well as detecting user
  5.2863 -  programmer errors that somehow write into malloc_state.
  5.2864 -
  5.2865 -  If you are extending or experimenting with this malloc, you can
  5.2866 -  probably figure out how to hack this routine to print out or
  5.2867 -  display chunk addresses, sizes, bins, and other instrumentation.
  5.2868 -*/
  5.2869 -
  5.2870 -static void do_check_malloc_state()
  5.2871 -{
  5.2872 -  mstate av = get_malloc_state();
  5.2873 -  int i;
  5.2874 -  mchunkptr p;
  5.2875 -  mchunkptr q;
  5.2876 -  mbinptr b;
  5.2877 -  unsigned int binbit;
  5.2878 -  int empty;
  5.2879 -  unsigned int idx;
  5.2880 -  INTERNAL_SIZE_T size;
  5.2881 -  CHUNK_SIZE_T  total = 0;
  5.2882 -  int max_fast_bin;
  5.2883 -
  5.2884 -  /* internal size_t must be no wider than pointer type */
  5.2885 -  assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
  5.2886 -
  5.2887 -  /* alignment is a power of 2 */
  5.2888 -  assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
  5.2889 -
  5.2890 -  /* cannot run remaining checks until fully initialized */
  5.2891 -  if (av->top == 0 || av->top == initial_top(av))
  5.2892 -    return;
  5.2893 -
  5.2894 -  /* pagesize is a power of 2 */
  5.2895 -  assert((av->pagesize & (av->pagesize-1)) == 0);
  5.2896 -
  5.2897 -  /* properties of fastbins */
  5.2898 -
  5.2899 -  /* max_fast is in allowed range */
  5.2900 -  assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));
  5.2901 -
  5.2902 -  max_fast_bin = fastbin_index(av->max_fast);
  5.2903 -
  5.2904 -  for (i = 0; i < NFASTBINS; ++i) {
  5.2905 -    p = av->fastbins[i];
  5.2906 -
  5.2907 -    /* all bins past max_fast are empty */
  5.2908 -    if (i > max_fast_bin)
  5.2909 -      assert(p == 0);
  5.2910 -
  5.2911 -    while (p != 0) {
  5.2912 -      /* each chunk claims to be inuse */
  5.2913 -      do_check_inuse_chunk(p);
  5.2914 -      total += chunksize(p);
  5.2915 -      /* chunk belongs in this bin */
  5.2916 -      assert(fastbin_index(chunksize(p)) == i);
  5.2917 -      p = p->fd;
  5.2918 -    }
  5.2919 -  }
  5.2920 -
  5.2921 -  if (total != 0)
  5.2922 -    assert(have_fastchunks(av));
  5.2923 -  else if (!have_fastchunks(av))
  5.2924 -    assert(total == 0);
  5.2925 -
  5.2926 -  /* check normal bins */
  5.2927 -  for (i = 1; i < NBINS; ++i) {
  5.2928 -    b = bin_at(av,i);
  5.2929 -
  5.2930 -    /* binmap is accurate (except for bin 1 == unsorted_chunks) */
  5.2931 -    if (i >= 2) {
  5.2932 -      binbit = get_binmap(av,i);
  5.2933 -      empty = last(b) == b;
  5.2934 -      if (!binbit)
  5.2935 -        assert(empty);
  5.2936 -      else if (!empty)
  5.2937 -        assert(binbit);
  5.2938 -    }
  5.2939 -
  5.2940 -    for (p = last(b); p != b; p = p->bk) {
  5.2941 -      /* each chunk claims to be free */
  5.2942 -      do_check_free_chunk(p);
  5.2943 -      size = chunksize(p);
  5.2944 -      total += size;
  5.2945 -      if (i >= 2) {
  5.2946 -        /* chunk belongs in bin */
  5.2947 -        idx = bin_index(size);
  5.2948 -        assert(idx == i);
  5.2949 -        /* lists are sorted */
  5.2950 -        if ((CHUNK_SIZE_T) size >= (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
  5.2951 -          assert(p->bk == b || 
  5.2952 -                 (CHUNK_SIZE_T)chunksize(p->bk) >= 
  5.2953 -                 (CHUNK_SIZE_T)chunksize(p));
  5.2954 -        }
  5.2955 -      }
  5.2956 -      /* chunk is followed by a legal chain of inuse chunks */
  5.2957 -      for (q = next_chunk(p);
  5.2958 -           (q != av->top && inuse(q) && 
  5.2959 -             (CHUNK_SIZE_T)(chunksize(q)) >= MINSIZE);
  5.2960 -           q = next_chunk(q))
  5.2961 -        do_check_inuse_chunk(q);
  5.2962 -    }
  5.2963 -  }
  5.2964 -
  5.2965 -  /* top chunk is OK */
  5.2966 -  check_chunk(av->top);
  5.2967 -
  5.2968 -  /* sanity checks for statistics */
  5.2969 -
  5.2970 -  assert(total <= (CHUNK_SIZE_T)(av->max_total_mem));
  5.2971 -  assert(av->n_mmaps >= 0);
  5.2972 -  assert(av->n_mmaps <= av->max_n_mmaps);
  5.2973 -
  5.2974 -  assert((CHUNK_SIZE_T)(av->sbrked_mem) <=
  5.2975 -         (CHUNK_SIZE_T)(av->max_sbrked_mem));
  5.2976 -
  5.2977 -  assert((CHUNK_SIZE_T)(av->mmapped_mem) <=
  5.2978 -         (CHUNK_SIZE_T)(av->max_mmapped_mem));
  5.2979 -
  5.2980 -  assert((CHUNK_SIZE_T)(av->max_total_mem) >=
  5.2981 -         (CHUNK_SIZE_T)(av->mmapped_mem) + (CHUNK_SIZE_T)(av->sbrked_mem));
  5.2982 -}
  5.2983 -#endif
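Two of the sanity checks above use the standard power-of-two idiom: for
nonzero x, x is a power of two exactly when (x & (x-1)) == 0:

    #include <assert.h>

    int main(void)
    {
        assert((4096 & 4095) == 0);   /* typical pagesize: a power of two */
        assert((  96 &   95) != 0);   /* 96 is not a power of two         */
        return 0;
    }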
  5.2984 -
  5.2985 -
  5.2986 -/* ----------- Routines dealing with system allocation -------------- */
  5.2987 -
  5.2988 -/*
  5.2989 -  sysmalloc handles malloc cases requiring more memory from the system.
  5.2990 -  On entry, it is assumed that av->top does not have enough
  5.2991 -  space to service request for nb bytes, thus requiring that av->top
  5.2992 -  be extended or replaced.
  5.2993 -*/
  5.2994 -
  5.2995 -#if __STD_C
  5.2996 -static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
  5.2997 -#else
  5.2998 -static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
  5.2999 -#endif
  5.3000 -{
  5.3001 -  mchunkptr       old_top;        /* incoming value of av->top */
  5.3002 -  INTERNAL_SIZE_T old_size;       /* its size */
  5.3003 -  char*           old_end;        /* its end address */
  5.3004 -
  5.3005 -  long            size;           /* arg to first MORECORE or mmap call */
  5.3006 -  char*           brk;            /* return value from MORECORE */
  5.3007 -
  5.3008 -  long            correction;     /* arg to 2nd MORECORE call */
  5.3009 -  char*           snd_brk;        /* 2nd return val */
  5.3010 -
  5.3011 -  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
  5.3012 -  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
  5.3013 -  char*           aligned_brk;    /* aligned offset into brk */
  5.3014 -
  5.3015 -  mchunkptr       p;              /* the allocated/returned chunk */
  5.3016 -  mchunkptr       remainder;      /* remainder from allocation */
  5.3017 -  CHUNK_SIZE_T    remainder_size; /* its size */
  5.3018 -
  5.3019 -  CHUNK_SIZE_T    sum;            /* for updating stats */
  5.3020 -
  5.3021 -  size_t          pagemask  = av->pagesize - 1;
  5.3022 -
  5.3023 -  /*
  5.3024 -    If there is space available in fastbins, consolidate and retry
  5.3025 -    malloc from scratch rather than getting memory from system.  This
  5.3026 -    can occur only if nb is in smallbin range so we didn't consolidate
  5.3027 -    upon entry to malloc. It is much easier to handle this case here
  5.3028 -    than in malloc proper.
  5.3029 -  */
  5.3030 -
  5.3031 -  if (have_fastchunks(av)) {
  5.3032 -    assert(in_smallbin_range(nb));
  5.3033 -    malloc_consolidate(av);
  5.3034 -    return mALLOc(nb - MALLOC_ALIGN_MASK);
  5.3035 -  }
  5.3036 -
  5.3037 -
  5.3038 -#if HAVE_MMAP
  5.3039 -
  5.3040 -  /*
  5.3041 -    If have mmap, and the request size meets the mmap threshold, and
  5.3042 -    the system supports mmap, and there are few enough currently
  5.3043 -    allocated mmapped regions, try to directly map this request
  5.3044 -    rather than expanding top.
  5.3045 -  */
  5.3046 -
  5.3047 -  if ((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) &&
  5.3048 -      (av->n_mmaps < av->n_mmaps_max)) {
  5.3049 -
  5.3050 -    char* mm;             /* return value from mmap call*/
  5.3051 -
  5.3052 -    /*
  5.3053 -      Round up size to nearest page.  For mmapped chunks, the overhead
  5.3054 -      is one SIZE_SZ unit larger than for normal chunks, because there
  5.3055 -      is no following chunk whose prev_size field could be used.
  5.3056 -    */
  5.3057 -    size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
  5.3058 -
  5.3059 -    /* Don't try if size wraps around 0 */
  5.3060 -    if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
  5.3061 -
  5.3062 -      mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
  5.3063 -      
  5.3064 -      if (mm != (char*)(MORECORE_FAILURE)) {
  5.3065 -        
  5.3066 -        /*
  5.3067 -          The offset to the start of the mmapped region is stored
  5.3068 -          in the prev_size field of the chunk. This allows us to adjust
  5.3069 -          returned start address to meet alignment requirements here 
  5.3070 -          and in memalign(), and still be able to compute proper
  5.3071 -          address argument for later munmap in free() and realloc().
  5.3072 -        */
  5.3073 -        
  5.3074 -        front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
  5.3075 -        if (front_misalign > 0) {
  5.3076 -          correction = MALLOC_ALIGNMENT - front_misalign;
  5.3077 -          p = (mchunkptr)(mm + correction);
  5.3078 -          p->prev_size = correction;
  5.3079 -          set_head(p, (size - correction) |IS_MMAPPED);
  5.3080 -        }
  5.3081 -        else {
  5.3082 -          p = (mchunkptr)mm;
  5.3083 -          p->prev_size = 0;
  5.3084 -          set_head(p, size|IS_MMAPPED);
  5.3085 -        }
  5.3086 -        
  5.3087 -        /* update statistics */
  5.3088 -        
  5.3089 -        if (++av->n_mmaps > av->max_n_mmaps) 
  5.3090 -          av->max_n_mmaps = av->n_mmaps;
  5.3091 -        
  5.3092 -        sum = av->mmapped_mem += size;
  5.3093 -        if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem)) 
  5.3094 -          av->max_mmapped_mem = sum;
  5.3095 -        sum += av->sbrked_mem;
  5.3096 -        if (sum > (CHUNK_SIZE_T)(av->max_total_mem)) 
  5.3097 -          av->max_total_mem = sum;
  5.3098 -
  5.3099 -        check_chunk(p);
  5.3100 -        
  5.3101 -        return chunk2mem(p);
  5.3102 -      }
  5.3103 -    }
  5.3104 -  }
  5.3105 -#endif
  5.3106 -
  5.3107 -  /* Record incoming configuration of top */
  5.3108 -
  5.3109 -  old_top  = av->top;
  5.3110 -  old_size = chunksize(old_top);
  5.3111 -  old_end  = (char*)(chunk_at_offset(old_top, old_size));
  5.3112 -
  5.3113 -  brk = snd_brk = (char*)(MORECORE_FAILURE); 
  5.3114 -
  5.3115 -  /* 
  5.3116 -     If not the first time through, we require old_size to be
  5.3117 -     at least MINSIZE and to have prev_inuse set.
  5.3118 -  */
  5.3119 -
  5.3120 -  assert((old_top == initial_top(av) && old_size == 0) || 
  5.3121 -         ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
  5.3122 -          prev_inuse(old_top)));
  5.3123 -
  5.3124 -  /* Precondition: not enough current space to satisfy nb request */
  5.3125 -  assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));
  5.3126 -
  5.3127 -  /* Precondition: all fastbins are consolidated */
  5.3128 -  assert(!have_fastchunks(av));
  5.3129 -
  5.3130 -
  5.3131 -  /* Request enough space for nb + pad + overhead */
  5.3132 -
  5.3133 -  size = nb + av->top_pad + MINSIZE;
  5.3134 -
  5.3135 -  /*
  5.3136 -    If contiguous, we can subtract out existing space that we hope to
  5.3137 -    combine with new space. We add it back later only if
  5.3138 -    we don't actually get contiguous space.
  5.3139 -  */
  5.3140 -
  5.3141 -  if (contiguous(av))
  5.3142 -    size -= old_size;
  5.3143 -
  5.3144 -  /*
  5.3145 -    Round to a multiple of page size.
  5.3146 -    If MORECORE is not contiguous, this ensures that we only call it
  5.3147 -    with whole-page arguments.  And if MORECORE is contiguous and
  5.3148 -    this is not first time through, this preserves page-alignment of
  5.3149 -    previous calls. Otherwise, we correct to page-align below.
  5.3150 -  */
  5.3151 -
  5.3152 -  size = (size + pagemask) & ~pagemask;
  5.3153 -
  5.3154 -  /*
  5.3155 -    Don't try to call MORECORE if argument is so big as to appear
  5.3156 -    negative. Note that since mmap takes size_t arg, it may succeed
  5.3157 -    below even if we cannot call MORECORE.
  5.3158 -  */
  5.3159 -
  5.3160 -  if (size > 0) 
  5.3161 -    brk = (char*)(MORECORE(size));
  5.3162 -
  5.3163 -  /*
  5.3164 -    If have mmap, try using it as a backup when MORECORE fails or
  5.3165 -    cannot be used. This is worth doing on systems that have "holes" in
  5.3166 -    address space, so sbrk cannot extend to give contiguous space, but
  5.3167 -    space is available elsewhere.  Note that we ignore mmap max count
  5.3168 -    and threshold limits, since the space will not be used as a
  5.3169 -    segregated mmap region.
  5.3170 -  */
  5.3171 -
  5.3172 -#if HAVE_MMAP
  5.3173 -  if (brk == (char*)(MORECORE_FAILURE)) {
  5.3174 -
  5.3175 -    /* Cannot merge with old top, so add its size back in */
  5.3176 -    if (contiguous(av))
  5.3177 -      size = (size + old_size + pagemask) & ~pagemask;
  5.3178 -
  5.3179 -    /* If we are relying on mmap as backup, then use larger units */
  5.3180 -    if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE))
  5.3181 -      size = MMAP_AS_MORECORE_SIZE;
  5.3182 -
  5.3183 -    /* Don't try if size wraps around 0 */
  5.3184 -    if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
  5.3185 -
  5.3186 -      brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
  5.3187 -      
  5.3188 -      if (brk != (char*)(MORECORE_FAILURE)) {
  5.3189 -        
  5.3190 -        /* We do not need, and cannot use, another sbrk call to find end */
  5.3191 -        snd_brk = brk + size;
  5.3192 -        
  5.3193 -        /* 
  5.3194 -           Record that we no longer have a contiguous sbrk region. 
  5.3195 -           After the first time mmap is used as backup, we do not
  5.3196 -           ever rely on contiguous space since this could incorrectly
  5.3197 -           bridge regions.
  5.3198 -        */
  5.3199 -        set_noncontiguous(av);
  5.3200 -      }
  5.3201 -    }
  5.3202 -  }
  5.3203 -#endif
  5.3204 -
  5.3205 -  if (brk != (char*)(MORECORE_FAILURE)) {
  5.3206 -    av->sbrked_mem += size;
  5.3207 -
  5.3208 -    /*
  5.3209 -      If MORECORE extends previous space, we can likewise extend top size.
  5.3210 -    */
  5.3211 -    
  5.3212 -    if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
  5.3213 -      set_head(old_top, (size + old_size) | PREV_INUSE);
  5.3214 -    }
  5.3215 -
  5.3216 -    /*
  5.3217 -      Otherwise, make adjustments:
  5.3218 -      
  5.3219 -      * If the first time through or noncontiguous, we need to call sbrk
  5.3220 -        just to find out where the end of memory lies.
  5.3221 -
  5.3222 -      * We need to ensure that all returned chunks from malloc will meet
  5.3223 -        MALLOC_ALIGNMENT
  5.3224 -
  5.3225 -      * If there was an intervening foreign sbrk, we need to adjust sbrk
  5.3226 -        request size to account for fact that we will not be able to
  5.3227 -        combine new space with existing space in old_top.
  5.3228 -
  5.3229 -      * Almost all systems internally allocate whole pages at a time, in
  5.3230 -        which case we might as well use the whole last page of request.
  5.3231 -        So we allocate enough more memory to hit a page boundary now,
  5.3232 -        which in turn causes future contiguous calls to page-align.
  5.3233 -    */
  5.3234 -    
  5.3235 -    else {
  5.3236 -      front_misalign = 0;
  5.3237 -      end_misalign = 0;
  5.3238 -      correction = 0;
  5.3239 -      aligned_brk = brk;
  5.3240 -
  5.3241 -      /*
  5.3242 -        If MORECORE returns an address lower than we have seen before,
  5.3243 -        we know it isn't really contiguous.  This and some subsequent
  5.3244 -        checks help cope with non-conforming MORECORE functions and
  5.3245 -        the presence of "foreign" calls to MORECORE from outside of
  5.3246 -        malloc or by other threads.  We cannot guarantee to detect
  5.3247 -        these in all cases, but cope with the ones we do detect.
  5.3248 -      */
  5.3249 -      if (contiguous(av) && old_size != 0 && brk < old_end) {
  5.3250 -        set_noncontiguous(av);
  5.3251 -      }
  5.3252 -      
  5.3253 -      /* handle contiguous cases */
  5.3254 -      if (contiguous(av)) { 
  5.3255 -
  5.3256 -        /* 
  5.3257 -           We can tolerate forward non-contiguities here (usually due
  5.3258 -           to foreign calls) but treat them as part of our space for
  5.3259 -           stats reporting.
  5.3260 -        */
  5.3261 -        if (old_size != 0) 
  5.3262 -          av->sbrked_mem += brk - old_end;
  5.3263 -        
  5.3264 -        /* Guarantee alignment of first new chunk made from this space */
  5.3265 -
  5.3266 -        front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  5.3267 -        if (front_misalign > 0) {
  5.3268 -
  5.3269 -          /*
  5.3270 -            Skip over some bytes to arrive at an aligned position.
  5.3271 -            We don't need to specially mark these wasted front bytes.
  5.3272 -            They will never be accessed anyway because
  5.3273 -            prev_inuse of av->top (and any chunk created from its start)
  5.3274 -            is always true after initialization.
  5.3275 -          */
  5.3276 -
  5.3277 -          correction = MALLOC_ALIGNMENT - front_misalign;
  5.3278 -          aligned_brk += correction;
  5.3279 -        }
  5.3280 -        
  5.3281 -        /*
  5.3282 -          If this isn't adjacent to existing space, then we will not
  5.3283 -          be able to merge with old_top space, so must add to 2nd request.
  5.3284 -        */
  5.3285 -        
  5.3286 -        correction += old_size;
  5.3287 -        
  5.3288 -        /* Extend the end address to hit a page boundary */
  5.3289 -        end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
  5.3290 -        correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
  5.3291 -        
  5.3292 -        assert(correction >= 0);
  5.3293 -        snd_brk = (char*)(MORECORE(correction));
  5.3294 -        
  5.3295 -        if (snd_brk == (char*)(MORECORE_FAILURE)) {
  5.3296 -          /*
  5.3297 -            If can't allocate correction, try to at least find out current
  5.3298 -            brk.  It might be enough to proceed without failing.
  5.3299 -          */
  5.3300 -          correction = 0;
  5.3301 -          snd_brk = (char*)(MORECORE(0));
  5.3302 -        }
  5.3303 -        else if (snd_brk < brk) {
  5.3304 -          /*
  5.3305 -            If the second call gives noncontiguous space even though
  5.3306 -            it says it won't, the only course of action is to ignore
  5.3307 -            results of second call, and conservatively estimate where
  5.3308 -            the first call left us. Also set noncontiguous, so this
  5.3309 -            won't happen again, leaving at most one hole.
  5.3310 -            
  5.3311 -            Note that this check is intrinsically incomplete.  Because
  5.3312 -            MORECORE is allowed to give more space than we ask for,
  5.3313 -            there is no reliable way to detect a noncontiguity
  5.3314 -            producing a forward gap for the second call.
  5.3315 -          */
  5.3316 -          snd_brk = brk + size;
  5.3317 -          correction = 0;
  5.3318 -          set_noncontiguous(av);
  5.3319 -        }
  5.3320 -
  5.3321 -      }
  5.3322 -      
  5.3323 -      /* handle non-contiguous cases */
  5.3324 -      else { 
  5.3325 -        /* MORECORE/mmap must correctly align */
  5.3326 -        assert(aligned_OK(chunk2mem(brk)));
  5.3327 -        
  5.3328 -        /* Find out current end of memory */
  5.3329 -        if (snd_brk == (char*)(MORECORE_FAILURE)) {
  5.3330 -          snd_brk = (char*)(MORECORE(0));
  5.3331 -          av->sbrked_mem += snd_brk - brk - size;
  5.3332 -        }
  5.3333 -      }
  5.3334 -      
  5.3335 -      /* Adjust top based on results of second sbrk */
  5.3336 -      if (snd_brk != (char*)(MORECORE_FAILURE)) {
  5.3337 -        av->top = (mchunkptr)aligned_brk;
  5.3338 -        set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
  5.3339 -        av->sbrked_mem += correction;
  5.3340 -     
  5.3341 -        /*
  5.3342 -          If not the first time through, we either have a
  5.3343 -          gap due to foreign sbrk or a non-contiguous region.  Insert a
  5.3344 -          double fencepost at old_top to prevent consolidation with space
  5.3345 -          we don't own. These fenceposts are artificial chunks that are
  5.3346 -          marked as inuse and are in any case too small to use.  We need
  5.3347 -          two to make sizes and alignments work out.
  5.3348 -        */
  5.3349 -   
  5.3350 -        if (old_size != 0) {
  5.3351 -          /* 
  5.3352 -             Shrink old_top to insert fenceposts, keeping size a
  5.3353 -             multiple of MALLOC_ALIGNMENT. We know there is at least
  5.3354 -             enough space in old_top to do this.
  5.3355 -          */
  5.3356 -          old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
  5.3357 -          set_head(old_top, old_size | PREV_INUSE);
  5.3358 -          
  5.3359 -          /*
  5.3360 -            Note that the following assignments completely overwrite
  5.3361 -            old_top when old_size was previously MINSIZE.  This is
  5.3362 -            intentional. We need the fencepost, even if old_top otherwise gets
  5.3363 -            lost.
  5.3364 -          */
  5.3365 -          chunk_at_offset(old_top, old_size          )->size =
  5.3366 -            SIZE_SZ|PREV_INUSE;
  5.3367 -
  5.3368 -          chunk_at_offset(old_top, old_size + SIZE_SZ)->size =
  5.3369 -            SIZE_SZ|PREV_INUSE;
  5.3370 -
  5.3371 -          /* 
  5.3372 -             If possible, release the rest, suppressing trimming.
  5.3373 -          */
  5.3374 -          if (old_size >= MINSIZE) {
  5.3375 -            INTERNAL_SIZE_T tt = av->trim_threshold;
  5.3376 -            av->trim_threshold = (INTERNAL_SIZE_T)(-1);
  5.3377 -            fREe(chunk2mem(old_top));
  5.3378 -            av->trim_threshold = tt;
  5.3379 -          }
  5.3380 -        }
  5.3381 -      }
  5.3382 -    }
  5.3383 -    
  5.3384 -    /* Update statistics */
  5.3385 -    sum = av->sbrked_mem;
  5.3386 -    if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
  5.3387 -      av->max_sbrked_mem = sum;
  5.3388 -    
  5.3389 -    sum += av->mmapped_mem;
  5.3390 -    if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
  5.3391 -      av->max_total_mem = sum;
  5.3392 -
  5.3393 -    check_malloc_state();
  5.3394 -    
  5.3395 -    /* finally, do the allocation */
  5.3396 -
  5.3397 -    p = av->top;
  5.3398 -    size = chunksize(p);
  5.3399 -    
  5.3400 -    /* check that one of the above allocation paths succeeded */
  5.3401 -    if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
  5.3402 -      remainder_size = size - nb;
  5.3403 -      remainder = chunk_at_offset(p, nb);
  5.3404 -      av->top = remainder;
  5.3405 -      set_head(p, nb | PREV_INUSE);
  5.3406 -      set_head(remainder, remainder_size | PREV_INUSE);
  5.3407 -      check_malloced_chunk(p, nb);
  5.3408 -      return chunk2mem(p);
  5.3409 -    }
  5.3410 -
  5.3411 -  }
  5.3412 -
  5.3413 -  /* catch all failure paths */
  5.3414 -  MALLOC_FAILURE_ACTION;
  5.3415 -  return 0;
  5.3416 -}
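The page-rounding idiom used throughout sYSMALLOc, (size + pagemask) &
~pagemask, rounds a request up to a whole number of pages; assuming
4096-byte pages:

    #include <assert.h>

    int main(void)
    {
        unsigned long pagemask = 4096 - 1;

        assert(((5000 + pagemask) & ~pagemask) == 8192); /* rounds up       */
        assert(((4096 + pagemask) & ~pagemask) == 4096); /* already aligned */
        assert(((   1 + pagemask) & ~pagemask) == 4096); /* min one page    */
        return 0;
    }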
  5.3417 -
  5.3418 -
  5.3419 -
  5.3420 -
  5.3421 -#ifndef MORECORE_CANNOT_TRIM
  5.3422 -/*
  5.3423 -  sYSTRIm is an inverse of sorts to sYSMALLOc.  It gives memory back
  5.3424 -  to the system (via negative arguments to sbrk) if there is unused
  5.3425 -  memory at the `high' end of the malloc pool. It is called
  5.3426 -  automatically by free() when top space exceeds the trim
  5.3427 -  threshold. It is also called by the public malloc_trim routine.  It
  5.3428 -  returns 1 if it actually released any memory, else 0.
  5.3429 -*/
  5.3430 -
  5.3431 -#if __STD_C
  5.3432 -static int sYSTRIm(size_t pad, mstate av)
  5.3433 -#else
  5.3434 -static int sYSTRIm(pad, av) size_t pad; mstate av;
  5.3435 -#endif
  5.3436 -{
  5.3437 -  long  top_size;        /* Amount of top-most memory */
  5.3438 -  long  extra;           /* Amount to release */
  5.3439 -  long  released;        /* Amount actually released */
  5.3440 -  char* current_brk;     /* address returned by pre-check sbrk call */
  5.3441 -  char* new_brk;         /* address returned by post-check sbrk call */
  5.3442 -  size_t pagesz;
  5.3443 -
  5.3444 -  pagesz = av->pagesize;
  5.3445 -  top_size = chunksize(av->top);
  5.3446 -  
  5.3447 -  /* Release in pagesize units, keeping at least one page */
  5.3448 -  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
  5.3449 -  
  5.3450 -  if (extra > 0) {
  5.3451 -    
  5.3452 -    /*
  5.3453 -      Only proceed if end of memory is where we last set it.
  5.3454 -      This avoids problems if there were foreign sbrk calls.
  5.3455 -    */
  5.3456 -    current_brk = (char*)(MORECORE(0));
  5.3457 -    if (current_brk == (char*)(av->top) + top_size) {
  5.3458 -      
  5.3459 -      /*
  5.3460 -        Attempt to release memory. We ignore MORECORE return value,
  5.3461 -        and instead call again to find out where new end of memory is.
  5.3462 -        This avoids problems if first call releases less than we asked,
  5.3463 -        or if failure somehow altered brk value. (We could still
  5.3464 -        encounter problems if it altered brk in some very bad way,
  5.3465 -        but the only thing we can do is adjust anyway, which will cause
  5.3466 -        some downstream failure.)
  5.3467 -      */
  5.3468 -      
  5.3469 -      MORECORE(-extra);
  5.3470 -      new_brk = (char*)(MORECORE(0));
  5.3471 -      
  5.3472 -      if (new_brk != (char*)MORECORE_FAILURE) {
  5.3473 -        released = (long)(current_brk - new_brk);
  5.3474 -        
  5.3475 -        if (released != 0) {
  5.3476 -          /* Success. Adjust top. */
  5.3477 -          av->sbrked_mem -= released;
  5.3478 -          set_head(av->top, (top_size - released) | PREV_INUSE);
  5.3479 -          check_malloc_state();
  5.3480 -          return 1;
  5.3481 -        }
  5.3482 -      }
  5.3483 -    }
  5.3484 -  }
  5.3485 -  return 0;
  5.3486 -}
  5.3487 -#endif
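Worked through with assumed values (pagesz == 4096, MINSIZE == 16,
pad == 0) and a ten-page top chunk, the release computation keeps at
least one page resident:

    #include <assert.h>

    int main(void)
    {
        long pagesz   = 4096;
        long top_size = 10 * pagesz;          /* 40960-byte top chunk */
        long pad      = 0;
        long minsize  = 16;                   /* assumed MINSIZE      */

        long extra = ((top_size - pad - minsize + (pagesz - 1)) / pagesz - 1)
                     * pagesz;

        assert(extra == 9 * pagesz);  /* nine pages released, one-plus kept */
        return 0;
    }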
  5.3488 -
  5.3489 -/*
  5.3490 -  ------------------------------ malloc ------------------------------
  5.3491 -*/
  5.3492 -
  5.3493 -
  5.3494 -#if __STD_C
  5.3495 -Void_t* mALLOc(size_t bytes)
  5.3496 -#else
  5.3497 -  Void_t* mALLOc(bytes) size_t bytes;
  5.3498 -#endif
  5.3499 -{
  5.3500 -  mstate av = get_malloc_state();
  5.3501 -
  5.3502 -  INTERNAL_SIZE_T nb;               /* normalized request size */
  5.3503 -  unsigned int    idx;              /* associated bin index */
  5.3504 -  mbinptr         bin;              /* associated bin */
  5.3505 -  mfastbinptr*    fb;               /* associated fastbin */
  5.3506 -
  5.3507 -  mchunkptr       victim;           /* inspected/selected chunk */
  5.3508 -  INTERNAL_SIZE_T size;             /* its size */
  5.3509 -  int             victim_index;     /* its bin index */
  5.3510 -
  5.3511 -  mchunkptr       remainder;        /* remainder from a split */
  5.3512 -  CHUNK_SIZE_T    remainder_size;   /* its size */
  5.3513 -
  5.3514 -  unsigned int    block;            /* bit map traverser */
  5.3515 -  unsigned int    bit;              /* bit map traverser */
  5.3516 -  unsigned int    map;              /* current word of binmap */
  5.3517 -
  5.3518 -  mchunkptr       fwd;              /* misc temp for linking */
  5.3519 -  mchunkptr       bck;              /* misc temp for linking */
  5.3520 -
  5.3521 -  /*
  5.3522 -    Convert request size to internal form by adding SIZE_SZ bytes
  5.3523 -    overhead plus possibly more to obtain necessary alignment and/or
  5.3524 -    to obtain a size of at least MINSIZE, the smallest allocatable
  5.3525 -    size. Also, checked_request2size traps (returning 0) request sizes
  5.3526 -    that are so large that they wrap around zero when padded and
  5.3527 -    aligned.
  5.3528 -  */
  5.3529 -
  5.3530 -  checked_request2size(bytes, nb);
  5.3531 -
  5.3532 -  /*
  5.3533 -    Bypass search if no frees yet
  5.3534 -   */
  5.3535 -  if (!have_anychunks(av)) {
  5.3536 -    if (av->max_fast == 0) /* initialization check */
  5.3537 -      malloc_consolidate(av);
  5.3538 -    goto use_top;
  5.3539 -  }
  5.3540 -
  5.3541 -  /*
  5.3542 -    If the size qualifies as a fastbin, first check corresponding bin.
  5.3543 -  */
  5.3544 -
  5.3545 -  if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) { 
  5.3546 -    fb = &(av->fastbins[(fastbin_index(nb))]);
  5.3547 -    if ( (victim = *fb) != 0) {
  5.3548 -      *fb = victim->fd;
  5.3549 -      check_remalloced_chunk(victim, nb);
  5.3550 -      return chunk2mem(victim);
  5.3551 -    }
  5.3552 -  }
  5.3553 -
  5.3554 -  /*
  5.3555 -    If a small request, check regular bin.  Since these "smallbins"
  5.3556 -    hold one size each, no searching within bins is necessary.
  5.3557 -    (For a large request, we need to wait until unsorted chunks are
  5.3558 -    processed to find best fit. But for small ones, fits are exact
  5.3559 -    anyway, so we can check now, which is faster.)
  5.3560 -  */
  5.3561 -
  5.3562 -  if (in_smallbin_range(nb)) {
  5.3563 -    idx = smallbin_index(nb);
  5.3564 -    bin = bin_at(av,idx);
  5.3565 -
  5.3566 -    if ( (victim = last(bin)) != bin) {
  5.3567 -      bck = victim->bk;
  5.3568 -      set_inuse_bit_at_offset(victim, nb);
  5.3569 -      bin->bk = bck;
  5.3570 -      bck->fd = bin;
  5.3571 -      
  5.3572 -      check_malloced_chunk(victim, nb);
  5.3573 -      return chunk2mem(victim);
  5.3574 -    }
  5.3575 -  }
  5.3576 -
  5.3577 -  /* 
  5.3578 -     If this is a large request, consolidate fastbins before continuing.
  5.3579 -     While it might look excessive to kill all fastbins before
  5.3580 -     even seeing if there is space available, this avoids
  5.3581 -     fragmentation problems normally associated with fastbins.
  5.3582 -     Also, in practice, programs tend to have runs of either small or
  5.3583 -     large requests, but less often mixtures, so consolidation is not 
  5.3584 -     invoked all that often in most programs. And the programs in which
  5.3585 -     it is called frequently otherwise tend to fragment.
  5.3586 -  */
  5.3587 -
  5.3588 -  else {
  5.3589 -    idx = largebin_index(nb);
  5.3590 -    if (have_fastchunks(av)) 
  5.3591 -      malloc_consolidate(av);
  5.3592 -  }
  5.3593 -
  5.3594 -  /*
  5.3595 -    Process recently freed or remaindered chunks, taking one only if
  5.3596 -    it is an exact fit, or, if this is a small request, the chunk is the remainder from
  5.3597 -    the most recent non-exact fit.  Place other traversed chunks in
  5.3598 -    bins.  Note that this step is the only place in any routine where
  5.3599 -    chunks are placed in bins.
  5.3600 -  */
  5.3601 -    
  5.3602 -  while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
  5.3603 -    bck = victim->bk;
  5.3604 -    size = chunksize(victim);
  5.3605 -    
  5.3606 -    /* 
  5.3607 -       If a small request, try to use last remainder if it is the
  5.3608 -       only chunk in unsorted bin.  This helps promote locality for
  5.3609 -       runs of consecutive small requests. This is the only
  5.3610 -       exception to best-fit, and applies only when there is
  5.3611 -       no exact fit for a small chunk.
  5.3612 -    */
  5.3613 -    
  5.3614 -    if (in_smallbin_range(nb) && 
  5.3615 -        bck == unsorted_chunks(av) &&
  5.3616 -        victim == av->last_remainder &&
  5.3617 -        (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
  5.3618 -      
  5.3619 -      /* split and reattach remainder */
  5.3620 -      remainder_size = size - nb;
  5.3621 -      remainder = chunk_at_offset(victim, nb);
  5.3622 -      unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
  5.3623 -      av->last_remainder = remainder; 
  5.3624 -      remainder->bk = remainder->fd = unsorted_chunks(av);
  5.3625 -      
  5.3626 -      set_head(victim, nb | PREV_INUSE);
  5.3627 -      set_head(remainder, remainder_size | PREV_INUSE);
  5.3628 -      set_foot(remainder, remainder_size);
  5.3629 -      
  5.3630 -      check_malloced_chunk(victim, nb);
  5.3631 -      return chunk2mem(victim);
  5.3632 -    }
  5.3633 -    
  5.3634 -    /* remove from unsorted list */
  5.3635 -    unsorted_chunks(av)->bk = bck;
  5.3636 -    bck->fd = unsorted_chunks(av);
  5.3637 -    
  5.3638 -    /* Take now instead of binning if exact fit */
  5.3639 -    
  5.3640 -    if (size == nb) {
  5.3641 -      set_inuse_bit_at_offset(victim, size);
  5.3642 -      check_malloced_chunk(victim, nb);
  5.3643 -      return chunk2mem(victim);
  5.3644 -    }
  5.3645 -    
  5.3646 -    /* place chunk in bin */
  5.3647 -    
  5.3648 -    if (in_smallbin_range(size)) {
  5.3649 -      victim_index = smallbin_index(size);
  5.3650 -      bck = bin_at(av, victim_index);
  5.3651 -      fwd = bck->fd;
  5.3652 -    }
  5.3653 -    else {
  5.3654 -      victim_index = largebin_index(size);
  5.3655 -      bck = bin_at(av, victim_index);
  5.3656 -      fwd = bck->fd;
  5.3657 -      
  5.3658 -      if (fwd != bck) {
  5.3659 -        /* if smaller than smallest, place first */
  5.3660 -        if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
  5.3661 -          fwd = bck;
  5.3662 -          bck = bck->bk;
  5.3663 -        }
  5.3664 -        else if ((CHUNK_SIZE_T)(size) >= 
  5.3665 -                 (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
  5.3666 -          
  5.3667 -          /* maintain large bins in sorted order */
  5.3668 -          size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
  5.3669 -          while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size)) 
  5.3670 -            fwd = fwd->fd;
  5.3671 -          bck = fwd->bk;
  5.3672 -        }
  5.3673 -      }
  5.3674 -    }
  5.3675 -      
  5.3676 -    mark_bin(av, victim_index);
  5.3677 -    victim->bk = bck;
  5.3678 -    victim->fd = fwd;
  5.3679 -    fwd->bk = victim;
  5.3680 -    bck->fd = victim;
  5.3681 -  }
  5.3682 -  
  5.3683 -  /*
  5.3684 -    If a large request, scan through the chunks of current bin to
  5.3685 -    find one that fits.  (This will be the smallest that fits unless
  5.3686 -    FIRST_SORTED_BIN_SIZE has been changed from default.)  This is
  5.3687 -    the only step where an unbounded number of chunks might be
  5.3688 -    scanned without doing anything useful with them. However the
  5.3689 -    lists tend to be short.
  5.3690 -  */
  5.3691 -  
  5.3692 -  if (!in_smallbin_range(nb)) {
  5.3693 -    bin = bin_at(av, idx);
  5.3694 -    
  5.3695 -    for (victim = last(bin); victim != bin; victim = victim->bk) {
  5.3696 -      size = chunksize(victim);
  5.3697 -      
  5.3698 -      if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
  5.3699 -        remainder_size = size - nb;
  5.3700 -        unlink(victim, bck, fwd);
  5.3701 -        
  5.3702 -        /* Exhaust */
  5.3703 -        if (remainder_size < MINSIZE)  {
  5.3704 -          set_inuse_bit_at_offset(victim, size);
  5.3705 -          check_malloced_chunk(victim, nb);
  5.3706 -          return chunk2mem(victim);
  5.3707 -        }
  5.3708 -        /* Split */
  5.3709 -        else {
  5.3710 -          remainder = chunk_at_offset(victim, nb);
  5.3711 -          unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
  5.3712 -          remainder->bk = remainder->fd = unsorted_chunks(av);
  5.3713 -          set_head(victim, nb | PREV_INUSE);
  5.3714 -          set_head(remainder, remainder_size | PREV_INUSE);
  5.3715 -          set_foot(remainder, remainder_size);
  5.3716 -          check_malloced_chunk(victim, nb);
  5.3717 -          return chunk2mem(victim);
  5.3718 -        } 
  5.3719 -      }
  5.3720 -    }    
  5.3721 -  }
  5.3722 -
  5.3723 -  /*
  5.3724 -    Search for a chunk by scanning bins, starting with next largest
  5.3725 -    bin. This search is strictly by best-fit; i.e., the smallest
  5.3726 -    (with ties going to approximately the least recently used) chunk
  5.3727 -    that fits is selected.
  5.3728 -    
  5.3729 -    The bitmap avoids needing to check that most blocks are nonempty.
  5.3730 -  */
  5.3731 -    
  5.3732 -  ++idx;
  5.3733 -  bin = bin_at(av,idx);
  5.3734 -  block = idx2block(idx);
  5.3735 -  map = av->binmap[block];
  5.3736 -  bit = idx2bit(idx);
  5.3737 -  
  5.3738 -  for (;;) {
  5.3739 -    
  5.3740 -    /* Skip rest of block if there are no more set bits in this block.  */
  5.3741 -    if (bit > map || bit == 0) {
  5.3742 -      do {
  5.3743 -        if (++block >= BINMAPSIZE)  /* out of bins */
  5.3744 -          goto use_top;
  5.3745 -      } while ( (map = av->binmap[block]) == 0);
  5.3746 -      
  5.3747 -      bin = bin_at(av, (block << BINMAPSHIFT));
  5.3748 -      bit = 1;
  5.3749 -    }
  5.3750 -    
  5.3751 -    /* Advance to bin with set bit. There must be one. */
  5.3752 -    while ((bit & map) == 0) {
  5.3753 -      bin = next_bin(bin);
  5.3754 -      bit <<= 1;
  5.3755 -      assert(bit != 0);
  5.3756 -    }
  5.3757 -    
  5.3758 -    /* Inspect the bin. It is likely to be non-empty */
  5.3759 -    victim = last(bin);
  5.3760 -    
  5.3761 -    /*  If a false alarm (empty bin), clear the bit. */
  5.3762 -    if (victim == bin) {
  5.3763 -      av->binmap[block] = map &= ~bit; /* Write through */
  5.3764 -      bin = next_bin(bin);
  5.3765 -      bit <<= 1;
  5.3766 -    }
  5.3767 -    
  5.3768 -    else {
  5.3769 -      size = chunksize(victim);
  5.3770 -      
  5.3771 -      /*  We know the first chunk in this bin is big enough to use. */
  5.3772 -      assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
  5.3773 -      
  5.3774 -      remainder_size = size - nb;
  5.3775 -      
  5.3776 -      /* unlink */
  5.3777 -      bck = victim->bk;
  5.3778 -      bin->bk = bck;
  5.3779 -      bck->fd = bin;
  5.3780 -      
  5.3781 -      /* Exhaust */
  5.3782 -      if (remainder_size < MINSIZE) {
  5.3783 -        set_inuse_bit_at_offset(victim, size);
  5.3784 -        check_malloced_chunk(victim, nb);
  5.3785 -        return chunk2mem(victim);
  5.3786 -      }
  5.3787 -      
  5.3788 -      /* Split */
  5.3789 -      else {
  5.3790 -        remainder = chunk_at_offset(victim, nb);
  5.3791 -        
  5.3792 -        unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
  5.3793 -        remainder->bk = remainder->fd = unsorted_chunks(av);
  5.3794 -        /* advertise as last remainder */
  5.3795 -        if (in_smallbin_range(nb)) 
  5.3796 -          av->last_remainder = remainder; 
  5.3797 -        
  5.3798 -        set_head(victim, nb | PREV_INUSE);
  5.3799 -        set_head(remainder, remainder_size | PREV_INUSE);
  5.3800 -        set_foot(remainder, remainder_size);
  5.3801 -        check_malloced_chunk(victim, nb);
  5.3802 -        return chunk2mem(victim);
  5.3803 -      }
  5.3804 -    }
  5.3805 -  }
  5.3806 -
  5.3807 -  use_top:    
  5.3808 -  /*
  5.3809 -    If large enough, split off the chunk bordering the end of memory
  5.3810 -    (held in av->top). Note that this is in accord with the best-fit
  5.3811 -    search rule.  In effect, av->top is treated as larger (and thus
  5.3812 -    less well fitting) than any other available chunk since it can
  5.3813 -    be extended to be as large as necessary (up to system
  5.3814 -    limitations).
  5.3815 -    
  5.3816 -    We require that av->top always exists (i.e., has size >=
  5.3817 -    MINSIZE) after initialization, so if it would otherwise be
  5.3818 -    exhausted by current request, it is replenished. (The main
  5.3819 -    reason for ensuring it exists is that we may need MINSIZE space
  5.3820 -    to put in fenceposts in sysmalloc.)
  5.3821 -  */
  5.3822 -  
  5.3823 -  victim = av->top;
  5.3824 -  size = chunksize(victim);
  5.3825 -  
  5.3826 -  if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
  5.3827 -    remainder_size = size - nb;
  5.3828 -    remainder = chunk_at_offset(victim, nb);
  5.3829 -    av->top = remainder;
  5.3830 -    set_head(victim, nb | PREV_INUSE);
  5.3831 -    set_head(remainder, remainder_size | PREV_INUSE);
  5.3832 -    
  5.3833 -    check_malloced_chunk(victim, nb);
  5.3834 -    return chunk2mem(victim);
  5.3835 -  }
  5.3836 -  
  5.3837 -  /* 
  5.3838 -     If no space in top, relay to handle system-dependent cases 
  5.3839 -  */
  5.3840 -  return sYSMALLOc(nb, av);    
  5.3841 -}
  5.3842 -
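The bitmap walk in mALLOc above can be sketched standalone. The `bit >
word' test is sound because a word with any set bit at or above position
k is at least 2^k, so when the test fails a set bit must still lie ahead:

    #include <assert.h>

    /* Hypothetical helper: index of the next marked bin at or after idx,
       or -1 if none remain (the `use_top' case). */
    static int next_marked_bin(const unsigned int map[], int nblocks, int idx)
    {
        int          block = idx >> 5;
        unsigned int bit   = 1U << (idx & 31);
        unsigned int word  = map[block];

        if (bit > word || bit == 0) {        /* no set bits left in block */
            do {
                if (++block >= nblocks)
                    return -1;
            } while ((word = map[block]) == 0);
            bit = 1;
            idx = block << 5;
        }
        while ((bit & word) == 0) {          /* a set bit must exist now  */
            bit <<= 1;
            idx++;
        }
        return idx;
    }

    int main(void)
    {
        unsigned int map[3] = { 0, 1U << 7, 0 };
        assert(next_marked_bin(map, 3, 3) == 39);   /* bin 32 + 7 */
        return 0;
    }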
  5.3843 -/*
  5.3844 -  ------------------------------ free ------------------------------
  5.3845 -*/
  5.3846 -
  5.3847 -#if __STD_C
  5.3848 -void fREe(Void_t* mem)
  5.3849 -#else
  5.3850 -void fREe(mem) Void_t* mem;
  5.3851 -#endif
  5.3852 -{
  5.3853 -  mstate av = get_malloc_state();
  5.3854 -
  5.3855 -  mchunkptr       p;           /* chunk corresponding to mem */
  5.3856 -  INTERNAL_SIZE_T size;        /* its size */
  5.3857 -  mfastbinptr*    fb;          /* associated fastbin */
  5.3858 -  mchunkptr       nextchunk;   /* next contiguous chunk */
  5.3859 -  INTERNAL_SIZE_T nextsize;    /* its size */
  5.3860 -  int             nextinuse;   /* true if nextchunk is used */
  5.3861 -  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
  5.3862 -  mchunkptr       bck;         /* misc temp for linking */
  5.3863 -  mchunkptr       fwd;         /* misc temp for linking */
  5.3864 -
  5.3865 -  /* free(0) has no effect */
  5.3866 -  if (mem != 0) {
  5.3867 -    p = mem2chunk(mem);
  5.3868 -    size = chunksize(p);
  5.3869 -
  5.3870 -    check_inuse_chunk(p);
  5.3871 -
  5.3872 -    /*
  5.3873 -      If eligible, place chunk on a fastbin so it can be found
  5.3874 -      and used quickly in malloc.
  5.3875 -    */
  5.3876 -
  5.3877 -    if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)
  5.3878 -
  5.3879 -#if TRIM_FASTBINS
  5.3880 -        /* 
  5.3881 -           If TRIM_FASTBINS set, don't place chunks
  5.3882 -           bordering top into fastbins
  5.3883 -        */
  5.3884 -        && (chunk_at_offset(p, size) != av->top)
  5.3885 -#endif
  5.3886 -        ) {
  5.3887 -
  5.3888 -      set_fastchunks(av);
  5.3889 -      fb = &(av->fastbins[fastbin_index(size)]);
  5.3890 -      p->fd = *fb;
  5.3891 -      *fb = p;
  5.3892 -    }
  5.3893 -
  5.3894 -    /*
  5.3895 -       Consolidate other non-mmapped chunks as they arrive.
  5.3896 -    */
  5.3897 -
  5.3898 -    else if (!chunk_is_mmapped(p)) {
  5.3899 -      set_anychunks(av);
  5.3900 -
  5.3901 -      nextchunk = chunk_at_offset(p, size);
  5.3902 -      nextsize = chunksize(nextchunk);
  5.3903 -
  5.3904 -      /* consolidate backward */
  5.3905 -      if (!prev_inuse(p)) {
  5.3906 -        prevsize = p->prev_size;
  5.3907 -        size += prevsize;
  5.3908 -        p = chunk_at_offset(p, -((long) prevsize));
  5.3909 -        unlink(p, bck, fwd);
  5.3910 -      }
  5.3911 -
  5.3912 -      if (nextchunk != av->top) {
  5.3913 -        /* get and clear inuse bit */
  5.3914 -        nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
  5.3915 -        set_head(nextchunk, nextsize);
  5.3916 -
  5.3917 -        /* consolidate forward */
  5.3918 -        if (!nextinuse) {
  5.3919 -          unlink(nextchunk, bck, fwd);
  5.3920 -          size += nextsize;
  5.3921 -        }
  5.3922 -
  5.3923 -        /*
  5.3924 -          Place the chunk in unsorted chunk list. Chunks are
  5.3925 -          not placed into regular bins until after they have
  5.3926 -          been given one chance to be used in malloc.
  5.3927 -        */
  5.3928 -
  5.3929 -        bck = unsorted_chunks(av);
  5.3930 -        fwd = bck->fd;
  5.3931 -        p->bk = bck;
  5.3932 -        p->fd = fwd;
  5.3933 -        bck->fd = p;
  5.3934 -        fwd->bk = p;
  5.3935 -
  5.3936 -        set_head(p, size | PREV_INUSE);
  5.3937 -        set_foot(p, size);
  5.3938 -        
  5.3939 -        check_free_chunk(p);
  5.3940 -      }
  5.3941 -
  5.3942 -      /*
  5.3943 -         If the chunk borders the current high end of memory,
  5.3944 -         consolidate into top
  5.3945 -      */
  5.3946 -
  5.3947 -      else {
  5.3948 -        size += nextsize;
  5.3949 -        set_head(p, size | PREV_INUSE);
  5.3950 -        av->top = p;
  5.3951 -        check_chunk(p);
  5.3952 -      }
  5.3953 -
  5.3954 -      /*
  5.3955 -        If freeing a large space, consolidate possibly-surrounding
  5.3956 -        chunks. Then, if the total unused topmost memory exceeds trim
  5.3957 -        threshold, ask malloc_trim to reduce top.
  5.3958 -
  5.3959 -        Unless max_fast is 0, we don't know if there are fastbins
  5.3960 -        bordering top, so we cannot tell for sure whether threshold
  5.3961 -        has been reached unless fastbins are consolidated.  But we
  5.3962 -        don't want to consolidate on each free.  As a compromise,
  5.3963 -        consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
  5.3964 -        is reached.
  5.3965 -      */
  5.3966 -
  5.3967 -      if ((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) { 
  5.3968 -        if (have_fastchunks(av)) 
  5.3969 -          malloc_consolidate(av);
  5.3970 -
  5.3971 -#ifndef MORECORE_CANNOT_TRIM        
  5.3972 -        if ((CHUNK_SIZE_T)(chunksize(av->top)) >= 
  5.3973 -            (CHUNK_SIZE_T)(av->trim_threshold))
  5.3974 -          sYSTRIm(av->top_pad, av);
  5.3975 -#endif
  5.3976 -      }
  5.3977 -
  5.3978 -    }
  5.3979 -    /*
  5.3980 -      If the chunk was allocated via mmap, release via munmap()
  5.3981 -      Note that if HAVE_MMAP is false but chunk_is_mmapped is
  5.3982 -      true, then user must have overwritten memory. There's nothing
  5.3983 -      we can do to catch this error unless DEBUG is set, in which case
  5.3984 -      check_inuse_chunk (above) will have triggered error.
  5.3985 -    */
  5.3986 -
  5.3987 -    else {
  5.3988 -#if HAVE_MMAP
  5.3989 -      int ret;
  5.3990 -      INTERNAL_SIZE_T offset = p->prev_size;
  5.3991 -      av->n_mmaps--;
  5.3992 -      av->mmapped_mem -= (size + offset);
  5.3993 -      ret = munmap((char*)p - offset, size + offset);
  5.3994 -      /* munmap returns non-zero on failure */
  5.3995 -      assert(ret == 0);
  5.3996 -#endif
  5.3997 -    }
  5.3998 -  }
  5.3999 -}
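
The fastbin branch of fREe is deliberately cheap: no coalescing, no neighbour bookkeeping, just a LIFO push onto a singly linked list threaded through the fd field. A hedged sketch of that push and the matching pop malloc performs (names simplified; one bin shown, where the real code indexes an array by size):

    typedef struct fchunk { struct fchunk *fd; } fchunk;

    static fchunk *fastbin_head;        /* head of one fastbin */

    static void fastbin_push(fchunk *p)
    {
        p->fd = fastbin_head;           /* link in front, unsorted */
        fastbin_head = p;
    }

    static fchunk *fastbin_pop(void)
    {
        fchunk *p = fastbin_head;
        if (p != 0)
            fastbin_head = p->fd;
        return p;                       /* 0 when the bin is empty */
    }
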
  5.4000 -
  5.4001 -/*
  5.4002 -  ------------------------- malloc_consolidate -------------------------
  5.4003 -
  5.4004 -  malloc_consolidate is a specialized version of free() that tears
  5.4005 -  down chunks held in fastbins.  Free itself cannot be used for this
  5.4006 -  purpose since, among other things, it might place chunks back onto
  5.4007 -  fastbins.  So, instead, we need to use a minor variant of the same
  5.4008 -  code.
  5.4009 -  
  5.4010 -  Also, because this routine needs to be called the first time through
  5.4011 -  malloc anyway, it turns out to be the perfect place to trigger
  5.4012 -  initialization code.
  5.4013 -*/
  5.4014 -
  5.4015 -#if __STD_C
  5.4016 -static void malloc_consolidate(mstate av)
  5.4017 -#else
  5.4018 -static void malloc_consolidate(av) mstate av;
  5.4019 -#endif
  5.4020 -{
  5.4021 -  mfastbinptr*    fb;                 /* current fastbin being consolidated */
  5.4022 -  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
  5.4023 -  mchunkptr       p;                  /* current chunk being consolidated */
  5.4024 -  mchunkptr       nextp;              /* next chunk to consolidate */
  5.4025 -  mchunkptr       unsorted_bin;       /* bin header */
  5.4026 -  mchunkptr       first_unsorted;     /* chunk to link to */
  5.4027 -
  5.4028 -  /* These have same use as in free() */
  5.4029 -  mchunkptr       nextchunk;
  5.4030 -  INTERNAL_SIZE_T size;
  5.4031 -  INTERNAL_SIZE_T nextsize;
  5.4032 -  INTERNAL_SIZE_T prevsize;
  5.4033 -  int             nextinuse;
  5.4034 -  mchunkptr       bck;
  5.4035 -  mchunkptr       fwd;
  5.4036 -
  5.4037 -  /*
  5.4038 -    If max_fast is 0, we know that av hasn't
  5.4039 -    yet been initialized, in which case do so below
  5.4040 -  */
  5.4041 -
  5.4042 -  if (av->max_fast != 0) {
  5.4043 -    clear_fastchunks(av);
  5.4044 -
  5.4045 -    unsorted_bin = unsorted_chunks(av);
  5.4046 -
  5.4047 -    /*
  5.4048 -      Remove each chunk from fast bin and consolidate it, placing it
  5.4049 -      then in unsorted bin. Among other reasons for doing this,
  5.4050 -      placing in unsorted bin avoids needing to calculate actual bins
  5.4051 -      until malloc is sure that chunks aren't immediately going to be
  5.4052 -      reused anyway.
  5.4053 -    */
  5.4054 -    
  5.4055 -    maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
  5.4056 -    fb = &(av->fastbins[0]);
  5.4057 -    do {
  5.4058 -      if ( (p = *fb) != 0) {
  5.4059 -        *fb = 0;
  5.4060 -        
  5.4061 -        do {
  5.4062 -          check_inuse_chunk(p);
  5.4063 -          nextp = p->fd;
  5.4064 -          
  5.4065 -          /* Slightly streamlined version of consolidation code in free() */
  5.4066 -          size = p->size & ~PREV_INUSE;
  5.4067 -          nextchunk = chunk_at_offset(p, size);
  5.4068 -          nextsize = chunksize(nextchunk);
  5.4069 -          
  5.4070 -          if (!prev_inuse(p)) {
  5.4071 -            prevsize = p->prev_size;
  5.4072 -            size += prevsize;
  5.4073 -            p = chunk_at_offset(p, -((long) prevsize));
  5.4074 -            unlink(p, bck, fwd);
  5.4075 -          }
  5.4076 -          
  5.4077 -          if (nextchunk != av->top) {
  5.4078 -            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
  5.4079 -            set_head(nextchunk, nextsize);
  5.4080 -            
  5.4081 -            if (!nextinuse) {
  5.4082 -              size += nextsize;
  5.4083 -              unlink(nextchunk, bck, fwd);
  5.4084 -            }
  5.4085 -            
  5.4086 -            first_unsorted = unsorted_bin->fd;
  5.4087 -            unsorted_bin->fd = p;
  5.4088 -            first_unsorted->bk = p;
  5.4089 -            
  5.4090 -            set_head(p, size | PREV_INUSE);
  5.4091 -            p->bk = unsorted_bin;
  5.4092 -            p->fd = first_unsorted;
  5.4093 -            set_foot(p, size);
  5.4094 -          }
  5.4095 -          
  5.4096 -          else {
  5.4097 -            size += nextsize;
  5.4098 -            set_head(p, size | PREV_INUSE);
  5.4099 -            av->top = p;
  5.4100 -          }
  5.4101 -          
  5.4102 -        } while ( (p = nextp) != 0);
  5.4103 -        
  5.4104 -      }
  5.4105 -    } while (fb++ != maxfb);
  5.4106 -  }
  5.4107 -  else {
  5.4108 -    malloc_init_state(av);
  5.4109 -    check_malloc_state();
  5.4110 -  }
  5.4111 -}
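
Stripped of the coalescing details, malloc_consolidate is a double loop: detach each fastbin chain, then walk it chunk by chunk, saving the fd link before the chunk is modified. A structural sketch (stand-in types, consolidation elided):

    typedef struct fchunk { struct fchunk *fd; } fchunk;

    static void drain_fastbins(fchunk *bins[], int nbins)
    {
        int i;
        for (i = 0; i < nbins; i++) {
            fchunk *p = bins[i];
            bins[i] = 0;                /* detach the whole chain first */
            while (p != 0) {
                fchunk *next = p->fd;   /* save the link before touching p */
                /* ... coalesce p with free neighbours and place it on the
                   unsorted bin, as the streamlined code above does ... */
                p = next;
            }
        }
    }
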
  5.4112 -
  5.4113 -/*
  5.4114 -  ------------------------------ realloc ------------------------------
  5.4115 -*/
  5.4116 -
  5.4117 -
  5.4118 -#if __STD_C
  5.4119 -Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
  5.4120 -#else
  5.4121 -Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
  5.4122 -#endif
  5.4123 -{
  5.4124 -  mstate av = get_malloc_state();
  5.4125 -
  5.4126 -  INTERNAL_SIZE_T  nb;              /* padded request size */
  5.4127 -
  5.4128 -  mchunkptr        oldp;            /* chunk corresponding to oldmem */
  5.4129 -  INTERNAL_SIZE_T  oldsize;         /* its size */
  5.4130 -
  5.4131 -  mchunkptr        newp;            /* chunk to return */
  5.4132 -  INTERNAL_SIZE_T  newsize;         /* its size */
  5.4133 -  Void_t*          newmem;          /* corresponding user mem */
  5.4134 -
  5.4135 -  mchunkptr        next;            /* next contiguous chunk after oldp */
  5.4136 -
  5.4137 -  mchunkptr        remainder;       /* extra space at end of newp */
  5.4138 -  CHUNK_SIZE_T     remainder_size;  /* its size */
  5.4139 -
  5.4140 -  mchunkptr        bck;             /* misc temp for linking */
  5.4141 -  mchunkptr        fwd;             /* misc temp for linking */
  5.4142 -
  5.4143 -  CHUNK_SIZE_T     copysize;        /* bytes to copy */
  5.4144 -  unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
  5.4145 -  INTERNAL_SIZE_T* s;               /* copy source */ 
  5.4146 -  INTERNAL_SIZE_T* d;               /* copy destination */
  5.4147 -
  5.4148 -
  5.4149 -#ifdef REALLOC_ZERO_BYTES_FREES
  5.4150 -  if (bytes == 0) {
  5.4151 -    fREe(oldmem);
  5.4152 -    return 0;
  5.4153 -  }
  5.4154 -#endif
  5.4155 -
  5.4156 -  /* realloc of null is supposed to be same as malloc */
  5.4157 -  if (oldmem == 0) return mALLOc(bytes);
  5.4158 -
  5.4159 -  checked_request2size(bytes, nb);
  5.4160 -
  5.4161 -  oldp    = mem2chunk(oldmem);
  5.4162 -  oldsize = chunksize(oldp);
  5.4163 -
  5.4164 -  check_inuse_chunk(oldp);
  5.4165 -
  5.4166 -  if (!chunk_is_mmapped(oldp)) {
  5.4167 -
  5.4168 -    if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb)) {
  5.4169 -      /* already big enough; split below */
  5.4170 -      newp = oldp;
  5.4171 -      newsize = oldsize;
  5.4172 -    }
  5.4173 -
  5.4174 -    else {
  5.4175 -      next = chunk_at_offset(oldp, oldsize);
  5.4176 -
  5.4177 -      /* Try to expand forward into top */
  5.4178 -      if (next == av->top &&
  5.4179 -          (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
  5.4180 -          (CHUNK_SIZE_T)(nb + MINSIZE)) {
  5.4181 -        set_head_size(oldp, nb);
  5.4182 -        av->top = chunk_at_offset(oldp, nb);
  5.4183 -        set_head(av->top, (newsize - nb) | PREV_INUSE);
  5.4184 -        return chunk2mem(oldp);
  5.4185 -      }
  5.4186 -      
  5.4187 -      /* Try to expand forward into next chunk;  split off remainder below */
  5.4188 -      else if (next != av->top && 
  5.4189 -               !inuse(next) &&
  5.4190 -               (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
  5.4191 -               (CHUNK_SIZE_T)(nb)) {
  5.4192 -        newp = oldp;
  5.4193 -        unlink(next, bck, fwd);
  5.4194 -      }
  5.4195 -
  5.4196 -      /* allocate, copy, free */
  5.4197 -      else {
  5.4198 -        newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
  5.4199 -        if (newmem == 0)
  5.4200 -          return 0; /* propagate failure */
  5.4201 -      
  5.4202 -        newp = mem2chunk(newmem);
  5.4203 -        newsize = chunksize(newp);
  5.4204 -        
  5.4205 -        /*
  5.4206 -          Avoid copy if newp is next chunk after oldp.
  5.4207 -        */
  5.4208 -        if (newp == next) {
  5.4209 -          newsize += oldsize;
  5.4210 -          newp = oldp;
  5.4211 -        }
  5.4212 -        else {
  5.4213 -          /*
  5.4214 -            Unroll copy of <= 36 bytes (72 if 8-byte sizes)
  5.4215 -            We know that contents have an odd number of
  5.4216 -            INTERNAL_SIZE_T-sized words; minimally 3.
  5.4217 -          */
  5.4218 -          
  5.4219 -          copysize = oldsize - SIZE_SZ;
  5.4220 -          s = (INTERNAL_SIZE_T*)(oldmem);
  5.4221 -          d = (INTERNAL_SIZE_T*)(newmem);
  5.4222 -          ncopies = copysize / sizeof(INTERNAL_SIZE_T);
  5.4223 -          assert(ncopies >= 3);
  5.4224 -          
  5.4225 -          if (ncopies > 9)
  5.4226 -            MALLOC_COPY(d, s, copysize);
  5.4227 -          
  5.4228 -          else {
  5.4229 -            *(d+0) = *(s+0);
  5.4230 -            *(d+1) = *(s+1);
  5.4231 -            *(d+2) = *(s+2);
  5.4232 -            if (ncopies > 4) {
  5.4233 -              *(d+3) = *(s+3);
  5.4234 -              *(d+4) = *(s+4);
  5.4235 -              if (ncopies > 6) {
  5.4236 -                *(d+5) = *(s+5);
  5.4237 -                *(d+6) = *(s+6);
  5.4238 -                if (ncopies > 8) {
  5.4239 -                  *(d+7) = *(s+7);
  5.4240 -                  *(d+8) = *(s+8);
  5.4241 -                }
  5.4242 -              }
  5.4243 -            }
  5.4244 -          }
  5.4245 -          
  5.4246 -          fREe(oldmem);
  5.4247 -          check_inuse_chunk(newp);
  5.4248 -          return chunk2mem(newp);
  5.4249 -        }
  5.4250 -      }
  5.4251 -    }
  5.4252 -
  5.4253 -    /* If possible, free extra space in old or extended chunk */
  5.4254 -
  5.4255 -    assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));
  5.4256 -
  5.4257 -    remainder_size = newsize - nb;
  5.4258 -
  5.4259 -    if (remainder_size < MINSIZE) { /* not enough extra to split off */
  5.4260 -      set_head_size(newp, newsize);
  5.4261 -      set_inuse_bit_at_offset(newp, newsize);
  5.4262 -    }
  5.4263 -    else { /* split remainder */
  5.4264 -      remainder = chunk_at_offset(newp, nb);
  5.4265 -      set_head_size(newp, nb);
  5.4266 -      set_head(remainder, remainder_size | PREV_INUSE);
  5.4267 -      /* Mark remainder as inuse so free() won't complain */
  5.4268 -      set_inuse_bit_at_offset(remainder, remainder_size);
  5.4269 -      fREe(chunk2mem(remainder)); 
  5.4270 -    }
  5.4271 -
  5.4272 -    check_inuse_chunk(newp);
  5.4273 -    return chunk2mem(newp);
  5.4274 -  }
  5.4275 -
  5.4276 -  /*
  5.4277 -    Handle mmap cases
  5.4278 -  */
  5.4279 -
  5.4280 -  else {
  5.4281 -#if HAVE_MMAP
  5.4282 -
  5.4283 -#if HAVE_MREMAP
  5.4284 -    INTERNAL_SIZE_T offset = oldp->prev_size;
  5.4285 -    size_t pagemask = av->pagesize - 1;
  5.4286 -    char *cp;
  5.4287 -    CHUNK_SIZE_T  sum;
  5.4288 -    
  5.4289 -    /* Note the extra SIZE_SZ overhead */
  5.4290 -    newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
  5.4291 -
  5.4292 -    /* don't need to remap if still within same page */
  5.4293 -    if (oldsize == newsize - offset) 
  5.4294 -      return oldmem;
  5.4295 -
  5.4296 -    cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
  5.4297 -    
  5.4298 -    if (cp != (char*)MORECORE_FAILURE) {
  5.4299 -
  5.4300 -      newp = (mchunkptr)(cp + offset);
  5.4301 -      set_head(newp, (newsize - offset)|IS_MMAPPED);
  5.4302 -      
  5.4303 -      assert(aligned_OK(chunk2mem(newp)));
  5.4304 -      assert((newp->prev_size == offset));
  5.4305 -      
  5.4306 -      /* update statistics */
  5.4307 -      sum = av->mmapped_mem += newsize - oldsize;
  5.4308 -      if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem)) 
  5.4309 -        av->max_mmapped_mem = sum;
  5.4310 -      sum += av->sbrked_mem;
  5.4311 -      if (sum > (CHUNK_SIZE_T)(av->max_total_mem)) 
  5.4312 -        av->max_total_mem = sum;
  5.4313 -      
  5.4314 -      return chunk2mem(newp);
  5.4315 -    }
  5.4316 -#endif
  5.4317 -
  5.4318 -    /* Note the extra SIZE_SZ overhead. */
  5.4319 -    if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ)) 
  5.4320 -      newmem = oldmem; /* do nothing */
  5.4321 -    else {
  5.4322 -      /* Must alloc, copy, free. */
  5.4323 -      newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
  5.4324 -      if (newmem != 0) {
  5.4325 -        MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
  5.4326 -        fREe(oldmem);
  5.4327 -      }
  5.4328 -    }
  5.4329 -    return newmem;
  5.4330 -
  5.4331 -#else 
  5.4332 -    /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
  5.4333 -    check_malloc_state();
  5.4334 -    MALLOC_FAILURE_ACTION;
  5.4335 -    return 0;
  5.4336 -#endif
  5.4337 -  }
  5.4338 -}
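
For callers, the routine above keeps the standard contract: realloc of NULL acts as malloc, growth first tries to extend in place (into top or a free neighbour), and only then allocates, copies, and frees. A small usage sketch showing the leak-safe calling pattern:

    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char *p, *q;

        p = realloc(NULL, 16);          /* same as malloc(16) */
        if (p == NULL)
            return 1;
        strcpy(p, "hello");

        q = realloc(p, 4096);           /* may extend in place or copy */
        if (q == NULL) {
            free(p);                    /* original block still valid */
            return 1;
        }
        p = q;
        free(p);
        return 0;
    }
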
  5.4339 -
  5.4340 -/*
  5.4341 -  ------------------------------ memalign ------------------------------
  5.4342 -*/
  5.4343 -
  5.4344 -#if __STD_C
  5.4345 -Void_t* mEMALIGn(size_t alignment, size_t bytes)
  5.4346 -#else
  5.4347 -Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
  5.4348 -#endif
  5.4349 -{
  5.4350 -  INTERNAL_SIZE_T nb;             /* padded  request size */
  5.4351 -  char*           m;              /* memory returned by malloc call */
  5.4352 -  mchunkptr       p;              /* corresponding chunk */
  5.4353 -  char*           brk;            /* alignment point within p */
  5.4354 -  mchunkptr       newp;           /* chunk to return */
  5.4355 -  INTERNAL_SIZE_T newsize;        /* its size */
  5.4356 -  INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
  5.4357 -  mchunkptr       remainder;      /* spare room at end to split off */
  5.4358 -  CHUNK_SIZE_T    remainder_size; /* its size */
  5.4359 -  INTERNAL_SIZE_T size;
  5.4360 -
  5.4361 -  /* If we need less alignment than we provide anyway, just relay to malloc */
  5.4362 -
  5.4363 -  if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
  5.4364 -
  5.4365 -  /* Otherwise, ensure that it is at least a minimum chunk size */
  5.4366 -
  5.4367 -  if (alignment <  MINSIZE) alignment = MINSIZE;
  5.4368 -
  5.4369 -  /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
  5.4370 -  if ((alignment & (alignment - 1)) != 0) {
  5.4371 -    size_t a = MALLOC_ALIGNMENT * 2;
  5.4372 -    while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
  5.4373 -    alignment = a;
  5.4374 -  }
  5.4375 -
  5.4376 -  checked_request2size(bytes, nb);
  5.4377 -
  5.4378 -  /*
  5.4379 -    Strategy: find a spot within that chunk that meets the alignment
  5.4380 -    request, and then possibly free the leading and trailing space.
  5.4381 -  */
  5.4382 -
  5.4383 -
  5.4384 -  /* Call malloc with worst case padding to hit alignment. */
  5.4385 -
  5.4386 -  m  = (char*)(mALLOc(nb + alignment + MINSIZE));
  5.4387 -
  5.4388 -  if (m == 0) return 0; /* propagate failure */
  5.4389 -
  5.4390 -  p = mem2chunk(m);
  5.4391 -
  5.4392 -  if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */
  5.4393 -
  5.4394 -    /*
  5.4395 -      Find an aligned spot inside chunk.  Since we need to give back
  5.4396 -      leading space in a chunk of at least MINSIZE, if the first
  5.4397 -      calculation places us at a spot with less than MINSIZE leader,
  5.4398 -      we can move to the next aligned spot -- we've allocated enough
  5.4399 -      total room so that this is always possible.
  5.4400 -    */
  5.4401 -
  5.4402 -    brk = (char*)mem2chunk((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
  5.4403 -                           -((signed long) alignment)));
  5.4404 -    if ((CHUNK_SIZE_T)(brk - (char*)(p)) < MINSIZE)
  5.4405 -      brk += alignment;
  5.4406 -
  5.4407 -    newp = (mchunkptr)brk;
  5.4408 -    leadsize = brk - (char*)(p);
  5.4409 -    newsize = chunksize(p) - leadsize;
  5.4410 -
  5.4411 -    /* For mmapped chunks, just adjust offset */
  5.4412 -    if (chunk_is_mmapped(p)) {
  5.4413 -      newp->prev_size = p->prev_size + leadsize;
  5.4414 -      set_head(newp, newsize|IS_MMAPPED);
  5.4415 -      return chunk2mem(newp);
  5.4416 -    }
  5.4417 -
  5.4418 -    /* Otherwise, give back leader, use the rest */
  5.4419 -    set_head(newp, newsize | PREV_INUSE);
  5.4420 -    set_inuse_bit_at_offset(newp, newsize);
  5.4421 -    set_head_size(p, leadsize);
  5.4422 -    fREe(chunk2mem(p));
  5.4423 -    p = newp;
  5.4424 -
  5.4425 -    assert (newsize >= nb &&
  5.4426 -            (((PTR_UINT)(chunk2mem(p))) % alignment) == 0);
  5.4427 -  }
  5.4428 -
  5.4429 -  /* Also give back spare room at the end */
  5.4430 -  if (!chunk_is_mmapped(p)) {
  5.4431 -    size = chunksize(p);
  5.4432 -    if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
  5.4433 -      remainder_size = size - nb;
  5.4434 -      remainder = chunk_at_offset(p, nb);
  5.4435 -      set_head(remainder, remainder_size | PREV_INUSE);
  5.4436 -      set_head_size(p, nb);
  5.4437 -      fREe(chunk2mem(remainder));
  5.4438 -    }
  5.4439 -  }
  5.4440 -
  5.4441 -  check_inuse_chunk(p);
  5.4442 -  return chunk2mem(p);
  5.4443 -}
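
Two pieces of arithmetic above are worth isolating: rounding a non-power-of-two alignment up to a power of two, and finding the first aligned address at or above a malloced pointer. A sketch under those assumptions (helper names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* smallest power of two >= a, starting from a guaranteed minimum */
    static size_t next_pow2(size_t a, size_t floor_align)
    {
        size_t r = floor_align;
        while (r < a)
            r <<= 1;
        return r;
    }

    /* first address >= m that is aligned; alignment must be a power of two */
    static char *align_up(char *m, size_t alignment)
    {
        return (char *)(((uintptr_t)(m + alignment - 1))
                        & ~(uintptr_t)(alignment - 1));
    }
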
  5.4444 -
  5.4445 -/*
  5.4446 -  ------------------------------ calloc ------------------------------
  5.4447 -*/
  5.4448 -
  5.4449 -#if __STD_C
  5.4450 -Void_t* cALLOc(size_t n_elements, size_t elem_size)
  5.4451 -#else
  5.4452 -Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
  5.4453 -#endif
  5.4454 -{
  5.4455 -  mchunkptr p;
  5.4456 -  CHUNK_SIZE_T  clearsize;
  5.4457 -  CHUNK_SIZE_T  nclears;
  5.4458 -  INTERNAL_SIZE_T* d;
  5.4459 -
  5.4460 -  Void_t* mem = mALLOc(n_elements * elem_size);
  5.4461 -
  5.4462 -  if (mem != 0) {
  5.4463 -    p = mem2chunk(mem);
  5.4464 -
  5.4465 -    if (!chunk_is_mmapped(p))
  5.4466 -    {  
  5.4467 -      /*
  5.4468 -        Unroll clear of <= 36 bytes (72 if 8-byte sizes)
  5.4469 -        We know that contents have an odd number of
  5.4470 -        INTERNAL_SIZE_T-sized words; minimally 3.
  5.4471 -      */
  5.4472 -
  5.4473 -      d = (INTERNAL_SIZE_T*)mem;
  5.4474 -      clearsize = chunksize(p) - SIZE_SZ;
  5.4475 -      nclears = clearsize / sizeof(INTERNAL_SIZE_T);
  5.4476 -      assert(nclears >= 3);
  5.4477 -
  5.4478 -      if (nclears > 9)
  5.4479 -        MALLOC_ZERO(d, clearsize);
  5.4480 -
  5.4481 -      else {
  5.4482 -        *(d+0) = 0;
  5.4483 -        *(d+1) = 0;
  5.4484 -        *(d+2) = 0;
  5.4485 -        if (nclears > 4) {
  5.4486 -          *(d+3) = 0;
  5.4487 -          *(d+4) = 0;
  5.4488 -          if (nclears > 6) {
  5.4489 -            *(d+5) = 0;
  5.4490 -            *(d+6) = 0;
  5.4491 -            if (nclears > 8) {
  5.4492 -              *(d+7) = 0;
  5.4493 -              *(d+8) = 0;
  5.4494 -            }
  5.4495 -          }
  5.4496 -        }
  5.4497 -      }
  5.4498 -    }
  5.4499 -#if ! MMAP_CLEARS
  5.4500 -    else
  5.4501 -    {
  5.4502 -      d = (INTERNAL_SIZE_T*)mem;
  5.4503 -      /*
  5.4504 -        Note the additional SIZE_SZ
  5.4505 -      */
  5.4506 -      clearsize = chunksize(p) - 2*SIZE_SZ;
  5.4507 -      MALLOC_ZERO(d, clearsize);
  5.4508 -    }
  5.4509 -#endif
  5.4510 -  }
  5.4511 -  return mem;
  5.4512 -}
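
One caveat in the code above: the n_elements * elem_size multiplication is unchecked, so a large element count can wrap around and yield a block smaller than requested. A guard of the kind later allocators adopted (sketch, not present in the removed file):

    #include <stddef.h>
    #include <stdint.h>

    /* nonzero if a*b would overflow size_t; on success stores a*b in *out */
    static int mul_overflows(size_t a, size_t b, size_t *out)
    {
        if (b != 0 && a > SIZE_MAX / b)
            return 1;
        *out = a * b;
        return 0;
    }
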
  5.4513 -
  5.4514 -/*
  5.4515 -  ------------------------------ cfree ------------------------------
  5.4516 -*/
  5.4517 -
  5.4518 -#if __STD_C
  5.4519 -void cFREe(Void_t *mem)
  5.4520 -#else
  5.4521 -void cFREe(mem) Void_t *mem;
  5.4522 -#endif
  5.4523 -{
  5.4524 -  fREe(mem);
  5.4525 -}
  5.4526 -
  5.4527 -/*
  5.4528 -  ------------------------- independent_calloc -------------------------
  5.4529 -*/
  5.4530 -
  5.4531 -#if __STD_C
  5.4532 -Void_t** iCALLOc(size_t n_elements, size_t elem_size, Void_t* chunks[])
  5.4533 -#else
  5.4534 -Void_t** iCALLOc(n_elements, elem_size, chunks) size_t n_elements; size_t elem_size; Void_t* chunks[];
  5.4535 -#endif
  5.4536 -{
  5.4537 -  size_t sz = elem_size; /* serves as 1-element array */
  5.4538 -  /* opts arg of 3 means all elements are same size, and should be cleared */
  5.4539 -  return iALLOc(n_elements, &sz, 3, chunks);
  5.4540 -}
  5.4541 -
  5.4542 -/*
  5.4543 -  ------------------------- independent_comalloc -------------------------
  5.4544 -*/
  5.4545 -
  5.4546 -#if __STD_C
  5.4547 -Void_t** iCOMALLOc(size_t n_elements, size_t sizes[], Void_t* chunks[])
  5.4548 -#else
  5.4549 -Void_t** iCOMALLOc(n_elements, sizes, chunks) size_t n_elements; size_t sizes[]; Void_t* chunks[];
  5.4550 -#endif
  5.4551 -{
  5.4552 -  return iALLOc(n_elements, sizes, 0, chunks);
  5.4553 -}
  5.4554 -
  5.4555 -
  5.4556 -/*
  5.4557 -  ------------------------------ ialloc ------------------------------
  5.4558 -  ialloc provides common support for independent_X routines, handling all of
  5.4559 -  the combinations that can result.
  5.4560 -
  5.4561 -  The opts arg has:
  5.4562 -    bit 0 set if all elements are same size (using sizes[0])
  5.4563 -    bit 1 set if elements should be zeroed
  5.4564 -*/
  5.4565 -
  5.4566 -
  5.4567 -#if __STD_C
  5.4568 -static Void_t** iALLOc(size_t n_elements, 
  5.4569 -                       size_t* sizes,  
  5.4570 -                       int opts,
  5.4571 -                       Void_t* chunks[])
  5.4572 -#else
  5.4573 -static Void_t** iALLOc(n_elements, sizes, opts, chunks) size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
  5.4574 -#endif
  5.4575 -{
  5.4576 -  mstate av = get_malloc_state();
  5.4577 -  INTERNAL_SIZE_T element_size;   /* chunksize of each element, if all same */
  5.4578 -  INTERNAL_SIZE_T contents_size;  /* total size of elements */
  5.4579 -  INTERNAL_SIZE_T array_size;     /* request size of pointer array */
  5.4580 -  Void_t*         mem;            /* malloced aggregate space */
  5.4581 -  mchunkptr       p;              /* corresponding chunk */
  5.4582 -  INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
  5.4583 -  Void_t**        marray;         /* either "chunks" or malloced ptr array */
  5.4584 -  mchunkptr       array_chunk;    /* chunk for malloced ptr array */
  5.4585 -  int             mmx;            /* to disable mmap */
  5.4586 -  INTERNAL_SIZE_T size;           
  5.4587 -  size_t          i;
  5.4588 -
  5.4589 -  /* Ensure initialization */
  5.4590 -  if (av->max_fast == 0) malloc_consolidate(av);
  5.4591 -
  5.4592 -  /* compute array length, if needed */
  5.4593 -  if (chunks != 0) {
  5.4594 -    if (n_elements == 0)
  5.4595 -      return chunks; /* nothing to do */
  5.4596 -    marray = chunks;
  5.4597 -    array_size = 0;
  5.4598 -  }
  5.4599 -  else {
  5.4600 -    /* if empty req, must still return chunk representing empty array */
  5.4601 -    if (n_elements == 0) 
  5.4602 -      return (Void_t**) mALLOc(0);
  5.4603 -    marray = 0;
  5.4604 -    array_size = request2size(n_elements * (sizeof(Void_t*)));
  5.4605 -  }
  5.4606 -
  5.4607 -  /* compute total element size */
  5.4608 -  if (opts & 0x1) { /* all-same-size */
  5.4609 -    element_size = request2size(*sizes);
  5.4610 -    contents_size = n_elements * element_size;
  5.4611 -  }
  5.4612 -  else { /* add up all the sizes */
  5.4613 -    element_size = 0;
  5.4614 -    contents_size = 0;
  5.4615 -    for (i = 0; i != n_elements; ++i) 
  5.4616 -      contents_size += request2size(sizes[i]);     
  5.4617 -  }
  5.4618 -
  5.4619 -  /* subtract out alignment bytes from total to minimize overallocation */
  5.4620 -  size = contents_size + array_size - MALLOC_ALIGN_MASK;
  5.4621 -  
  5.4622 -  /* 
  5.4623 -     Allocate the aggregate chunk.
  5.4624 -     But first disable mmap so malloc won't use it, since
  5.4625 -     we would not be able to later free/realloc space internal
  5.4626 -     to a segregated mmap region.
  5.4627 - */
  5.4628 -  mmx = av->n_mmaps_max;   /* disable mmap */
  5.4629 -  av->n_mmaps_max = 0;
  5.4630 -  mem = mALLOc(size);
  5.4631 -  av->n_mmaps_max = mmx;   /* reset mmap */
  5.4632 -  if (mem == 0) 
  5.4633 -    return 0;
  5.4634 -
  5.4635 -  p = mem2chunk(mem);
  5.4636 -  assert(!chunk_is_mmapped(p)); 
  5.4637 -  remainder_size = chunksize(p);
  5.4638 -
  5.4639 -  if (opts & 0x2) {       /* optionally clear the elements */
  5.4640 -    MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
  5.4641 -  }
  5.4642 -
  5.4643 -  /* If not provided, allocate the pointer array as final part of chunk */
  5.4644 -  if (marray == 0) {
  5.4645 -    array_chunk = chunk_at_offset(p, contents_size);
  5.4646 -    marray = (Void_t**) (chunk2mem(array_chunk));
  5.4647 -    set_head(array_chunk, (remainder_size - contents_size) | PREV_INUSE);
  5.4648 -    remainder_size = contents_size;
  5.4649 -  }
  5.4650 -
  5.4651 -  /* split out elements */
  5.4652 -  for (i = 0; ; ++i) {
  5.4653 -    marray[i] = chunk2mem(p);
  5.4654 -    if (i != n_elements-1) {
  5.4655 -      if (element_size != 0) 
  5.4656 -        size = element_size;
  5.4657 -      else
  5.4658 -        size = request2size(sizes[i]);          
  5.4659 -      remainder_size -= size;
  5.4660 -      set_head(p, size | PREV_INUSE);
  5.4661 -      p = chunk_at_offset(p, size);
  5.4662 -    }
  5.4663 -    else { /* the final element absorbs any overallocation slop */
  5.4664 -      set_head(p, remainder_size | PREV_INUSE);
  5.4665 -      break;
  5.4666 -    }
  5.4667 -  }
  5.4668 -
  5.4669 -#if DEBUG
  5.4670 -  if (marray != chunks) {
  5.4671 -    /* final element must have exactly exhausted chunk */
  5.4672 -    if (element_size != 0) 
  5.4673 -      assert(remainder_size == element_size);
  5.4674 -    else
  5.4675 -      assert(remainder_size == request2size(sizes[i]));
  5.4676 -    check_inuse_chunk(mem2chunk(marray));
  5.4677 -  }
  5.4678 -
  5.4679 -  for (i = 0; i != n_elements; ++i)
  5.4680 -    check_inuse_chunk(mem2chunk(marray[i]));
  5.4681 -#endif
  5.4682 -
  5.4683 -  return marray;
  5.4684 -}
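
A usage sketch for the independent_comalloc entry point that iCOMALLOc implements (sizes illustrative): every element, and the returned pointer array itself, is individually freeable even though all were carved from one aggregate chunk.

    #include <stdlib.h>

    /* public name for iCOMALLOc, per this allocator's conventions */
    void **independent_comalloc(size_t n_elements, size_t sizes[],
                                void *chunks[]);

    int demo(void)
    {
        size_t sizes[3] = { 40, 64, 128 };
        void **ptrs = independent_comalloc(3, sizes, 0); /* 0: array malloced too */

        if (ptrs == 0)
            return 1;
        /* ... use ptrs[0..2] ... */
        free(ptrs[0]);
        free(ptrs[1]);
        free(ptrs[2]);
        free(ptrs);                     /* the pointer array itself */
        return 0;
    }
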
  5.4685 -
  5.4686 -
  5.4687 -/*
  5.4688 -  ------------------------------ valloc ------------------------------
  5.4689 -*/
  5.4690 -
  5.4691 -#if __STD_C
  5.4692 -Void_t* vALLOc(size_t bytes)
  5.4693 -#else
  5.4694 -Void_t* vALLOc(bytes) size_t bytes;
  5.4695 -#endif
  5.4696 -{
  5.4697 -  /* Ensure initialization */
  5.4698 -  mstate av = get_malloc_state();
  5.4699 -  if (av->max_fast == 0) malloc_consolidate(av);
  5.4700 -  return mEMALIGn(av->pagesize, bytes);
  5.4701 -}
  5.4702 -
  5.4703 -/*
  5.4704 -  ------------------------------ pvalloc ------------------------------
  5.4705 -*/
  5.4706 -
  5.4707 -
  5.4708 -#if __STD_C
  5.4709 -Void_t* pVALLOc(size_t bytes)
  5.4710 -#else
  5.4711 -Void_t* pVALLOc(bytes) size_t bytes;
  5.4712 -#endif
  5.4713 -{
  5.4714 -  mstate av = get_malloc_state();
  5.4715 -  size_t pagesz;
  5.4716 -
  5.4717 -  /* Ensure initialization */
  5.4718 -  if (av->max_fast == 0) malloc_consolidate(av);
  5.4719 -  pagesz = av->pagesize;
  5.4720 -  return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
  5.4721 -}
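
The mask expression in pVALLOc rounds the request up to a whole number of pages; with a 4096-byte page, for instance:

    #include <stdio.h>

    int main(void)
    {
        size_t pagesz  = 4096;          /* must be a power of two */
        size_t bytes   = 5000;
        size_t rounded = (bytes + pagesz - 1) & ~(pagesz - 1);

        printf("%lu\n", (unsigned long)rounded);   /* prints 8192 */
        return 0;
    }
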
  5.4722 -   
  5.4723 -
  5.4724 -/*
  5.4725 -  ------------------------------ malloc_trim ------------------------------
  5.4726 -*/
  5.4727 -
  5.4728 -#if __STD_C
  5.4729 -int mTRIm(size_t pad)
  5.4730 -#else
  5.4731 -int mTRIm(pad) size_t pad;
  5.4732 -#endif
  5.4733 -{
  5.4734 -  mstate av = get_malloc_state();
  5.4735 -  /* Ensure initialization/consolidation */
  5.4736 -  malloc_consolidate(av);
  5.4737 -
  5.4738 -#ifndef MORECORE_CANNOT_TRIM        
  5.4739 -  return sYSTRIm(pad, av);
  5.4740 -#else
  5.4741 -  return 0;
  5.4742 -#endif
  5.4743 -}
  5.4744 -
  5.4745 -
  5.4746 -/*
  5.4747 -  ------------------------- malloc_usable_size -------------------------
  5.4748 -*/
  5.4749 -
  5.4750 -#if __STD_C
  5.4751 -size_t mUSABLe(Void_t* mem)
  5.4752 -#else
  5.4753 -size_t mUSABLe(mem) Void_t* mem;
  5.4754 -#endif
  5.4755 -{
  5.4756 -  mchunkptr p;
  5.4757 -  if (mem != 0) {
  5.4758 -    p = mem2chunk(mem);
  5.4759 -    if (chunk_is_mmapped(p))
  5.4760 -      return chunksize(p) - 2*SIZE_SZ;
  5.4761 -    else if (inuse(p))
  5.4762 -      return chunksize(p) - SIZE_SZ;
  5.4763 -  }
  5.4764 -  return 0;
  5.4765 -}
  5.4766 -
  5.4767 -/*
  5.4768 -  ------------------------------ mallinfo ------------------------------
  5.4769 -*/
  5.4770 -
  5.4771 -struct mallinfo mALLINFo()
  5.4772 -{
  5.4773 -  mstate av = get_malloc_state();
  5.4774 -  struct mallinfo mi;
  5.4775 -  int i;
  5.4776 -  mbinptr b;
  5.4777 -  mchunkptr p;
  5.4778 -  INTERNAL_SIZE_T avail;
  5.4779 -  INTERNAL_SIZE_T fastavail;
  5.4780 -  int nblocks;
  5.4781 -  int nfastblocks;
  5.4782 -
  5.4783 -  /* Ensure initialization */
  5.4784 -  if (av->top == 0)  malloc_consolidate(av);
  5.4785 -
  5.4786 -  check_malloc_state();
  5.4787 -
  5.4788 -  /* Account for top */
  5.4789 -  avail = chunksize(av->top);
  5.4790 -  nblocks = 1;  /* top always exists */
  5.4791 -
  5.4792 -  /* traverse fastbins */
  5.4793 -  nfastblocks = 0;
  5.4794 -  fastavail = 0;
  5.4795 -
  5.4796 -  for (i = 0; i < NFASTBINS; ++i) {
  5.4797 -    for (p = av->fastbins[i]; p != 0; p = p->fd) {
  5.4798 -      ++nfastblocks;
  5.4799 -      fastavail += chunksize(p);
  5.4800 -    }
  5.4801 -  }
  5.4802 -
  5.4803 -  avail += fastavail;
  5.4804 -
  5.4805 -  /* traverse regular bins */
  5.4806 -  for (i = 1; i < NBINS; ++i) {
  5.4807 -    b = bin_at(av, i);
  5.4808 -    for (p = last(b); p != b; p = p->bk) {
  5.4809 -      ++nblocks;
  5.4810 -      avail += chunksize(p);
  5.4811 -    }
  5.4812 -  }
  5.4813 -
  5.4814 -  mi.smblks = nfastblocks;
  5.4815 -  mi.ordblks = nblocks;
  5.4816 -  mi.fordblks = avail;
  5.4817 -  mi.uordblks = av->sbrked_mem - avail;
  5.4818 -  mi.arena = av->sbrked_mem;
  5.4819 -  mi.hblks = av->n_mmaps;
  5.4820 -  mi.hblkhd = av->mmapped_mem;
  5.4821 -  mi.fsmblks = fastavail;
  5.4822 -  mi.keepcost = chunksize(av->top);
  5.4823 -  mi.usmblks = av->max_total_mem;
  5.4824 -  return mi;
  5.4825 -}
  5.4826 -
  5.4827 -/*
  5.4828 -  ------------------------------ malloc_stats ------------------------------
  5.4829 -*/
  5.4830 -
  5.4831 -void mSTATs()
  5.4832 -{
  5.4833 -  struct mallinfo mi = mALLINFo();
  5.4834 -
  5.4835 -#ifdef WIN32
  5.4836 -  {
  5.4837 -    CHUNK_SIZE_T  free, reserved, committed;
  5.4838 -    vminfo (&free, &reserved, &committed);
  5.4839 -    fprintf(stderr, "free bytes       = %10lu\n", 
  5.4840 -            free);
  5.4841 -    fprintf(stderr, "reserved bytes   = %10lu\n", 
  5.4842 -            reserved);
  5.4843 -    fprintf(stderr, "committed bytes  = %10lu\n", 
  5.4844 -            committed);
  5.4845 -  }
  5.4846 -#endif
  5.4847 -
  5.4848 -/* RN XXX  */
  5.4849 -  printf("max system bytes = %10lu\n",
  5.4850 -          (CHUNK_SIZE_T)(mi.usmblks));
  5.4851 -  printf("system bytes     = %10lu\n",
  5.4852 -          (CHUNK_SIZE_T)(mi.arena + mi.hblkhd));
  5.4853 -  printf("in use bytes     = %10lu\n",
  5.4854 -          (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
  5.4855 -
  5.4856 -#ifdef WIN32 
  5.4857 -  {
  5.4858 -    CHUNK_SIZE_T  kernel, user;
  5.4859 -    if (cpuinfo (TRUE, &kernel, &user)) {
  5.4860 -      fprintf(stderr, "kernel ms        = %10lu\n", 
  5.4861 -              kernel);
  5.4862 -      fprintf(stderr, "user ms          = %10lu\n", 
  5.4863 -              user);
  5.4864 -    }
  5.4865 -  }
  5.4866 -#endif
  5.4867 -}
  5.4868 -
  5.4869 -
  5.4870 -/*
  5.4871 -  ------------------------------ mallopt ------------------------------
  5.4872 -*/
  5.4873 -
  5.4874 -#if __STD_C
  5.4875 -int mALLOPt(int param_number, int value)
  5.4876 -#else
  5.4877 -int mALLOPt(param_number, value) int param_number; int value;
  5.4878 -#endif
  5.4879 -{
  5.4880 -  mstate av = get_malloc_state();
  5.4881 -  /* Ensure initialization/consolidation */
  5.4882 -  malloc_consolidate(av);
  5.4883 -
  5.4884 -  switch(param_number) {
  5.4885 -  case M_MXFAST:
  5.4886 -    if (value >= 0 && value <= MAX_FAST_SIZE) {
  5.4887 -      set_max_fast(av, value);
  5.4888 -      return 1;
  5.4889 -    }
  5.4890 -    else
  5.4891 -      return 0;
  5.4892 -
  5.4893 -  case M_TRIM_THRESHOLD:
  5.4894 -    av->trim_threshold = value;
  5.4895 -    return 1;
  5.4896 -
  5.4897 -  case M_TOP_PAD:
  5.4898 -    av->top_pad = value;
  5.4899 -    return 1;
  5.4900 -
  5.4901 -  case M_MMAP_THRESHOLD:
  5.4902 -    av->mmap_threshold = value;
  5.4903 -    return 1;
  5.4904 -
  5.4905 -  case M_MMAP_MAX:
  5.4906 -#if !HAVE_MMAP
  5.4907 -    if (value != 0)
  5.4908 -      return 0;
  5.4909 -#endif
  5.4910 -    av->n_mmaps_max = value;
  5.4911 -    return 1;
  5.4912 -
  5.4913 -  default:
  5.4914 -    return 0;
  5.4915 -  }
  5.4916 -}
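
A short tuning sketch against the entry point above (values illustrative; the M_* constants are the ones this file uses, and a return of 1 means the setting was accepted):

    static void tune(void)
    {
        mALLOPt(M_MXFAST, 64);               /* fastbins for requests <= 64 bytes */
        mALLOPt(M_TRIM_THRESHOLD, 128*1024); /* trim top past 128 KiB of slack */
        mALLOPt(M_MMAP_THRESHOLD, 256*1024); /* mmap requests of 256 KiB and up */
    }
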
  5.4917 -
  5.4918 -
  5.4919 -/* 
  5.4920 -  -------------------- Alternative MORECORE functions --------------------
  5.4921 -*/
  5.4922 -
  5.4923 -
  5.4924 -/*
  5.4925 -  General Requirements for MORECORE.
  5.4926 -
  5.4927 -  The MORECORE function must have the following properties:
  5.4928 -
  5.4929 -  If MORECORE_CONTIGUOUS is false:
  5.4930 -
  5.4931 -    * MORECORE must allocate in multiples of pagesize. It will
  5.4932 -      only be called with arguments that are multiples of pagesize.
  5.4933 -
  5.4934 -    * MORECORE(0) must return an address that is at least 
  5.4935 -      MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
  5.4936 -
  5.4937 -  else (i.e., if MORECORE_CONTIGUOUS is true):
  5.4938 -
  5.4939 -    * Consecutive calls to MORECORE with positive arguments
  5.4940 -      return increasing addresses, indicating that space has been
  5.4941 -      contiguously extended. 
  5.4942 -
  5.4943 -    * MORECORE need not allocate in multiples of pagesize.
  5.4944 -      Calls to MORECORE need not have args of multiples of pagesize.
  5.4945 -
  5.4946 -    * MORECORE need not page-align.
  5.4947 -
  5.4948 -  In either case:
  5.4949 -
  5.4950 -    * MORECORE may allocate more memory than requested. (Or even less,
  5.4951 -      but this will generally result in a malloc failure.)
  5.4952 -
  5.4953 -    * MORECORE must not allocate memory when given argument zero, but
  5.4954 -      instead return one past the end address of memory from previous
  5.4955 -      nonzero call. This malloc does NOT call MORECORE(0)
  5.4956 -      until at least one call with positive arguments is made, so
  5.4957 -      the initial value returned is not important.
  5.4958 -
  5.4959 -    * Even though consecutive calls to MORECORE need not return contiguous
  5.4960 -      addresses, it must be OK for malloc'ed chunks to span multiple
  5.4961 -      regions in those cases where they do happen to be contiguous.
  5.4962 -
  5.4963 -    * MORECORE need not handle negative arguments -- it may instead
  5.4964 -      just return MORECORE_FAILURE when given negative arguments.
  5.4965 -      Negative arguments are always multiples of pagesize. MORECORE
  5.4966 -      must not misinterpret negative args as large positive unsigned
  5.4967 -      args. You can suppress all such calls from even occurring by defining
  5.4968 -      MORECORE_CANNOT_TRIM.
  5.4969 -
  5.4970 -  There is some variation across systems about the type of the
  5.4971 -  argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
  5.4972 -  actually be size_t, because sbrk supports negative args, so it is
  5.4973 -  normally the signed type of the same width as size_t (sometimes
  5.4974 -  declared as "intptr_t", and sometimes "ptrdiff_t").  It doesn't much
  5.4975 -  matter though. Internally, we use "long" as arguments, which should
  5.4976 -  work across all reasonable possibilities.
  5.4977 -
  5.4978 -  Additionally, if MORECORE ever returns failure for a positive
  5.4979 -  request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
  5.4980 -  system allocator. This is a useful backup strategy for systems with
  5.4981 -  holes in address spaces -- in this case sbrk cannot contiguously
  5.4982 -  expand the heap, but mmap may be able to map noncontiguous space.
  5.4983 -
  5.4984 -  If you'd like mmap to ALWAYS be used, you can define MORECORE to be
  5.4985 -  a function that always returns MORECORE_FAILURE.
  5.4986 -
  5.4987 -  Malloc only has limited ability to detect failures of MORECORE
  5.4988 -  to supply contiguous space when it says it can. In particular,
  5.4989 -  multithreaded programs that do not use locks may result in
  5.4990 -      race conditions across calls to MORECORE that result in gaps
  5.4991 -  that cannot be detected as such, and subsequent corruption.
  5.4992 -
  5.4993 -  If you are using this malloc with something other than sbrk (or its
  5.4994 -  emulation) to supply memory regions, you probably want to set
  5.4995 -  MORECORE_CONTIGUOUS as false.  As an example, here is a custom
  5.4996 -  allocator kindly contributed for pre-OSX macOS.  It uses virtually
  5.4997 -  but not necessarily physically contiguous non-paged memory (locked
  5.4998 -  in, present and won't get swapped out).  You can use it by
  5.4999 -  uncommenting this section, adding some #includes, and setting up the
  5.5000 -  appropriate defines above:
  5.5001 -
  5.5002 -      #define MORECORE osMoreCore
  5.5003 -      #define MORECORE_CONTIGUOUS 0
  5.5004 -
  5.5005 -  There is also a shutdown routine that should somehow be called for
  5.5006 -  cleanup upon program exit.
  5.5007 -
  5.5008 -  #define MAX_POOL_ENTRIES 100
  5.5009 -  #define MINIMUM_MORECORE_SIZE  (64 * 1024)
  5.5010 -  static int next_os_pool;
  5.5011 -  void *our_os_pools[MAX_POOL_ENTRIES];
  5.5012 -
  5.5013 -  void *osMoreCore(int size)
  5.5014 -  {
  5.5015 -    void *ptr = 0;
  5.5016 -    static void *sbrk_top = 0;
  5.5017 -
  5.5018 -    if (size > 0)
  5.5019 -    {
  5.5020 -      if (size < MINIMUM_MORECORE_SIZE)
  5.5021 -         size = MINIMUM_MORECORE_SIZE;
  5.5022 -      if (CurrentExecutionLevel() == kTaskLevel)
  5.5023 -         ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
  5.5024 -      if (ptr == 0)
  5.5025 -      {
  5.5026 -        return (void *) MORECORE_FAILURE;
  5.5027 -      }
  5.5028 -      // save ptrs so they can be freed during cleanup
  5.5029 -      our_os_pools[next_os_pool] = ptr;
  5.5030 -      next_os_pool++;
  5.5031 -      ptr = (void *) ((((CHUNK_SIZE_T) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
  5.5032 -      sbrk_top = (char *) ptr + size;
  5.5033 -      return ptr;
  5.5034 -    }
  5.5035 -    else if (size < 0)
  5.5036 -    {
  5.5037 -      // we don't currently support shrink behavior
  5.5038 -      return (void *) MORECORE_FAILURE;
  5.5039 -    }
  5.5040 -    else
  5.5041 -    {
  5.5042 -      return sbrk_top;
  5.5043 -    }
  5.5044 -  }
  5.5045 -
  5.5046 -  // cleanup any allocated memory pools
  5.5047 -  // called as last thing before shutting down driver
  5.5048 -
  5.5049 -  void osCleanupMem(void)
  5.5050 -  {
  5.5051 -    void **ptr;
  5.5052 -
  5.5053 -    for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
  5.5054 -      if (*ptr)
  5.5055 -      {
  5.5056 -         PoolDeallocate(*ptr);
  5.5057 -         *ptr = 0;
  5.5058 -      }
  5.5059 -  }
  5.5060 -
  5.5061 -*/
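
As the note above says, forcing mmap-only operation takes nothing more than a MORECORE that always fails; a minimal sketch using the definitions discussed in that comment (set the defines before the allocator is compiled):

    /* every sbrk-style request fails, so malloc falls back to mmap */
    static void *fail_morecore(long size)
    {
        (void)size;
        return (void *)MORECORE_FAILURE;
    }

    #define MORECORE            fail_morecore
    #define MORECORE_CONTIGUOUS 0
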
  5.5062 -
  5.5063 -
  5.5064 -/* 
  5.5065 -  -------------------------------------------------------------- 
  5.5066 -
  5.5067 -  Emulation of sbrk for win32. 
  5.5068 -  Donated by J. Walter <Walter@GeNeSys-e.de>.
  5.5069 -  For additional information about this code, and malloc on Win32, see 
  5.5070 -     http://www.genesys-e.de/jwalter/
  5.5071 -*/
  5.5072 -
  5.5073 -
  5.5074 -#ifdef WIN32
  5.5075 -
  5.5076 -#ifdef _DEBUG
  5.5077 -/* #define TRACE */
  5.5078 -#endif
  5.5079 -
  5.5080 -/* Support for USE_MALLOC_LOCK */
  5.5081 -#ifdef USE_MALLOC_LOCK
  5.5082 -
  5.5083 -/* Wait for spin lock */
  5.5084 -static int slwait (int *sl) {
  5.5085 -    while (InterlockedCompareExchange ((void **) sl, (void *) 1, (void *) 0) != 0) 
  5.5086 -	    Sleep (0);
  5.5087 -    return 0;
  5.5088 -}
  5.5089 -
  5.5090 -/* Release spin lock */
  5.5091 -static int slrelease (int *sl) {
  5.5092 -    InterlockedExchange (sl, 0);
  5.5093 -    return 0;
  5.5094 -}
  5.5095 -
  5.5096 -#ifdef NEEDED
  5.5097 -/* Spin lock for emulation code */
  5.5098 -static int g_sl;
  5.5099 -#endif
  5.5100 -
  5.5101 -#endif /* USE_MALLOC_LOCK */
  5.5102 -
  5.5103 -/* getpagesize for windows */
  5.5104 -static long getpagesize (void) {
  5.5105 -    static long g_pagesize = 0;
  5.5106 -    if (! g_pagesize) {
  5.5107 -        SYSTEM_INFO system_info;
  5.5108 -        GetSystemInfo (&system_info);
  5.5109 -        g_pagesize = system_info.dwPageSize;
  5.5110 -    }
  5.5111 -    return g_pagesize;
  5.5112 -}
  5.5113 -static long getregionsize (void) {
  5.5114 -    static long g_regionsize = 0;
  5.5115 -    if (! g_regionsize) {
  5.5116 -        SYSTEM_INFO system_info;
  5.5117 -        GetSystemInfo (&system_info);
  5.5118 -        g_regionsize = system_info.dwAllocationGranularity;
  5.5119 -    }
  5.5120 -    return g_regionsize;
  5.5121 -}
  5.5122 -
  5.5123 -/* A region list entry */
  5.5124 -typedef struct _region_list_entry {
  5.5125 -    void *top_allocated;
  5.5126 -    void *top_committed;
  5.5127 -    void *top_reserved;
  5.5128 -    long reserve_size;
  5.5129 -    struct _region_list_entry *previous;
  5.5130 -} region_list_entry;
  5.5131 -
  5.5132 -/* Allocate and link a region entry in the region list */
  5.5133 -static int region_list_append (region_list_entry **last, void *base_reserved, long reserve_size) {
  5.5134 -    region_list_entry *next = HeapAlloc (GetProcessHeap (), 0, sizeof (region_list_entry));
  5.5135 -    if (! next)
  5.5136 -        return FALSE;
  5.5137 -    next->top_allocated = (char *) base_reserved;
  5.5138 -    next->top_committed = (char *) base_reserved;
  5.5139 -    next->top_reserved = (char *) base_reserved + reserve_size;
  5.5140 -    next->reserve_size = reserve_size;
  5.5141 -    next->previous = *last;
  5.5142 -    *last = next;
  5.5143 -    return TRUE;
  5.5144 -}
  5.5145 -/* Free and unlink the last region entry from the region list */
  5.5146 -static int region_list_remove (region_list_entry **last) {
  5.5147 -    region_list_entry *previous = (*last)->previous;
  5.5148 -    if (! HeapFree (GetProcessHeap (), 0, *last))
  5.5149 -        return FALSE;
  5.5150 -    *last = previous;
  5.5151 -    return TRUE;
  5.5152 -}
  5.5153 -
  5.5154 -#define CEIL(size,to)	(((size)+(to)-1)&~((to)-1))
  5.5155 -#define FLOOR(size,to)	((size)&~((to)-1))
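
Both macros assume that "to" is a power of two: CEIL(5000, 4096) evaluates to 8192, and FLOOR(5000, 4096) to 4096.
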
  5.5156 -
  5.5157 -#define SBRK_SCALE  0
  5.5158 -/* #define SBRK_SCALE  1 */
  5.5159 -/* #define SBRK_SCALE  2 */
  5.5160 -/* #define SBRK_SCALE  4  */
  5.5161 -
  5.5162 -/* sbrk for windows */
  5.5163 -static void *sbrk (long size) {
  5.5164 -    static long g_pagesize, g_my_pagesize;
  5.5165 -    static long g_regionsize, g_my_regionsize;
  5.5166 -    static region_list_entry *g_last;
  5.5167 -    void *result = (void *) MORECORE_FAILURE;
  5.5168 -#ifdef TRACE
  5.5169 -    printf ("sbrk %d\n", size);
  5.5170 -#endif
  5.5171 -#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
  5.5172 -    /* Wait for spin lock */
  5.5173 -    slwait (&g_sl);
  5.5174 -#endif
  5.5175 -    /* First time initialization */
  5.5176 -    if (! g_pagesize) {
  5.5177 -        g_pagesize = getpagesize ();
  5.5178 -        g_my_pagesize = g_pagesize << SBRK_SCALE;
  5.5179 -    }
  5.5180 -    if (! g_regionsize) {
  5.5181 -        g_regionsize = getregionsize ();
  5.5182 -        g_my_regionsize = g_regionsize << SBRK_SCALE;
  5.5183 -    }
  5.5184 -    if (! g_last) {
  5.5185 -        if (! region_list_append (&g_last, 0, 0)) 
  5.5186 -           goto sbrk_exit;
  5.5187 -    }
  5.5188 -    /* Assert invariants */
  5.5189 -    assert (g_last);
  5.5190 -    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
  5.5191 -            g_last->top_allocated <= g_last->top_committed);
  5.5192 -    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
  5.5193 -            g_last->top_committed <= g_last->top_reserved &&
  5.5194 -            (unsigned) g_last->top_committed % g_pagesize == 0);
  5.5195 -    assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
  5.5196 -    assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
  5.5197 -    /* Allocation requested? */
  5.5198 -    if (size >= 0) {
  5.5199 -        /* Allocation size is the requested size */
  5.5200 -        long allocate_size = size;
  5.5201 -        /* Compute the size to commit */
  5.5202 -        long to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
  5.5203 -        /* Do we reach the commit limit? */
  5.5204 -        if (to_commit > 0) {
  5.5205 -            /* Round size to commit */
  5.5206 -            long commit_size = CEIL (to_commit, g_my_pagesize);
  5.5207 -            /* Compute the size to reserve */
  5.5208 -            long to_reserve = (char *) g_last->top_committed + commit_size - (char *) g_last->top_reserved;
  5.5209 -            /* Do we reach the reserve limit? */
  5.5210 -            if (to_reserve > 0) {
  5.5211 -                /* Compute the remaining size to commit in the current region */
  5.5212 -                long remaining_commit_size = (char *) g_last->top_reserved - (char *) g_last->top_committed;
  5.5213 -                if (remaining_commit_size > 0) {
  5.5214 -                    /* Assert preconditions */
  5.5215 -                    assert ((unsigned) g_last->top_committed % g_pagesize == 0);
  5.5216 -                    assert (0 < remaining_commit_size && remaining_commit_size % g_pagesize == 0); {
  5.5217 -                        /* Commit this */
  5.5218 -                        void *base_committed = VirtualAlloc (g_last->top_committed, remaining_commit_size,
  5.5219 -							                                 MEM_COMMIT, PAGE_READWRITE);
  5.5220 -                        /* Check returned pointer for consistency */
  5.5221 -                        if (base_committed != g_last->top_committed)
  5.5222 -                            goto sbrk_exit;
  5.5223 -                        /* Assert postconditions */
  5.5224 -                        assert ((unsigned) base_committed % g_pagesize == 0);
  5.5225 -#ifdef TRACE
  5.5226 -                        printf ("Commit %p %d\n", base_committed, remaining_commit_size);
  5.5227 -#endif
  5.5228 -                        /* Adjust the regions commit top */
  5.5229 -                        g_last->top_committed = (char *) base_committed + remaining_commit_size;
  5.5230 -                    }
  5.5231 -                } {
  5.5232 -                    /* Now we are going to search and reserve. */
  5.5233 -                    int contiguous = -1;
  5.5234 -                    int found = FALSE;
  5.5235 -                    MEMORY_BASIC_INFORMATION memory_info;
  5.5236 -                    void *base_reserved;
  5.5237 -                    long reserve_size;
  5.5238 -                    do {
  5.5239 -                        /* Assume contiguous memory */
  5.5240 -                        contiguous = TRUE;
  5.5241 -                        /* Round size to reserve */
  5.5242 -                        reserve_size = CEIL (to_reserve, g_my_regionsize);
  5.5243 -                        /* Start with the current region's top */
  5.5244 -                        memory_info.BaseAddress = g_last->top_reserved;
  5.5245 -                        /* Assert preconditions */
  5.5246 -                        assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
  5.5247 -                        assert (0 < reserve_size && reserve_size % g_regionsize == 0);
  5.5248 -                        while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
  5.5249 -                            /* Assert postconditions */
  5.5250 -                            assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
  5.5251 -#ifdef TRACE
  5.5252 -                            printf ("Query %p %d %s\n", memory_info.BaseAddress, memory_info.RegionSize, 
  5.5253 -                                    memory_info.State == MEM_FREE ? "FREE": 
  5.5254 -                                    (memory_info.State == MEM_RESERVE ? "RESERVED":
  5.5255 -                                     (memory_info.State == MEM_COMMIT ? "COMMITTED": "?")));
  5.5256 -#endif
  5.5257 -                            /* Region is free, well aligned and big enough: we are done */
  5.5258 -                            if (memory_info.State == MEM_FREE &&
  5.5259 -                                (unsigned) memory_info.BaseAddress % g_regionsize == 0 &&
  5.5260 -                                memory_info.RegionSize >= (unsigned) reserve_size) {
  5.5261 -                                found = TRUE;
  5.5262 -                                break;
  5.5263 -                            }
  5.5264 -                            /* From now on we can't get contiguous memory! */
  5.5265 -                            contiguous = FALSE;
  5.5266 -                            /* Recompute size to reserve */
  5.5267 -                            reserve_size = CEIL (allocate_size, g_my_regionsize);
  5.5268 -                            memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
  5.5269 -                            /* Assert preconditions */
  5.5270 -                            assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
  5.5271 -                            assert (0 < reserve_size && reserve_size % g_regionsize == 0);
  5.5272 -                        }
  5.5273 -                        /* Search failed? */
  5.5274 -                        if (! found) 
  5.5275 -                            goto sbrk_exit;
  5.5276 -                        /* Assert preconditions */
  5.5277 -                        assert ((unsigned) memory_info.BaseAddress % g_regionsize == 0);
  5.5278 -                        assert (0 < reserve_size && reserve_size % g_regionsize == 0);
  5.5279 -                        /* Try to reserve this */
  5.5280 -                        base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size, 
  5.5281 -					                                  MEM_RESERVE, PAGE_NOACCESS);
  5.5282 -                        if (! base_reserved) {
  5.5283 -                            int rc = GetLastError ();
  5.5284 -                            if (rc != ERROR_INVALID_ADDRESS) 
  5.5285 -                                goto sbrk_exit;
  5.5286 -                        }
  5.5287 -                        /* A null pointer signals (hopefully) a race condition with another thread. */
  5.5288 -                        /* In this case, we try again. */
  5.5289 -                    } while (! base_reserved);
  5.5290 -                    /* Check returned pointer for consistency */
  5.5291 -                    if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress)
  5.5292 -                        goto sbrk_exit;
  5.5293 -                    /* Assert postconditions */
  5.5294 -                    assert ((unsigned) base_reserved % g_regionsize == 0);
  5.5295 -#ifdef TRACE
  5.5296 -                    printf ("Reserve %p %d\n", base_reserved, reserve_size);
  5.5297 -#endif
  5.5298 -                    /* Did we get contiguous memory? */
  5.5299 -                    if (contiguous) {
  5.5300 -                        long start_size = (char *) g_last->top_committed - (char *) g_last->top_allocated;
  5.5301 -                        /* Adjust allocation size */
  5.5302 -                        allocate_size -= start_size;
  5.5303 -                        /* Adjust the regions allocation top */
  5.5304 -                        g_last->top_allocated = g_last->top_committed;
  5.5305 -                        /* Recompute the size to commit */
  5.5306 -                        to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
  5.5307 -                        /* Round size to commit */
  5.5308 -                        commit_size = CEIL (to_commit, g_my_pagesize);
  5.5309 -                    } 
  5.5310 -                    /* Append the new region to the list */
  5.5311 -                    if (! region_list_append (&g_last, base_reserved, reserve_size))
  5.5312 -                        goto sbrk_exit;
  5.5313 -                    /* Didn't we get contiguous memory? */
  5.5314 -                    if (! contiguous) {
  5.5315 -                        /* Recompute the size to commit */
  5.5316 -                        to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
  5.5317 -                        /* Round size to commit */
  5.5318 -                        commit_size = CEIL (to_commit, g_my_pagesize);
  5.5319 -                    }
  5.5320 -                }
  5.5321 -            } 
  5.5322 -            /* Assert preconditions */
  5.5323 -            assert ((unsigned) g_last->top_committed % g_pagesize == 0);
  5.5324 -            assert (0 < commit_size && commit_size % g_pagesize == 0); {
  5.5325 -                /* Commit this */
  5.5326 -                void *base_committed = VirtualAlloc (g_last->top_committed, commit_size, 
  5.5327 -				    			                     MEM_COMMIT, PAGE_READWRITE);
  5.5328 -                /* Check returned pointer for consistency */
  5.5329 -                if (base_committed != g_last->top_committed)
  5.5330 -                    goto sbrk_exit;
  5.5331 -                /* Assert postconditions */
  5.5332 -                assert ((unsigned) base_committed % g_pagesize == 0);
  5.5333 -#ifdef TRACE
  5.5334 -                printf ("Commit %p %d\n", base_committed, commit_size);
  5.5335 -#endif
  5.5336 -                /* Adjust the regions commit top */
  5.5337 -                g_last->top_committed = (char *) base_committed + commit_size;
  5.5338 -            }
  5.5339 -        } 
  5.5340 -        /* Adjust the regions allocation top */
  5.5341 -        g_last->top_allocated = (char *) g_last->top_allocated + allocate_size;
  5.5342 -        result = (char *) g_last->top_allocated - size;
  5.5343 -    /* Deallocation requested? */
  5.5344 -    } else if (size < 0) {
  5.5345 -        long deallocate_size = - size;
  5.5346 -        /* As long as we have a region to release */
  5.5347 -        while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) {
  5.5348 -            /* Get the size to release */
  5.5349 -            long release_size = g_last->reserve_size;
  5.5350 -            /* Get the base address */
  5.5351 -            void *base_reserved = (char *) g_last->top_reserved - release_size;
  5.5352 -            /* Assert preconditions */
  5.5353 -            assert ((unsigned) base_reserved % g_regionsize == 0); 
  5.5354 -            assert (0 < release_size && release_size % g_regionsize == 0); {
  5.5355 -                /* Release this */
  5.5356 -                int rc = VirtualFree (base_reserved, 0, 
  5.5357 -                                      MEM_RELEASE);
  5.5358 -                /* Check returned code for consistency */
  5.5359 -                if (! rc)
  5.5360 -                    goto sbrk_exit;
  5.5361 -#ifdef TRACE
  5.5362 -                printf ("Release %p %d\n", base_reserved, release_size);
  5.5363 -#endif
  5.5364 -            }
  5.5365 -            /* Adjust deallocation size */
  5.5366 -            deallocate_size -= (char *) g_last->top_allocated - (char *) base_reserved;
  5.5367 -            /* Remove the old region from the list */
  5.5368 -            if (! region_list_remove (&g_last))
  5.5369 -                goto sbrk_exit;
  5.5370 -        } {
  5.5371 -            /* Compute the size to decommit */
  5.5372 -            long to_decommit = (char *) g_last->top_committed - ((char *) g_last->top_allocated - deallocate_size);
  5.5373 -            if (to_decommit >= g_my_pagesize) {
  5.5374 -                /* Compute the size to decommit */
  5.5375 -                long decommit_size = FLOOR (to_decommit, g_my_pagesize);
  5.5376 -                /*  Compute the base address */
  5.5377 -                void *base_committed = (char *) g_last->top_committed - decommit_size;
  5.5378 -                /* Assert preconditions */
  5.5379 -                assert ((unsigned) base_committed % g_pagesize == 0);
  5.5380 -                assert (0 < decommit_size && decommit_size % g_pagesize == 0); {
  5.5381 -                    /* Decommit this */
  5.5382 -                    int rc = VirtualFree ((char *) base_committed, decommit_size, 
  5.5383 -                                          MEM_DECOMMIT);
  5.5384 -                    /* Check returned code for consistency */
  5.5385 -                    if (! rc)
  5.5386 -                        goto sbrk_exit;
  5.5387 -#ifdef TRACE
  5.5388 -                    printf ("Decommit %p %d\n", base_committed, decommit_size);
  5.5389 -#endif
  5.5390 -                }
  5.5391 -                /* Adjust deallocation size and regions commit and allocate top */
  5.5392 -                deallocate_size -= (char *) g_last->top_allocated - (char *) base_committed;
  5.5393 -                g_last->top_committed = base_committed;
  5.5394 -                g_last->top_allocated = base_committed;
  5.5395 -            }
  5.5396 -        }
  5.5397 -        /* Adjust regions allocate top */
  5.5398 -        g_last->top_allocated = (char *) g_last->top_allocated - deallocate_size;
  5.5399 -        /* Check for underflow */
  5.5400 -        if ((char *) g_last->top_reserved - g_last->reserve_size > (char *) g_last->top_allocated ||
  5.5401 -            g_last->top_allocated > g_last->top_committed) {
  5.5402 -            /* Adjust regions allocate top */
  5.5403 -            g_last->top_allocated = (char *) g_last->top_reserved - g_last->reserve_size;
  5.5404 -            goto sbrk_exit;
  5.5405 -        }
  5.5406 -        result = g_last->top_allocated;
  5.5407 -    }
  5.5408 -    /* Assert invariants */
  5.5409 -    assert (g_last);
  5.5410 -    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
  5.5411 -            g_last->top_allocated <= g_last->top_committed);
  5.5412 -    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
  5.5413 -            g_last->top_committed <= g_last->top_reserved &&
  5.5414 -            (unsigned) g_last->top_committed % g_pagesize == 0);
  5.5415 -    assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
  5.5416 -    assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
  5.5417 -
  5.5418 -sbrk_exit:
  5.5419 -#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
  5.5420 -    /* Release spin lock */
  5.5421 -    slrelease (&g_sl);
  5.5422 -#endif
  5.5423 -    return result;
  5.5424 -}
  5.5425 -
  5.5426 -/* mmap for windows */
  5.5427 -static void *mmap (void *ptr, long size, long prot, long type, long handle, long arg) {
  5.5428 -    static long g_pagesize;
  5.5429 -    static long g_regionsize;
  5.5430 -#ifdef TRACE
  5.5431 -    printf ("mmap %d\n", size);
  5.5432 -#endif
  5.5433 -#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
  5.5434 -    /* Wait for spin lock */
  5.5435 -    slwait (&g_sl);
  5.5436 -#endif
  5.5437 -    /* First time initialization */
  5.5438 -    if (! g_pagesize) 
  5.5439 -        g_pagesize = getpagesize ();
  5.5440 -    if (! g_regionsize) 
  5.5441 -        g_regionsize = getregionsize ();
  5.5442 -    /* Assert preconditions */
  5.5443 -    assert ((unsigned) ptr % g_regionsize == 0);
  5.5444 -    assert (size % g_pagesize == 0);
  5.5445 -    /* Allocate this */
  5.5446 -    ptr = VirtualAlloc (ptr, size,
  5.5447 -					    MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, PAGE_READWRITE);
  5.5448 -    if (! ptr) {
  5.5449 -        ptr = (void *) MORECORE_FAILURE;
  5.5450 -        goto mmap_exit;
  5.5451 -    }
  5.5452 -    /* Assert postconditions */
  5.5453 -    assert ((unsigned) ptr % g_regionsize == 0);
  5.5454 -#ifdef TRACE
  5.5455 -    printf ("Commit %p %d\n", ptr, size);
  5.5456 -#endif
  5.5457 -mmap_exit:
  5.5458 -#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
  5.5459 -    /* Release spin lock */
  5.5460 -    slrelease (&g_sl);
  5.5461 -#endif
  5.5462 -    return ptr;
  5.5463 -}
  5.5464 -
  5.5465 -/* munmap for windows */
  5.5466 -static long munmap (void *ptr, long size) {
  5.5467 -    static long g_pagesize;
  5.5468 -    static long g_regionsize;
  5.5469 -    int rc = MUNMAP_FAILURE;
  5.5470 -#ifdef TRACE
  5.5471 -    printf ("munmap %p %d\n", ptr, size);
  5.5472 -#endif
  5.5473 -#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
  5.5474 -    /* Wait for spin lock */
  5.5475 -    slwait (&g_sl);
  5.5476 -#endif
  5.5477 -    /* First time initialization */
  5.5478 -    if (! g_pagesize) 
  5.5479 -        g_pagesize = getpagesize ();
  5.5480 -    if (! g_regionsize) 
  5.5481 -        g_regionsize = getregionsize ();
  5.5482 -    /* Assert preconditions */
  5.5483 -    assert ((unsigned) ptr % g_regionsize == 0);
  5.5484 -    assert (size % g_pagesize == 0);
  5.5485 -    /* Free this */
  5.5486 -    if (! VirtualFree (ptr, 0, 
  5.5487 -                       MEM_RELEASE))
  5.5488 -        goto munmap_exit;
  5.5489 -    rc = 0;
  5.5490 -#ifdef TRACE
  5.5491 -    printf ("Release %p %d\n", ptr, size);
  5.5492 -#endif
  5.5493 -munmap_exit:
  5.5494 -#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
  5.5495 -    /* Release spin lock */
  5.5496 -    slrelease (&g_sl);
  5.5497 -#endif
  5.5498 -    return rc;
  5.5499 -}
  5.5500 -
  5.5501 -static void vminfo (CHUNK_SIZE_T  *free, CHUNK_SIZE_T  *reserved, CHUNK_SIZE_T  *committed) {
  5.5502 -    MEMORY_BASIC_INFORMATION memory_info;
  5.5503 -    memory_info.BaseAddress = 0;
  5.5504 -    *free = *reserved = *committed = 0;
  5.5505 -    while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
  5.5506 -        switch (memory_info.State) {
  5.5507 -        case MEM_FREE:
  5.5508 -            *free += memory_info.RegionSize;
  5.5509 -            break;
  5.5510 -        case MEM_RESERVE:
  5.5511 -            *reserved += memory_info.RegionSize;
  5.5512 -            break;
  5.5513 -        case MEM_COMMIT:
  5.5514 -            *committed += memory_info.RegionSize;
  5.5515 -            break;
  5.5516 -        }
  5.5517 -        memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
  5.5518 -    }
  5.5519 -}
  5.5520 -
  5.5521 -static int cpuinfo (int whole, CHUNK_SIZE_T  *kernel, CHUNK_SIZE_T  *user) {
  5.5522 -    if (whole) {
  5.5523 -        __int64 creation64, exit64, kernel64, user64;
  5.5524 -        int rc = GetProcessTimes (GetCurrentProcess (), 
  5.5525 -                                  (FILETIME *) &creation64,  
  5.5526 -                                  (FILETIME *) &exit64, 
  5.5527 -                                  (FILETIME *) &kernel64, 
  5.5528 -                                  (FILETIME *) &user64);
  5.5529 -        if (! rc) {
  5.5530 -            *kernel = 0;
  5.5531 -            *user = 0;
  5.5532 -            return FALSE;
  5.5533 -        } 
  5.5534 -        *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
  5.5535 -        *user = (CHUNK_SIZE_T) (user64 / 10000);
  5.5536 -        return TRUE;
  5.5537 -    } else {
  5.5538 -        __int64 creation64, exit64, kernel64, user64;
  5.5539 -        int rc = GetThreadTimes (GetCurrentThread (), 
  5.5540 -                                 (FILETIME *) &creation64,  
  5.5541 -                                 (FILETIME *) &exit64, 
  5.5542 -                                 (FILETIME *) &kernel64, 
  5.5543 -                                 (FILETIME *) &user64);
  5.5544 -        if (! rc) {
  5.5545 -            *kernel = 0;
  5.5546 -            *user = 0;
  5.5547 -            return FALSE;
  5.5548 -        } 
  5.5549 -        *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
  5.5550 -        *user = (CHUNK_SIZE_T) (user64 / 10000);
  5.5551 -        return TRUE;
  5.5552 -    }
  5.5553 -}
  5.5554 -
  5.5555 -#endif /* WIN32 */
  5.5556 -
  5.5557 -/* ------------------------------------------------------------
  5.5558 -History:
  5.5559 -    V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
  5.5560 -      * Fix malloc_state bitmap array misdeclaration
  5.5561 -
  5.5562 -    V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
  5.5563 -      * Allow tuning of FIRST_SORTED_BIN_SIZE
  5.5564 -      * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
  5.5565 -      * Better detection and support for non-contiguousness of MORECORE. 
  5.5566 -        Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
  5.5567 -      * Bypass most of malloc if no frees. Thanks To Emery Berger.
   5.5568 -      * Fix freeing of old top non-contiguous chunk in sysmalloc.
  5.5569 -      * Raised default trim and map thresholds to 256K.
  5.5570 -      * Fix mmap-related #defines. Thanks to Lubos Lunak.
  5.5571 -      * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
  5.5572 -      * Branch-free bin calculation
  5.5573 -      * Default trim and mmap thresholds now 256K.
  5.5574 -
  5.5575 -    V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
  5.5576 -      * Introduce independent_comalloc and independent_calloc.
  5.5577 -        Thanks to Michael Pachos for motivation and help.
  5.5578 -      * Make optional .h file available
  5.5579 -      * Allow > 2GB requests on 32bit systems.
  5.5580 -      * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
  5.5581 -        Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
  5.5582 -        and Anonymous.
  5.5583 -      * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for 
  5.5584 -        helping test this.)
  5.5585 -      * memalign: check alignment arg
  5.5586 -      * realloc: don't try to shift chunks backwards, since this
  5.5587 -        leads to  more fragmentation in some programs and doesn't
  5.5588 -        seem to help in any others.
  5.5589 -      * Collect all cases in malloc requiring system memory into sYSMALLOc
  5.5590 -      * Use mmap as backup to sbrk
  5.5591 -      * Place all internal state in malloc_state
  5.5592 -      * Introduce fastbins (although similar to 2.5.1)
  5.5593 -      * Many minor tunings and cosmetic improvements
  5.5594 -      * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK 
  5.5595 -      * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
  5.5596 -        Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
  5.5597 -      * Include errno.h to support default failure action.
  5.5598 -
  5.5599 -    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
  5.5600 -      * return null for negative arguments
  5.5601 -      * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
  5.5602 -         * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
  5.5603 -          (e.g. WIN32 platforms)
  5.5604 -         * Cleanup header file inclusion for WIN32 platforms
  5.5605 -         * Cleanup code to avoid Microsoft Visual C++ compiler complaints
  5.5606 -         * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
  5.5607 -           memory allocation routines
  5.5608 -         * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
  5.5609 -         * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
  5.5610 -           usage of 'assert' in non-WIN32 code
  5.5611 -         * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
  5.5612 -           avoid infinite loop
  5.5613 -      * Always call 'fREe()' rather than 'free()'
  5.5614 -
  5.5615 -    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
  5.5616 -      * Fixed ordering problem with boundary-stamping
  5.5617 -
  5.5618 -    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
  5.5619 -      * Added pvalloc, as recommended by H.J. Liu
  5.5620 -      * Added 64bit pointer support mainly from Wolfram Gloger
  5.5621 -      * Added anonymously donated WIN32 sbrk emulation
  5.5622 -      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
  5.5623 -      * malloc_extend_top: fix mask error that caused wastage after
  5.5624 -        foreign sbrks
  5.5625 -      * Add linux mremap support code from HJ Liu
  5.5626 -
  5.5627 -    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
  5.5628 -      * Integrated most documentation with the code.
  5.5629 -      * Add support for mmap, with help from
  5.5630 -        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5.5631 -      * Use last_remainder in more cases.
  5.5632 -      * Pack bins using idea from  colin@nyx10.cs.du.edu
   5.5633 -      * Use ordered bins instead of best-fit threshold
  5.5634 -      * Eliminate block-local decls to simplify tracing and debugging.
  5.5635 -      * Support another case of realloc via move into top
   5.5636 -      * Fix error occurring when initial sbrk_base not word-aligned.
  5.5637 -      * Rely on page size for units instead of SBRK_UNIT to
  5.5638 -        avoid surprises about sbrk alignment conventions.
  5.5639 -      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
  5.5640 -        (raymond@es.ele.tue.nl) for the suggestion.
  5.5641 -      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
  5.5642 -      * More precautions for cases where other routines call sbrk,
  5.5643 -        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5.5644 -      * Added macros etc., allowing use in linux libc from
  5.5645 -        H.J. Lu (hjl@gnu.ai.mit.edu)
  5.5646 -      * Inverted this history list
  5.5647 -
  5.5648 -    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
  5.5649 -      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
  5.5650 -      * Removed all preallocation code since under current scheme
  5.5651 -        the work required to undo bad preallocations exceeds
  5.5652 -        the work saved in good cases for most test programs.
  5.5653 -      * No longer use return list or unconsolidated bins since
  5.5654 -        no scheme using them consistently outperforms those that don't
  5.5655 -        given above changes.
  5.5656 -      * Use best fit for very large chunks to prevent some worst-cases.
  5.5657 -      * Added some support for debugging
  5.5658 -
  5.5659 -    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
  5.5660 -      * Removed footers when chunks are in use. Thanks to
  5.5661 -        Paul Wilson (wilson@cs.texas.edu) for the suggestion.
  5.5662 -
  5.5663 -    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
  5.5664 -      * Added malloc_trim, with help from Wolfram Gloger
  5.5665 -        (wmglo@Dent.MED.Uni-Muenchen.DE).
  5.5666 -
  5.5667 -    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
  5.5668 -
  5.5669 -    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
  5.5670 -      * realloc: try to expand in both directions
  5.5671 -      * malloc: swap order of clean-bin strategy;
  5.5672 -      * realloc: only conditionally expand backwards
  5.5673 -      * Try not to scavenge used bins
  5.5674 -      * Use bin counts as a guide to preallocation
  5.5675 -      * Occasionally bin return list chunks in first scan
  5.5676 -      * Add a few optimizations from colin@nyx10.cs.du.edu
  5.5677 -
  5.5678 -    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
  5.5679 -      * faster bin computation & slightly different binning
  5.5680 -      * merged all consolidations to one part of malloc proper
  5.5681 -         (eliminating old malloc_find_space & malloc_clean_bin)
  5.5682 -      * Scan 2 returns chunks (not just 1)
  5.5683 -      * Propagate failure in realloc if malloc returns 0
  5.5684 -      * Add stuff to allow compilation on non-ANSI compilers
  5.5685 -          from kpv@research.att.com
  5.5686 -
  5.5687 -    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
  5.5688 -      * removed potential for odd address access in prev_chunk
  5.5689 -      * removed dependency on getpagesize.h
  5.5690 -      * misc cosmetics and a bit more internal documentation
  5.5691 -      * anticosmetics: mangled names in macros to evade debugger strangeness
  5.5692 -      * tested on sparc, hp-700, dec-mips, rs6000
  5.5693 -          with gcc & native cc (hp, dec only) allowing
  5.5694 -          Detlefs & Zorn comparison study (in SIGPLAN Notices.)
  5.5695 -
  5.5696 -    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
  5.5697 -      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
  5.5698 -         structure of old version,  but most details differ.)
  5.5699 -
  5.5700 -*/
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/extras/mini-os/lib/xmalloc.c	Fri Aug 26 10:35:36 2005 +0000
     6.3 @@ -0,0 +1,219 @@
     6.4 +/* 
     6.5 + ****************************************************************************
     6.6 + * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
     6.7 + ****************************************************************************
     6.8 + *
      6.9 + *        File: xmalloc.c
    6.10 + *      Author: Grzegorz Milos (gm281@cam.ac.uk)
    6.11 + *     Changes: 
    6.12 + *              
    6.13 + *        Date: Aug 2005
    6.14 + * 
    6.15 + * Environment: Xen Minimal OS
    6.16 + * Description: simple memory allocator
    6.17 + *
    6.18 + ****************************************************************************
     6.19 + * Simple allocator for Mini-os.  Requests of a page or more (header
     6.20 + * included) go straight to the page-order allocator.
    6.21 + *
    6.22 + * Copy of the allocator for Xen by Rusty Russell:
    6.23 + * Copyright (C) 2005 Rusty Russell IBM Corporation
    6.24 + *
    6.25 + * This program is free software; you can redistribute it and/or modify
    6.26 + * it under the terms of the GNU General Public License as published by
    6.27 + * the Free Software Foundation; either version 2 of the License, or
    6.28 + * (at your option) any later version.
    6.29 + *
    6.30 + * This program is distributed in the hope that it will be useful,
    6.31 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    6.32 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    6.33 + * GNU General Public License for more details.
    6.34 + *
    6.35 + * You should have received a copy of the GNU General Public License
    6.36 + * along with this program; if not, write to the Free Software
    6.37 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    6.38 + */
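          +
          +/*
          + * Layout sketch (editorial illustration): every allocation carries a
          + * struct xmalloc_hdr immediately before the pointer handed out:
          + *
          + *     [ xmalloc_hdr | payload ... ]
          + *     ^ hdr          ^ returned to caller (hdr + 1)
          + *
          + * hdr->size counts header plus payload.  Free blocks are chained
          + * through hdr->freelist; allocated blocks have both list pointers
          + * set to NULL, which xfree() uses to catch double frees.
          + */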
    6.39 +
    6.40 +#include <os.h>
    6.41 +#include <mm.h>
    6.42 +#include <types.h>
    6.43 +#include <lib.h>
    6.44 +#include <list.h>
    6.45 +
    6.46 +static LIST_HEAD(freelist);
    6.47 +/* static spinlock_t freelist_lock = SPIN_LOCK_UNLOCKED; */
    6.48 +
    6.49 +struct xmalloc_hdr
    6.50 +{
    6.51 +    /* Total including this hdr. */
    6.52 +    size_t size;
    6.53 +    struct list_head freelist;
    6.54 +} __cacheline_aligned;
    6.55 +
    6.56 +static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block)
    6.57 +{
    6.58 +    struct xmalloc_hdr *extra;
    6.59 +    size_t leftover = block - size;
    6.60 +
     6.61 +    /* If enough is left to make a block, put it on the free list. */
    6.62 +    if ( leftover >= (2 * sizeof(struct xmalloc_hdr)) )
    6.63 +    {
    6.64 +        extra = (struct xmalloc_hdr *)((unsigned long)hdr + size);
    6.65 +        extra->size = leftover;
    6.66 +        list_add(&extra->freelist, &freelist);
    6.67 +    }
    6.68 +    else
    6.69 +    {
    6.70 +        size = block;
    6.71 +    }
    6.72 +
    6.73 +    hdr->size = size;
    6.74 +    /* Debugging aid. */
    6.75 +    hdr->freelist.next = hdr->freelist.prev = NULL;
    6.76 +}
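          +
          +/*
          + * Worked example (illustrative, assuming PAGE_SIZE is 4096):
          + * maybe_split(hdr, 128, 4096) computes leftover = 3968.  That is
          + * comfortably more than two headers' worth, so a free block of
          + * 3968 bytes starting at (char *)hdr + 128 is pushed onto the
          + * freelist and hdr->size becomes 128.  Had the leftover been
          + * smaller than two headers, it would simply have been absorbed
          + * into the allocation (size = block).
          + */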
    6.77 +
    6.78 +static void *xmalloc_new_page(size_t size)
    6.79 +{
    6.80 +    struct xmalloc_hdr *hdr;
    6.81 +    /* unsigned long flags; */
    6.82 +
    6.83 +    hdr = (struct xmalloc_hdr *)alloc_page();
    6.84 +    if ( hdr == NULL )
    6.85 +        return NULL;
    6.86 +
    6.87 +    /* spin_lock_irqsave(&freelist_lock, flags); */
    6.88 +    maybe_split(hdr, size, PAGE_SIZE);
    6.89 +    /* spin_unlock_irqrestore(&freelist_lock, flags); */
    6.90 +
    6.91 +    return hdr+1;
    6.92 +}
    6.93 +
    6.94 +/* Big object?  Just use the page allocator. */
    6.95 +static void *xmalloc_whole_pages(size_t size)
    6.96 +{
    6.97 +    struct xmalloc_hdr *hdr;
    6.98 +    unsigned int pageorder = get_order(size);
    6.99 +
   6.100 +    hdr = (struct xmalloc_hdr *)alloc_pages(pageorder);
   6.101 +    if ( hdr == NULL )
   6.102 +        return NULL;
   6.103 +
    6.104 +    hdr->size = (1UL << (pageorder + PAGE_SHIFT));
   6.105 +    /* Debugging aid. */
   6.106 +    hdr->freelist.next = hdr->freelist.prev = NULL;
   6.107 +
   6.108 +    return hdr+1;
   6.109 +}
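          +
          +/*
          + * Example (illustrative, assuming 4KB pages, PAGE_SHIFT == 12): a
          + * 5000-byte request (header already included) gives
          + * get_order(5000) == 1, so two contiguous pages are taken and
          + * hdr->size records 1 << (1 + 12) == 8192.  xfree() later recovers
          + * the order from hdr->size and returns the pages in one go.
          + */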
   6.110 +
    6.111 +/* Round size up to the next multiple of align (align must be a power of two). */
   6.112 +static inline size_t align_up(size_t size, size_t align)
   6.113 +{
   6.114 +    return (size + align - 1) & ~(align - 1);
   6.115 +}
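          +
          +/*
          + * For instance, align_up(13, 8) == 16 and align_up(16, 8) == 16.
          + * The bit trick only works when align is a power of two.
          + */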
   6.116 +
   6.117 +void *_xmalloc(size_t size, size_t align)
   6.118 +{
   6.119 +    struct xmalloc_hdr *i;
   6.120 +    /* unsigned long flags; */
   6.121 +
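          +    /*
          +     * Note (editorial): the align argument is never examined below.
          +     * Because struct xmalloc_hdr is __cacheline_aligned, every block
          +     * (and hence every returned pointer) starts on a cache-line
          +     * boundary, which implicitly satisfies any align up to the
          +     * cache-line size; larger alignments are not supported here.
          +     */
          +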
   6.122 +    /* Add room for header, pad to align next header. */
   6.123 +    size += sizeof(struct xmalloc_hdr);
   6.124 +    size = align_up(size, __alignof__(struct xmalloc_hdr));
   6.125 +
   6.126 +    /* For big allocs, give them whole pages. */
   6.127 +    if ( size >= PAGE_SIZE )
   6.128 +        return xmalloc_whole_pages(size);
   6.129 +
   6.130 +    /* Search free list. */
   6.131 +    /* spin_lock_irqsave(&freelist_lock, flags); */
   6.132 +    list_for_each_entry( i, &freelist, freelist )
   6.133 +    {
   6.134 +        if ( i->size < size )
   6.135 +            continue;
   6.136 +        list_del(&i->freelist);
   6.137 +        maybe_split(i, size, i->size);
   6.138 +        /* spin_unlock_irqrestore(&freelist_lock, flags); */
   6.139 +        return i+1;
   6.140 +    }
   6.141 +    /* spin_unlock_irqrestore(&freelist_lock, flags); */
   6.142 +
   6.143 +    /* Alloc a new page and return from that. */
   6.144 +    return xmalloc_new_page(size);
   6.145 +}
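          +
          +/*
          + * Illustrative use (sketch; convenience wrappers around _xmalloc()
          + * are expected to live in the new xmalloc.h, not shown in this
          + * hunk):
          + *
          + *     struct foo *f = _xmalloc(sizeof(struct foo),
          + *                              __alignof__(struct foo));
          + *     if ( f != NULL )
          + *     {
          + *         ... use f ...
          + *         xfree(f);
          + *     }
          + */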
   6.146 +
   6.147 +void xfree(const void *p)
   6.148 +{
   6.149 +    /* unsigned long flags; */
   6.150 +    struct xmalloc_hdr *i, *tmp, *hdr;
   6.151 +
   6.152 +    if ( p == NULL )
   6.153 +        return;
   6.154 +
   6.155 +    hdr = (struct xmalloc_hdr *)p - 1;
   6.156 +
    6.157 +    /* The header must live on the same page as the payload. */
    6.158 +    if ( ((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK) )
    6.159 +    {
    6.160 +        printk("xfree: header not on the same page as the payload\n");
    6.161 +        *(int*)0 = 0; /* Deliberate fault to halt immediately. */
    6.162 +    }
   6.163 +
    6.164 +    /* Guard against double free: a live block has NULL list pointers. */
    6.165 +    if ( hdr->freelist.next || hdr->freelist.prev )
    6.166 +    {
    6.167 +        printk("xfree: block appears to have been freed already\n");
    6.168 +        *(int*)0 = 0; /* Deliberate fault to halt immediately. */
    6.169 +    }
   6.170 +
   6.171 +    /* Big allocs free directly. */
   6.172 +    if ( hdr->size >= PAGE_SIZE )
   6.173 +    {
   6.174 +        free_pages(hdr, get_order(hdr->size));
   6.175 +        return;
   6.176 +    }
   6.177 +
   6.178 +    /* Merge with other free block, or put in list. */
   6.179 +    /* spin_lock_irqsave(&freelist_lock, flags); */
   6.180 +    list_for_each_entry_safe( i, tmp, &freelist, freelist )
   6.181 +    {
   6.182 +        unsigned long _i   = (unsigned long)i;
   6.183 +        unsigned long _hdr = (unsigned long)hdr;
   6.184 +
   6.185 +        /* Do not merge across page boundaries. */
   6.186 +        if ( ((_i ^ _hdr) & PAGE_MASK) != 0 )
   6.187 +            continue;
   6.188 +
   6.189 +        /* We follow this block?  Swallow it. */
   6.190 +        if ( (_i + i->size) == _hdr )
   6.191 +        {
   6.192 +            list_del(&i->freelist);
   6.193 +            i->size += hdr->size;
   6.194 +            hdr = i;
   6.195 +        }
   6.196 +
   6.197 +        /* We precede this block? Swallow it. */
   6.198 +        if ( (_hdr + hdr->size) == _i )
   6.199 +        {
   6.200 +            list_del(&i->freelist);
   6.201 +            hdr->size += i->size;
   6.202 +        }
   6.203 +    }
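          +
          +    /*
          +     * Illustration: if blocks A, B, C sit back to back on one page
          +     * and A and C are already free, freeing B first swallows A
          +     * (hdr becomes A) and then swallows C, leaving one block that
          +     * may now cover the whole page.
          +     */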
   6.204 +
   6.205 +    /* Did we merge an entire page? */
   6.206 +    if ( hdr->size == PAGE_SIZE )
   6.207 +    {
    6.208 +        if ( (((unsigned long)hdr) & (PAGE_SIZE-1)) != 0 )
    6.209 +        {
    6.210 +            printk("xfree: fully merged block is not page-aligned\n");
    6.211 +            *(int*)0 = 0; /* Deliberate fault to halt immediately. */
    6.212 +        }
   6.213 +        free_pages(hdr, 0);
   6.214 +    }
   6.215 +    else
   6.216 +    {
   6.217 +        list_add(&hdr->freelist, &freelist);
   6.218 +    }
   6.219 +
   6.220 +    /* spin_unlock_irqrestore(&freelist_lock, flags); */
   6.221 +}
   6.222 +
     7.1 --- a/extras/mini-os/mm.c	Fri Aug 26 09:29:54 2005 +0000
     7.2 +++ b/extras/mini-os/mm.c	Fri Aug 26 10:35:36 2005 +0000
     7.3 @@ -1,6 +1,7 @@
     7.4 -/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
     7.5 +/* 
     7.6   ****************************************************************************
     7.7   * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
     7.8 + * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
     7.9   ****************************************************************************
    7.10   *
    7.11   *        File: mm.c
    7.12 @@ -14,8 +15,6 @@
    7.13   *              contains buddy page allocator from Xen.
    7.14   *
    7.15   ****************************************************************************
    7.16 - * $Id: c-insert.c,v 1.7 2002/11/08 16:04:34 rn Exp $
    7.17 - ****************************************************************************
    7.18   * Permission is hereby granted, free of charge, to any person obtaining a copy
    7.19   * of this software and associated documentation files (the "Software"), to
    7.20   * deal in the Software without restriction, including without limitation the
    7.21 @@ -40,7 +39,7 @@
    7.22  #include <mm.h>
    7.23  #include <types.h>
    7.24  #include <lib.h>
    7.25 -
    7.26 +#include <xmalloc.h>
    7.27  
    7.28  #ifdef MM_DEBUG
    7.29  #define DEBUG(_f, _a...) \
    7.30 @@ -505,6 +504,6 @@ void init_mm(void)
    7.31             (u_long)to_virt(PFN_PHYS(max_pfn)), PFN_PHYS(max_pfn));
    7.32      init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));   
    7.33  #endif
    7.34 -
    7.35 +    
    7.36      printk("MM: done\n");
    7.37  }