ia64/xen-unstable

view extras/mini-os/lib/malloc.c @ 4146:f2d61710e4d9

bitkeeper revision 1.1236.25.24 (42366e9aQ71LQ8uCB-Y1IwVNqx5eqA)

Merge djm@kirby.fc.hp.com://home/djm/src/xen/xeno-unstable-ia64.bk
into sportsman.spdomain:/home/djm/xeno-unstable-ia64.bk
author djm@sportsman.spdomain
date Tue Mar 15 05:11:54 2005 +0000 (2005-03-15)
parents c67c82ddb44a
children
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
2 ****************************************************************************
3 * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
4 ****************************************************************************
5 *
6 * File: malloc.c
7 * Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
8 * Changes:
9 *
10 * Date: Aug 2003
11 *
12 * Environment: Xen Minimal OS
13 * Description: Library functions, malloc et al.
14 *
15 ****************************************************************************
16 * $Id: c-insert.c,v 1.7 2002/11/08 16:04:34 rn Exp $
17 ****************************************************************************
18 */
20 #include <os.h>
21 #include <mm.h>
22 #include <types.h>
23 #include <lib.h>
25 /* standard compile option */
26 #define HAVE_MEMCOPY 1
27 #define USE_MEMCPY 1
28 #undef HAVE_MMAP
29 #undef MMAP_CLEARS
30 #undef HAVE_MREMAP
31 #define malloc_getpagesize PAGE_SIZE
32 #undef HAVE_USR_INCLUDE_MALLOC_H
33 #define LACKS_UNISTD_H 1
34 #define LACKS_SYS_PARAM_H 1
35 #define LACKS_SYS_MMAN_H 1
36 #define LACKS_FCNTL_H 1
39 /* page allocator interface */
40 #define MORECORE more_core
41 #define MORECORE_CONTIGUOUS 0
42 #define MORECORE_FAILURE 0
43 #define MORECORE_CANNOT_TRIM 1
45 static void *more_core(size_t n)
46 {
47 static void *last;
48 unsigned long order, num_pages;
49 void *ret;
51 if (n == 0)
52 return last;
54 n = PFN_UP(n);
55 for ( order = 0; n > 1; order++ )
56 n >>= 1;
57 ret = (void *)alloc_pages(order);
59 /* work out pointer to end of chunk */
60 if ( ret )
61 {
62 num_pages = 1 << order;
63 last = (char *)ret + (num_pages * PAGE_SIZE);
64 }
66 return ret;
67 }
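/*
 * A reading of more_core() above (descriptive note): the byte request n is
 * first rounded up to whole pages with PFN_UP(), then the loop derives the
 * alloc_pages() order as floor(log2(pages)).  For example a 4-page request
 * gives order 2 (4 pages), while a 3-page request gives order 1 (2 pages),
 * i.e. the order is rounded down for non-power-of-two page counts.  `last'
 * remembers the end of the most recently returned region so that
 * more_core(0), the MORECORE(0) probe, can report the current top of the
 * heap in the same way sbrk(0) would.
 */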
69 /* other options commented out below */
70 #define __STD_C 1
71 #define Void_t void
72 #define assert(x) ((void)0)
74 #define CHUNK_SIZE_T unsigned long
75 #define PTR_UINT unsigned long
76 #define INTERNAL_SIZE_T size_t
77 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
78 #define MALLOC_ALIGNMENT (2 * SIZE_SZ)
79 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
80 #define TRIM_FASTBINS 0
82 #define M_MXFAST 1
83 #define DEFAULT_MXFAST 64
84 #define M_TRIM_THRESHOLD -1
85 #define DEFAULT_TRIM_THRESHOLD (256 * 1024)
86 #define M_TOP_PAD -2
87 #define DEFAULT_TOP_PAD (0)
88 #define M_MMAP_THRESHOLD -3
89 #define DEFAULT_MMAP_THRESHOLD (256 * 1024)
90 #define M_MMAP_MAX -4
91 #define DEFAULT_MMAP_MAX (0)
92 #define MALLOC_FAILURE_ACTION printf("malloc failure\n")
94 #define cALLOc public_cALLOc
95 #define fREe public_fREe
96 #define cFREe public_cFREe
97 #define mALLOc public_mALLOc
98 #define mEMALIGn public_mEMALIGn
99 #define rEALLOc public_rEALLOc
100 #define vALLOc public_vALLOc
101 #define pVALLOc public_pVALLOc
102 #define mALLINFo public_mALLINFo
103 #define mALLOPt public_mALLOPt
104 #define mTRIm public_mTRIm
105 #define mSTATs public_mSTATs
106 #define mUSABLe public_mUSABLe
107 #define iCALLOc public_iCALLOc
108 #define iCOMALLOc public_iCOMALLOc
110 #define public_cALLOc calloc
111 #define public_fREe free
112 #define public_cFREe cfree
113 #define public_mALLOc malloc
114 #define public_mEMALIGn memalign
115 #define public_rEALLOc realloc
116 #define public_vALLOc valloc
117 #define public_pVALLOc pvalloc
118 #define public_mALLINFo mallinfo
119 #define public_mALLOPt mallopt
120 #define public_mTRIm malloc_trim
121 #define public_mSTATs malloc_stats
122 #define public_mUSABLe malloc_usable_size
123 #define public_iCALLOc independent_calloc
124 #define public_iCOMALLOc independent_comalloc
127 /*
128 This is a version (aka dlmalloc) of malloc/free/realloc written by
129 Doug Lea and released to the public domain. Use, modify, and
130 redistribute this code without permission or acknowledgement in any
131 way you wish. Send questions, comments, complaints, performance
132 data, etc to dl@cs.oswego.edu
134 * VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
136 Note: There may be an updated version of this malloc obtainable at
137 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
138 Check before installing!
140 * Quickstart
142 This library is all in one file to simplify the most common usage:
143 ftp it, compile it (-O), and link it into another program. All
144 of the compile-time options default to reasonable values for use on
145 most unix platforms. Compile -DWIN32 for reasonable defaults on windows.
146 You might later want to step through various compile-time and dynamic
147 tuning options.
149 For convenience, an include file for code using this malloc is at:
150 ftp://gee.cs.oswego.edu/pub/misc/malloc-2.7.1.h
151 You don't really need this .h file unless you call functions not
152 defined in your system include files. The .h file contains only the
153 excerpts from this file needed for using this malloc on ANSI C/C++
154 systems, so long as you haven't changed compile-time options about
155 naming and tuning parameters. If you do, then you can create your
156 own malloc.h that does include all settings by cutting at the point
157 indicated below.
159 * Why use this malloc?
161 This is not the fastest, most space-conserving, most portable, or
162 most tunable malloc ever written. However it is among the fastest
163 while also being among the most space-conserving, portable and tunable.
164 Consistent balance across these factors results in a good general-purpose
165 allocator for malloc-intensive programs.
167 The main properties of the algorithms are:
168 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
169 with ties normally decided via FIFO (i.e. least recently used).
170 * For small (<= 64 bytes by default) requests, it is a caching
171 allocator, that maintains pools of quickly recycled chunks.
172 * In between, and for combinations of large and small requests, it does
173 the best it can trying to meet both goals at once.
174 * For very large requests (>= 128KB by default), it relies on system
175 memory mapping facilities, if supported.
177 For a longer but slightly out of date high-level description, see
178 http://gee.cs.oswego.edu/dl/html/malloc.html
180 You may already by default be using a C library containing a malloc
181 that is based on some version of this malloc (for example in
182 linux). You might still want to use the one in this file in order to
183 customize settings or to avoid overheads associated with library
184 versions.
186 * Contents, described in more detail in "description of public routines" below.
188 Standard (ANSI/SVID/...) functions:
189 malloc(size_t n);
190 calloc(size_t n_elements, size_t element_size);
191 free(Void_t* p);
192 realloc(Void_t* p, size_t n);
193 memalign(size_t alignment, size_t n);
194 valloc(size_t n);
195 mallinfo()
196 mallopt(int parameter_number, int parameter_value)
198 Additional functions:
199 independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
200 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
201 pvalloc(size_t n);
202 cfree(Void_t* p);
203 malloc_trim(size_t pad);
204 malloc_usable_size(Void_t* p);
205 malloc_stats();
207 * Vital statistics:
209 Supported pointer representation: 4 or 8 bytes
210 Supported size_t representation: 4 or 8 bytes
211 Note that size_t is allowed to be 4 bytes even if pointers are 8.
212 You can adjust this by defining INTERNAL_SIZE_T
214 Alignment: 2 * sizeof(size_t) (default)
215 (i.e., 8 byte alignment with 4byte size_t). This suffices for
216 nearly all current machines and C compilers. However, you can
217 define MALLOC_ALIGNMENT to be wider than this if necessary.
219 Minimum overhead per allocated chunk: 4 or 8 bytes
220 Each malloced chunk has a hidden word of overhead holding size
221 and status information.
223 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
224 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
226 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
227 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
228 needed; 4 (8) for a trailing size field and 8 (16) bytes for
229 free list pointers. Thus, the minimum allocatable size is
230 16/24/32 bytes.
232 Even a request for zero bytes (i.e., malloc(0)) returns a
233 pointer to something of the minimum allocatable size.
235 The maximum overhead wastage (i.e., number of extra bytes
236 allocated beyond those requested in malloc) is less than or equal
237 to the minimum size, except for requests >= mmap_threshold that
238 are serviced via mmap(), where the worst case wastage is 2 *
239 sizeof(size_t) bytes plus the remainder from a system page (the
240 minimal mmap unit); typically 4096 or 8192 bytes.
242 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
243 8-byte size_t: 2^64 minus about two pages
245 It is assumed that (possibly signed) size_t values suffice to
246 represent chunk sizes. `Possibly signed' is due to the fact
247 that `size_t' may be defined on a system as either a signed or
248 an unsigned type. The ISO C standard says that it must be
249 unsigned, but a few systems are known not to adhere to this.
250 Additionally, even when size_t is unsigned, sbrk (which is by
251 default used to obtain memory from system) accepts signed
252 arguments, and may not be able to handle size_t-wide arguments
253 with negative sign bit. Generally, values that would
254 appear as negative after accounting for overhead and alignment
255 are supported only via mmap(), which does not have this
256 limitation.
258 Requests for sizes outside the allowed range will perform an optional
259 failure action and then return null. (Requests may also
260 fail because a system is out of memory.)
262 Thread-safety: NOT thread-safe unless USE_MALLOC_LOCK defined
264 When USE_MALLOC_LOCK is defined, wrappers are created to
265 surround every public call with either a pthread mutex or
266 a win32 spinlock (depending on WIN32). This is not
267 especially fast, and can be a major bottleneck.
268 It is designed only to provide minimal protection
269 in concurrent environments, and to provide a basis for
270 extensions. If you are using malloc in a concurrent program,
271 you would be far better off obtaining ptmalloc, which is
272 derived from a version of this malloc, and is well-tuned for
273 concurrent programs. (See http://www.malloc.de) Note that
274 even when USE_MALLOC_LOCK is defined, you can guarantee
275 full thread-safety only if no threads acquire memory through
276 direct calls to MORECORE or other system-level allocators.
278 Compliance: I believe it is compliant with the 1997 Single Unix Specification
279 (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
280 others as well.
282 * Synopsis of compile-time options:
284 People have reported using previous versions of this malloc on all
285 versions of Unix, sometimes by tweaking some of the defines
286 below. It has been tested most extensively on Solaris and
287 Linux. It is also reported to work on WIN32 platforms.
288 People also report using it in stand-alone embedded systems.
290 The implementation is in straight, hand-tuned ANSI C. It is not
291 at all modular. (Sorry!) It uses a lot of macros. To be at all
292 usable, this code should be compiled using an optimizing compiler
293 (for example gcc -O3) that can simplify expressions and control
294 paths. (FAQ: some macros import variables as arguments rather than
295 declare locals because people reported that some debuggers
296 otherwise get confused.)
298 OPTION DEFAULT VALUE
300 Compilation Environment options:
302 __STD_C derived from C compiler defines
303 WIN32 NOT defined
304 HAVE_MEMCPY defined
305 USE_MEMCPY 1 if HAVE_MEMCPY is defined
306 HAVE_MMAP defined as 1
307 MMAP_CLEARS 1
308 HAVE_MREMAP 0 unless linux defined
309 malloc_getpagesize derived from system #includes, or 4096 if not
310 HAVE_USR_INCLUDE_MALLOC_H NOT defined
311 LACKS_UNISTD_H NOT defined unless WIN32
312 LACKS_SYS_PARAM_H NOT defined unless WIN32
313 LACKS_SYS_MMAN_H NOT defined unless WIN32
314 LACKS_FCNTL_H NOT defined
316 Changing default word sizes:
318 INTERNAL_SIZE_T size_t
319 MALLOC_ALIGNMENT 2 * sizeof(INTERNAL_SIZE_T)
320 PTR_UINT unsigned long
321 CHUNK_SIZE_T unsigned long
323 Configuration and functionality options:
325 USE_DL_PREFIX NOT defined
326 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
327 USE_MALLOC_LOCK NOT defined
328 DEBUG NOT defined
329 REALLOC_ZERO_BYTES_FREES NOT defined
330 MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
331 TRIM_FASTBINS 0
332 FIRST_SORTED_BIN_SIZE 512
334 Options for customizing MORECORE:
336 MORECORE sbrk
337 MORECORE_CONTIGUOUS 1
338 MORECORE_CANNOT_TRIM NOT defined
339 MMAP_AS_MORECORE_SIZE (1024 * 1024)
341 Tuning options that are also dynamically changeable via mallopt:
343 DEFAULT_MXFAST 64
344 DEFAULT_TRIM_THRESHOLD 256 * 1024
345 DEFAULT_TOP_PAD 0
346 DEFAULT_MMAP_THRESHOLD 256 * 1024
347 DEFAULT_MMAP_MAX 65536
349 There are several other #defined constants and macros that you
350 probably don't want to touch unless you are extending or adapting malloc.
351 */
353 /* RN: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
354 #if 0
356 /*
357 WIN32 sets up defaults for MS environment and compilers.
358 Otherwise defaults are for unix.
359 */
361 /* #define WIN32 */
363 #ifdef WIN32
365 #define WIN32_LEAN_AND_MEAN
366 #include <windows.h>
368 /* Win32 doesn't supply or need the following headers */
369 #define LACKS_UNISTD_H
370 #define LACKS_SYS_PARAM_H
371 #define LACKS_SYS_MMAN_H
373 /* Use the supplied emulation of sbrk */
374 #define MORECORE sbrk
375 #define MORECORE_CONTIGUOUS 1
376 #define MORECORE_FAILURE ((void*)(-1))
378 /* Use the supplied emulation of mmap and munmap */
379 #define HAVE_MMAP 1
380 #define MUNMAP_FAILURE (-1)
381 #define MMAP_CLEARS 1
383 /* These values don't really matter in windows mmap emulation */
384 #define MAP_PRIVATE 1
385 #define MAP_ANONYMOUS 2
386 #define PROT_READ 1
387 #define PROT_WRITE 2
389 /* Emulation functions defined at the end of this file */
391 /* If USE_MALLOC_LOCK, use supplied critical-section-based lock functions */
392 #ifdef USE_MALLOC_LOCK
393 static int slwait(int *sl);
394 static int slrelease(int *sl);
395 #endif
397 static long getpagesize(void);
398 static long getregionsize(void);
399 static void *sbrk(long size);
400 static void *mmap(void *ptr, long size, long prot, long type, long handle, long arg);
401 static long munmap(void *ptr, long size);
403 static void vminfo (unsigned long*free, unsigned long*reserved, unsigned long*committed);
404 static int cpuinfo (int whole, unsigned long*kernel, unsigned long*user);
406 #endif
408 /*
409 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
410 compiler, or a C compiler sufficiently close to ANSI to get away
411 with it.
412 */
414 #ifndef __STD_C
415 #if defined(__STDC__) || defined(_cplusplus)
416 #define __STD_C 1
417 #else
418 #define __STD_C 0
419 #endif
420 #endif /*__STD_C*/
423 /*
424 Void_t* is the pointer type that malloc should say it returns
425 */
427 #ifndef Void_t
428 #if (__STD_C || defined(WIN32))
429 #define Void_t void
430 #else
431 #define Void_t char
432 #endif
433 #endif /*Void_t*/
435 #if __STD_C
436 #include <stddef.h> /* for size_t */
437 #else
438 #include <sys/types.h>
439 #endif
441 #ifdef __cplusplus
442 extern "C" {
443 #endif
445 /* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
447 /* #define LACKS_UNISTD_H */
449 #ifndef LACKS_UNISTD_H
450 #include <unistd.h>
451 #endif
453 /* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
455 /* #define LACKS_SYS_PARAM_H */
458 #include <stdio.h> /* needed for malloc_stats */
459 #include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
462 /*
463 Debugging:
465 Because freed chunks may be overwritten with bookkeeping fields, this
466 malloc will often die when freed memory is overwritten by user
467 programs. This can be very effective (albeit in an annoying way)
468 in helping track down dangling pointers.
470 If you compile with -DDEBUG, a number of assertion checks are
471 enabled that will catch more memory errors. You probably won't be
472 able to make much sense of the actual assertion errors, but they
473 should help you locate incorrectly overwritten memory. The
474 checking is fairly extensive, and will slow down execution
475 noticeably. Calling malloc_stats or mallinfo with DEBUG set will
476 attempt to check every non-mmapped allocated and free chunk in the
477 course of computing the summaries. (By nature, mmapped regions
478 cannot be checked very much automatically.)
480 Setting DEBUG may also be helpful if you are trying to modify
481 this code. The assertions in the check routines spell out in more
482 detail the assumptions and invariants underlying the algorithms.
484 Setting DEBUG does NOT provide an automated mechanism for checking
485 that all accesses to malloced memory stay within their
486 bounds. However, there are several add-ons and adaptations of this
487 or other mallocs available that do this.
488 */
490 #if DEBUG
491 #include <assert.h>
492 #else
493 #define assert(x) ((void)0)
494 #endif
496 /*
497 The unsigned integer type used for comparing any two chunk sizes.
498 This should be at least as wide as size_t, but should not be signed.
499 */
501 #ifndef CHUNK_SIZE_T
502 #define CHUNK_SIZE_T unsigned long
503 #endif
505 /*
506 The unsigned integer type used to hold addresses when they are
507 manipulated as integers. Except that it is not defined on all
508 systems, intptr_t would suffice.
509 */
510 #ifndef PTR_UINT
511 #define PTR_UINT unsigned long
512 #endif
515 /*
516 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
517 of chunk sizes.
519 The default version is the same as size_t.
521 While not strictly necessary, it is best to define this as an
522 unsigned type, even if size_t is a signed type. This may avoid some
523 artificial size limitations on some systems.
525 On a 64-bit machine, you may be able to reduce malloc overhead by
526 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
527 expense of not being able to handle more than 2^32 of malloced
528 space. If this limitation is acceptable, you are encouraged to set
529 this unless you are on a platform requiring 16byte alignments. In
530 this case the alignment requirements turn out to negate any
531 potential advantages of decreasing size_t word size.
533 Implementors: Beware of the possible combinations of:
534 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
535 and might be the same width as int or as long
536 - size_t might have different width and signedness as INTERNAL_SIZE_T
537 - int and long might be 32 or 64 bits, and might be the same width
538 To deal with this, most comparisons and difference computations
539 among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being
540 aware of the fact that casting an unsigned int to a wider long does
541 not sign-extend. (This also makes checking for negative numbers
542 awkward.) Some of these casts result in harmless compiler warnings
543 on some systems.
544 */
546 #ifndef INTERNAL_SIZE_T
547 #define INTERNAL_SIZE_T size_t
548 #endif
550 /* The corresponding word size */
551 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
555 /*
556 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
557 It must be a power of two at least 2 * SIZE_SZ, even on machines
558 for which smaller alignments would suffice. It may be defined as
559 larger than this though. Note however that code and data structures
560 are optimized for the case of 8-byte alignment.
561 */
564 #ifndef MALLOC_ALIGNMENT
565 #define MALLOC_ALIGNMENT (2 * SIZE_SZ)
566 #endif
568 /* The corresponding bit mask value */
569 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
573 /*
574 REALLOC_ZERO_BYTES_FREES should be set if a call to
575 realloc with zero bytes should be the same as a call to free.
576 Some people think it should. Otherwise, since this malloc
577 returns a unique pointer for malloc(0), so does realloc(p, 0).
578 */
580 /* #define REALLOC_ZERO_BYTES_FREES */
582 /*
583 TRIM_FASTBINS controls whether free() of a very small chunk can
584 immediately lead to trimming. Setting to true (1) can reduce memory
585 footprint, but will almost always slow down programs that use a lot
586 of small chunks.
588 Define this only if you are willing to give up some speed to more
589 aggressively reduce system-level memory footprint when releasing
590 memory in programs that use many small chunks. You can get
591 essentially the same effect by setting MXFAST to 0, but this can
592 lead to even greater slowdowns in programs using many small chunks.
593 TRIM_FASTBINS is an in-between compile-time option, that disables
594 only those chunks bordering topmost memory from being placed in
595 fastbins.
596 */
598 #ifndef TRIM_FASTBINS
599 #define TRIM_FASTBINS 0
600 #endif
603 /*
604 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
605 This is necessary when you only want to use this malloc in one part
606 of a program, using your regular system malloc elsewhere.
607 */
609 /* #define USE_DL_PREFIX */
612 /*
613 USE_MALLOC_LOCK causes wrapper functions to surround each
614 callable routine with pthread mutex lock/unlock.
616 USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
617 */
620 /* #define USE_MALLOC_LOCK */
623 /*
624 If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
625 actually a wrapper function that first calls MALLOC_PREACTION, then
626 calls the internal routine, and follows it with
627 MALLOC_POSTACTION. This is needed for locking, but you can also use
628 this, without USE_MALLOC_LOCK, for purposes of interception,
629 instrumentation, etc. It is a sad fact that using wrappers often
630 noticeably degrades performance of malloc-intensive programs.
631 */
633 #ifdef USE_MALLOC_LOCK
634 #define USE_PUBLIC_MALLOC_WRAPPERS
635 #else
636 /* #define USE_PUBLIC_MALLOC_WRAPPERS */
637 #endif
640 /*
641 Two-phase name translation.
642 All of the actual routines are given mangled names.
643 When wrappers are used, they become the public callable versions.
644 When DL_PREFIX is used, the callable names are prefixed.
645 */
647 #ifndef USE_PUBLIC_MALLOC_WRAPPERS
648 #define cALLOc public_cALLOc
649 #define fREe public_fREe
650 #define cFREe public_cFREe
651 #define mALLOc public_mALLOc
652 #define mEMALIGn public_mEMALIGn
653 #define rEALLOc public_rEALLOc
654 #define vALLOc public_vALLOc
655 #define pVALLOc public_pVALLOc
656 #define mALLINFo public_mALLINFo
657 #define mALLOPt public_mALLOPt
658 #define mTRIm public_mTRIm
659 #define mSTATs public_mSTATs
660 #define mUSABLe public_mUSABLe
661 #define iCALLOc public_iCALLOc
662 #define iCOMALLOc public_iCOMALLOc
663 #endif
665 #ifdef USE_DL_PREFIX
666 #define public_cALLOc dlcalloc
667 #define public_fREe dlfree
668 #define public_cFREe dlcfree
669 #define public_mALLOc dlmalloc
670 #define public_mEMALIGn dlmemalign
671 #define public_rEALLOc dlrealloc
672 #define public_vALLOc dlvalloc
673 #define public_pVALLOc dlpvalloc
674 #define public_mALLINFo dlmallinfo
675 #define public_mALLOPt dlmallopt
676 #define public_mTRIm dlmalloc_trim
677 #define public_mSTATs dlmalloc_stats
678 #define public_mUSABLe dlmalloc_usable_size
679 #define public_iCALLOc dlindependent_calloc
680 #define public_iCOMALLOc dlindependent_comalloc
681 #else /* USE_DL_PREFIX */
682 #define public_cALLOc calloc
683 #define public_fREe free
684 #define public_cFREe cfree
685 #define public_mALLOc malloc
686 #define public_mEMALIGn memalign
687 #define public_rEALLOc realloc
688 #define public_vALLOc valloc
689 #define public_pVALLOc pvalloc
690 #define public_mALLINFo mallinfo
691 #define public_mALLOPt mallopt
692 #define public_mTRIm malloc_trim
693 #define public_mSTATs malloc_stats
694 #define public_mUSABLe malloc_usable_size
695 #define public_iCALLOc independent_calloc
696 #define public_iCOMALLOc independent_comalloc
697 #endif /* USE_DL_PREFIX */
700 /*
701 HAVE_MEMCPY should be defined if you are not otherwise using
702 ANSI STD C, but still have memcpy and memset in your C library
703 and want to use them in calloc and realloc. Otherwise simple
704 macro versions are defined below.
706 USE_MEMCPY should be defined as 1 if you actually want to
707 have memset and memcpy called. People report that the macro
708 versions are faster than libc versions on some systems.
710 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
711 (of <= 36 bytes) are manually unrolled in realloc and calloc.
712 */
714 #define HAVE_MEMCPY
716 #ifndef USE_MEMCPY
717 #ifdef HAVE_MEMCPY
718 #define USE_MEMCPY 1
719 #else
720 #define USE_MEMCPY 0
721 #endif
722 #endif
725 #if (__STD_C || defined(HAVE_MEMCPY))
727 #ifdef WIN32
728 /* On Win32 memset and memcpy are already declared in windows.h */
729 #else
730 #if __STD_C
731 void* memset(void*, int, size_t);
732 void* memcpy(void*, const void*, size_t);
733 #else
734 Void_t* memset();
735 Void_t* memcpy();
736 #endif
737 #endif
738 #endif
740 /*
741 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
742 malloc fails to be able to return memory, either because memory is
743 exhausted or because of illegal arguments.
745 By default, sets errno if running on STD_C platform, else does nothing.
746 */
748 #ifndef MALLOC_FAILURE_ACTION
749 #if __STD_C
750 #define MALLOC_FAILURE_ACTION \
751 errno = ENOMEM;
753 #else
754 #define MALLOC_FAILURE_ACTION
755 #endif
756 #endif
758 /*
759 MORECORE-related declarations. By default, rely on sbrk
760 */
763 #ifdef LACKS_UNISTD_H
764 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
765 #if __STD_C
766 extern Void_t* sbrk(ptrdiff_t);
767 #else
768 extern Void_t* sbrk();
769 #endif
770 #endif
771 #endif
773 /*
774 MORECORE is the name of the routine to call to obtain more memory
775 from the system. See below for general guidance on writing
776 alternative MORECORE functions, as well as a version for WIN32 and a
777 sample version for pre-OSX macos.
778 */
780 #ifndef MORECORE
781 #define MORECORE sbrk
782 #endif
784 /*
785 MORECORE_FAILURE is the value returned upon failure of MORECORE
786 as well as mmap. Since it cannot be an otherwise valid memory address,
787 and must reflect values of standard sys calls, you probably ought not
788 try to redefine it.
789 */
791 #ifndef MORECORE_FAILURE
792 #define MORECORE_FAILURE (-1)
793 #endif
795 /*
796 If MORECORE_CONTIGUOUS is true, take advantage of fact that
797 consecutive calls to MORECORE with positive arguments always return
798 contiguous increasing addresses. This is true of unix sbrk. Even
799 if not defined, when regions happen to be contiguous, malloc will
800 permit allocations spanning regions obtained from different
801 calls. But defining this when applicable enables some stronger
802 consistency checks and space efficiencies.
803 */
805 #ifndef MORECORE_CONTIGUOUS
806 #define MORECORE_CONTIGUOUS 1
807 #endif
809 /*
810 Define MORECORE_CANNOT_TRIM if your version of MORECORE
811 cannot release space back to the system when given negative
812 arguments. This is generally necessary only if you are using
813 a hand-crafted MORECORE function that cannot handle negative arguments.
814 */
816 /* #define MORECORE_CANNOT_TRIM */
819 /*
820 Define HAVE_MMAP as true to optionally make malloc() use mmap() to
821 allocate very large blocks. These will be returned to the
822 operating system immediately after a free(). Also, if mmap
823 is available, it is used as a backup strategy in cases where
824 MORECORE fails to provide space from system.
826 This malloc is best tuned to work with mmap for large requests.
827 If you do not have mmap, operations involving very large chunks (1MB
828 or so) may be slower than you'd like.
829 */
831 #ifndef HAVE_MMAP
832 #define HAVE_MMAP 1
833 #endif
835 #if HAVE_MMAP
836 /*
837 Standard unix mmap using /dev/zero clears memory so calloc doesn't
838 need to.
839 */
841 #ifndef MMAP_CLEARS
842 #define MMAP_CLEARS 1
843 #endif
845 #else /* no mmap */
846 #ifndef MMAP_CLEARS
847 #define MMAP_CLEARS 0
848 #endif
849 #endif
852 /*
853 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
854 sbrk fails, and mmap is used as a backup (which is done only if
855 HAVE_MMAP). The value must be a multiple of page size. This
856 backup strategy generally applies only when systems have "holes" in
857 address space, so sbrk cannot perform contiguous expansion, but
858 there is still space available on system. On systems for which
859 this is known to be useful (i.e. most linux kernels), this occurs
860 only when programs allocate huge amounts of memory. Between this,
861 and the fact that mmap regions tend to be limited, the size should
862 be large, to avoid too many mmap calls and thus avoid running out
863 of kernel resources.
864 */
866 #ifndef MMAP_AS_MORECORE_SIZE
867 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
868 #endif
870 /*
871 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
872 large blocks. This is currently only possible on Linux with
873 kernel versions newer than 1.3.77.
874 */
876 #ifndef HAVE_MREMAP
877 #ifdef linux
878 #define HAVE_MREMAP 1
879 #else
880 #define HAVE_MREMAP 0
881 #endif
883 #endif /* HAVE_MREMAP */
886 /*
887 The system page size. To the extent possible, this malloc manages
888 memory from the system in page-size units. Note that this value is
889 cached during initialization into a field of malloc_state. So even
890 if malloc_getpagesize is a function, it is only called once.
892 The following mechanics for getpagesize were adapted from bsd/gnu
893 getpagesize.h. If none of the system-probes here apply, a value of
894 4096 is used, which should be OK: If they don't apply, then using
895 the actual value probably doesn't impact performance.
896 */
899 #ifndef malloc_getpagesize
901 #ifndef LACKS_UNISTD_H
902 # include <unistd.h>
903 #endif
905 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
906 # ifndef _SC_PAGE_SIZE
907 # define _SC_PAGE_SIZE _SC_PAGESIZE
908 # endif
909 # endif
911 # ifdef _SC_PAGE_SIZE
912 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
913 # else
914 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
915 extern size_t getpagesize();
916 # define malloc_getpagesize getpagesize()
917 # else
918 # ifdef WIN32 /* use supplied emulation of getpagesize */
919 # define malloc_getpagesize getpagesize()
920 # else
921 # ifndef LACKS_SYS_PARAM_H
922 # include <sys/param.h>
923 # endif
924 # ifdef EXEC_PAGESIZE
925 # define malloc_getpagesize EXEC_PAGESIZE
926 # else
927 # ifdef NBPG
928 # ifndef CLSIZE
929 # define malloc_getpagesize NBPG
930 # else
931 # define malloc_getpagesize (NBPG * CLSIZE)
932 # endif
933 # else
934 # ifdef NBPC
935 # define malloc_getpagesize NBPC
936 # else
937 # ifdef PAGESIZE
938 # define malloc_getpagesize PAGESIZE
939 # else /* just guess */
940 # define malloc_getpagesize (4096)
941 # endif
942 # endif
943 # endif
944 # endif
945 # endif
946 # endif
947 # endif
948 #endif
950 /*
951 This version of malloc supports the standard SVID/XPG mallinfo
952 routine that returns a struct containing usage properties and
953 statistics. It should work on any SVID/XPG compliant system that has
954 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
955 install such a thing yourself, cut out the preliminary declarations
956 as described above and below and save them in a malloc.h file. But
957 there's no compelling reason to bother to do this.)
959 The main declaration needed is the mallinfo struct that is returned
960 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
961 bunch of fields that are not even meaningful in this version of
962 malloc. These fields are instead filled by mallinfo() with
963 other numbers that might be of interest.
965 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
966 /usr/include/malloc.h file that includes a declaration of struct
967 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
968 version is declared below. These must be precisely the same for
969 mallinfo() to work. The original SVID version of this struct,
970 defined on most systems with mallinfo, declares all fields as
971 ints. But some others define as unsigned long. If your system
972 defines the fields using a type of different width than listed here,
973 you must #include your system version and #define
974 HAVE_USR_INCLUDE_MALLOC_H.
975 */
977 /* #define HAVE_USR_INCLUDE_MALLOC_H */
979 #ifdef HAVE_USR_INCLUDE_MALLOC_H
980 #include "/usr/include/malloc.h"
981 #else
983 /* SVID2/XPG mallinfo structure */
985 struct mallinfo {
986 int arena; /* non-mmapped space allocated from system */
987 int ordblks; /* number of free chunks */
988 int smblks; /* number of fastbin blocks */
989 int hblks; /* number of mmapped regions */
990 int hblkhd; /* space in mmapped regions */
991 int usmblks; /* maximum total allocated space */
992 int fsmblks; /* space available in freed fastbin blocks */
993 int uordblks; /* total allocated space */
994 int fordblks; /* total free space */
995 int keepcost; /* top-most, releasable (via malloc_trim) space */
996 };
998 /*
999 SVID/XPG defines four standard parameter numbers for mallopt,
1000 normally defined in malloc.h. Only one of these (M_MXFAST) is used
1001 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
1002 so setting them has no effect. But this malloc also supports other
1003 options in mallopt described below.
1004 */
1005 #endif
1008 /* ---------- description of public routines ------------ */
1010 /*
1011 malloc(size_t n)
1012 Returns a pointer to a newly allocated chunk of at least n bytes, or null
1013 if no space is available. Additionally, on failure, errno is
1014 set to ENOMEM on ANSI C systems.
1016 If n is zero, malloc returns a minimum-sized chunk. (The minimum
1017 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
1018 systems.) On most systems, size_t is an unsigned type, so calls
1019 with negative arguments are interpreted as requests for huge amounts
1020 of space, which will often fail. The maximum supported value of n
1021 differs across systems, but is in all cases less than the maximum
1022 representable value of a size_t.
1023 */
1024 #if __STD_C
1025 Void_t* public_mALLOc(size_t);
1026 #else
1027 Void_t* public_mALLOc();
1028 #endif
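/*
  Illustrative usage sketch; alloc_message() is a hypothetical caller, not
  part of this file:

    char* alloc_message(size_t n)
    {
      char* p = (char*)malloc(n);
      if (p == 0) {
        // malloc has already performed MALLOC_FAILURE_ACTION; just propagate
        return 0;
      }
      // at least n bytes are usable here (malloc(0) still yields a valid,
      // minimum-sized chunk rather than a null pointer)
      return p;
    }
*/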
1030 /*
1031 free(Void_t* p)
1032 Releases the chunk of memory pointed to by p, that had been previously
1033 allocated using malloc or a related routine such as realloc.
1034 It has no effect if p is null. It can have arbitrary (i.e., bad!)
1035 effects if p has already been freed.
1037 Unless disabled (using mallopt), freeing very large spaces will
1038 when possible, automatically trigger operations that give
1039 back unused memory to the system, thus reducing program footprint.
1040 */
1041 #if __STD_C
1042 void public_fREe(Void_t*);
1043 #else
1044 void public_fREe();
1045 #endif
1047 /*
1048 calloc(size_t n_elements, size_t element_size);
1049 Returns a pointer to n_elements * element_size bytes, with all locations
1050 set to zero.
1051 */
1052 #if __STD_C
1053 Void_t* public_cALLOc(size_t, size_t);
1054 #else
1055 Void_t* public_cALLOc();
1056 #endif
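/*
  Illustrative sketch; make_table() and struct entry are hypothetical:

    struct entry { int key; int value; };

    struct entry* make_table(size_t n)
    {
      // n * sizeof(struct entry) bytes, with every byte initialized to zero
      return (struct entry*)calloc(n, sizeof(struct entry));
    }
*/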
1058 /*
1059 realloc(Void_t* p, size_t n)
1060 Returns a pointer to a chunk of size n that contains the same data
1061 as does chunk p up to the minimum of (n, p's size) bytes, or null
1062 if no space is available.
1064 The returned pointer may or may not be the same as p. The algorithm
1065 prefers extending p when possible, otherwise it employs the
1066 equivalent of a malloc-copy-free sequence.
1068 If p is null, realloc is equivalent to malloc.
1070 If space is not available, realloc returns null, errno is set (if on
1071 ANSI) and p is NOT freed.
1073 if n is for fewer bytes than already held by p, the newly unused
1074 space is lopped off and freed if possible. Unless the #define
1075 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
1076 zero (re)allocates a minimum-sized chunk.
1078 Large chunks that were internally obtained via mmap will always
1079 be reallocated using malloc-copy-free sequences unless
1080 the system supports MREMAP (currently only linux).
1082 The old unix realloc convention of allowing the last-free'd chunk
1083 to be used as an argument to realloc is not supported.
1084 */
1085 #if __STD_C
1086 Void_t* public_rEALLOc(Void_t*, size_t);
1087 #else
1088 Void_t* public_rEALLOc();
1089 #endif
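/*
  Because a failed realloc returns null but does NOT free the old block,
  callers normally keep the original pointer until the call succeeds.
  Illustrative sketch; grow_buffer() is hypothetical:

    int grow_buffer(char** buf, size_t new_size)
    {
      char* p = (char*)realloc(*buf, new_size);
      if (p == 0)
        return -1;   // *buf is untouched and still owned by the caller
      *buf = p;
      return 0;
    }
*/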
1091 /*
1092 memalign(size_t alignment, size_t n);
1093 Returns a pointer to a newly allocated chunk of n bytes, aligned
1094 in accord with the alignment argument.
1096 The alignment argument should be a power of two. If the argument is
1097 not a power of two, the nearest greater power is used.
1098 8-byte alignment is guaranteed by normal malloc calls, so don't
1099 bother calling memalign with an argument of 8 or less.
1101 Overreliance on memalign is a sure way to fragment space.
1102 */
1103 #if __STD_C
1104 Void_t* public_mEMALIGn(size_t, size_t);
1105 #else
1106 Void_t* public_mEMALIGn();
1107 #endif
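/*
  Illustrative sketch; a cache-line-aligned allocation via a hypothetical
  helper (the alignment must be a power of two, and values of 8 or less are
  already guaranteed by plain malloc):

    void* alloc_aligned_64(size_t n)
    {
      return memalign(64, n);   // 64-byte aligned, at least n usable bytes
    }
*/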
1109 /*
1110 valloc(size_t n);
1111 Equivalent to memalign(pagesize, n), where pagesize is the page
1112 size of the system. If the pagesize is unknown, 4096 is used.
1113 */
1114 #if __STD_C
1115 Void_t* public_vALLOc(size_t);
1116 #else
1117 Void_t* public_vALLOc();
1118 #endif
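/*
  Illustrative sketch (hypothetical): a page-aligned scratch buffer.

    void* page_buf = valloc(PAGE_SIZE);   // equivalent to memalign(pagesize, n)
*/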
1122 /*
1123 mallopt(int parameter_number, int parameter_value)
1124 Sets tunable parameters. The format is to provide a
1125 (parameter-number, parameter-value) pair. mallopt then sets the
1126 corresponding parameter to the argument value if it can (i.e., so
1127 long as the value is meaningful), and returns 1 if successful else
1128 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
1129 normally defined in malloc.h. Only one of these (M_MXFAST) is used
1130 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
1131 so setting them has no effect. But this malloc also supports four
1132 other options in mallopt. See below for details. Briefly, supported
1133 parameters are as follows (listed defaults are for "typical"
1134 configurations).
1136 Symbol param # default allowed param values
1137 M_MXFAST 1 64 0-80 (0 disables fastbins)
1138 M_TRIM_THRESHOLD -1 256*1024 any (-1U disables trimming)
1139 M_TOP_PAD -2 0 any
1140 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
1141 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
1142 */
1143 #if __STD_C
1144 int public_mALLOPt(int, int);
1145 #else
1146 int public_mALLOPt();
1147 #endif
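/*
  Illustrative tuning sketch with hypothetical values; each call returns 1
  on success and 0 if the parameter or value is not accepted:

    void tune_allocator(void)
    {
      mallopt(M_MXFAST, 0);                   // disable fastbins entirely
      mallopt(M_TRIM_THRESHOLD, 128 * 1024);  // trim more eagerly
      mallopt(M_TOP_PAD, 64 * 1024);          // pad each MORECORE request
    }
*/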
1150 /*
1151 mallinfo()
1152 Returns (by copy) a struct containing various summary statistics:
1154 arena: current total non-mmapped bytes allocated from system
1155 ordblks: the number of free chunks
1156 smblks: the number of fastbin blocks (i.e., small chunks that
1157 have been freed but not yet reused or consolidated)
1158 hblks: current number of mmapped regions
1159 hblkhd: total bytes held in mmapped regions
1160 usmblks: the maximum total allocated space. This will be greater
1161 than current total if trimming has occurred.
1162 fsmblks: total bytes held in fastbin blocks
1163 uordblks: current total allocated space (normal or mmapped)
1164 fordblks: total free space
1165 keepcost: the maximum number of bytes that could ideally be released
1166 back to system via malloc_trim. ("ideally" means that
1167 it ignores page restrictions etc.)
1169 Because these fields are ints, but internal bookkeeping may
1170 be kept as longs, the reported values may wrap around zero and
1171 thus be inaccurate.
1172 */
1173 #if __STD_C
1174 struct mallinfo public_mALLINFo(void);
1175 #else
1176 struct mallinfo public_mALLINFo();
1177 #endif
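/*
  Illustrative sketch (hypothetical): printing a few of the fields described
  above.

    void report_heap(void)
    {
      struct mallinfo mi = mallinfo();
      printf("arena=%d in-use=%d free=%d keepcost=%d\n",
             mi.arena, mi.uordblks, mi.fordblks, mi.keepcost);
    }
*/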
1179 /*
1180 independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
1182 independent_calloc is similar to calloc, but instead of returning a
1183 single cleared space, it returns an array of pointers to n_elements
1184 independent elements that can hold contents of size elem_size, each
1185 of which starts out cleared, and can be independently freed,
1186 realloc'ed etc. The elements are guaranteed to be adjacently
1187 allocated (this is not guaranteed to occur with multiple callocs or
1188 mallocs), which may also improve cache locality in some
1189 applications.
1191 The "chunks" argument is optional (i.e., may be null, which is
1192 probably the most typical usage). If it is null, the returned array
1193 is itself dynamically allocated and should also be freed when it is
1194 no longer needed. Otherwise, the chunks array must be of at least
1195 n_elements in length. It is filled in with the pointers to the
1196 chunks.
1198 In either case, independent_calloc returns this pointer array, or
1199 null if the allocation failed. If n_elements is zero and "chunks"
1200 is null, it returns a chunk representing an array with zero elements
1201 (which should be freed if not wanted).
1203 Each element must be individually freed when it is no longer
1204 needed. If you'd like to instead be able to free all at once, you
1205 should instead use regular calloc and assign pointers into this
1206 space to represent elements. (In this case though, you cannot
1207 independently free elements.)
1209 independent_calloc simplifies and speeds up implementations of many
1210 kinds of pools. It may also be useful when constructing large data
1211 structures that initially have a fixed number of fixed-sized nodes,
1212 but the number is not known at compile time, and some of the nodes
1213 may later need to be freed. For example:
1215 struct Node { int item; struct Node* next; };
1217 struct Node* build_list() {
1218 struct Node** pool;
1219 int n = read_number_of_nodes_needed();
1220 if (n <= 0) return 0;
1221 pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
1222 if (pool == 0) die();
1223 // organize into a linked list...
1224 struct Node* first = pool[0];
1225 for (int i = 0; i < n-1; ++i)
1226 pool[i]->next = pool[i+1];
1227 free(pool); // Can now free the array (or not, if it is needed later)
1228 return first;
1229 }
1230 */
1231 #if __STD_C
1232 Void_t** public_iCALLOc(size_t, size_t, Void_t**);
1233 #else
1234 Void_t** public_iCALLOc();
1235 #endif
1237 /*
1238 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
1240 independent_comalloc allocates, all at once, a set of n_elements
1241 chunks with sizes indicated in the "sizes" array. It returns
1242 an array of pointers to these elements, each of which can be
1243 independently freed, realloc'ed etc. The elements are guaranteed to
1244 be adjacently allocated (this is not guaranteed to occur with
1245 multiple callocs or mallocs), which may also improve cache locality
1246 in some applications.
1248 The "chunks" argument is optional (i.e., may be null). If it is null
1249 the returned array is itself dynamically allocated and should also
1250 be freed when it is no longer needed. Otherwise, the chunks array
1251 must be of at least n_elements in length. It is filled in with the
1252 pointers to the chunks.
1254 In either case, independent_comalloc returns this pointer array, or
1255 null if the allocation failed. If n_elements is zero and chunks is
1256 null, it returns a chunk representing an array with zero elements
1257 (which should be freed if not wanted).
1259 Each element must be individually freed when it is no longer
1260 needed. If you'd like to instead be able to free all at once, you
1261 should instead use a single regular malloc, and assign pointers at
1262 particular offsets in the aggregate space. (In this case though, you
1263 cannot independently free elements.)
1265 independent_comalloc differs from independent_calloc in that each
1266 element may have a different size, and also that it does not
1267 automatically clear elements.
1269 independent_comalloc can be used to speed up allocation in cases
1270 where several structs or objects must always be allocated at the
1271 same time. For example:
1273 struct Head { ... }
1274 struct Foot { ... }
1276 void send_message(char* msg) {
1277 int msglen = strlen(msg);
1278 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1279 void* chunks[3];
1280 if (independent_comalloc(3, sizes, chunks) == 0)
1281 die();
1282 struct Head* head = (struct Head*)(chunks[0]);
1283 char* body = (char*)(chunks[1]);
1284 struct Foot* foot = (struct Foot*)(chunks[2]);
1285 // ...
1286 }
1288 In general though, independent_comalloc is worth using only for
1289 larger values of n_elements. For small values, you probably won't
1290 detect enough difference from series of malloc calls to bother.
1292 Overuse of independent_comalloc can increase overall memory usage,
1293 since it cannot reuse existing noncontiguous small chunks that
1294 might be available for some of the elements.
1295 */
1296 #if __STD_C
1297 Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
1298 #else
1299 Void_t** public_iCOMALLOc();
1300 #endif
1303 /*
1304 pvalloc(size_t n);
1305 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1306 round up n to nearest pagesize.
1307 */
1308 #if __STD_C
1309 Void_t* public_pVALLOc(size_t);
1310 #else
1311 Void_t* public_pVALLOc();
1312 #endif
1314 /*
1315 cfree(Void_t* p);
1316 Equivalent to free(p).
1318 cfree is needed/defined on some systems that pair it with calloc,
1319 for odd historical reasons (such as: cfree is used in example
1320 code in the first edition of K&R).
1321 */
1322 #if __STD_C
1323 void public_cFREe(Void_t*);
1324 #else
1325 void public_cFREe();
1326 #endif
1328 /*
1329 malloc_trim(size_t pad);
1331 If possible, gives memory back to the system (via negative
1332 arguments to sbrk) if there is unused memory at the `high' end of
1333 the malloc pool. You can call this after freeing large blocks of
1334 memory to potentially reduce the system-level memory requirements
1335 of a program. However, it cannot guarantee to reduce memory. Under
1336 some allocation patterns, some large free blocks of memory will be
1337 locked between two used chunks, so they cannot be given back to
1338 the system.
1340 The `pad' argument to malloc_trim represents the amount of free
1341 trailing space to leave untrimmed. If this argument is zero,
1342 only the minimum amount of memory to maintain internal data
1343 structures will be left (one page or less). Non-zero arguments
1344 can be supplied to maintain enough trailing space to service
1345 future expected allocations without having to re-obtain memory
1346 from the system.
1348 Malloc_trim returns 1 if it actually released any memory, else 0.
1349 On systems that do not support "negative sbrks", it will always
1350 return 0.
1351 */
1352 #if __STD_C
1353 int public_mTRIm(size_t);
1354 #else
1355 int public_mTRIm();
1356 #endif
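/*
  Illustrative sketch (hypothetical): after releasing a large working set,
  ask for unused top-of-heap memory to be returned, keeping 64KB of slack
  for upcoming allocations.

    void release_unused(void)
    {
      if (malloc_trim(64 * 1024))
        printf("heap trimmed\n");
    }

  In this Mini-OS configuration MORECORE_CANNOT_TRIM is set, so the call is
  expected to simply return 0 here; it remains harmless and portable.
*/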
1358 /*
1359 malloc_usable_size(Void_t* p);
1361 Returns the number of bytes you can actually use in
1362 an allocated chunk, which may be more than you requested (although
1363 often not) due to alignment and minimum size constraints.
1364 You can use this many bytes without worrying about
1365 overwriting other allocated objects. This is not a particularly great
1366 programming practice. malloc_usable_size can be more useful in
1367 debugging and assertions, for example:
1369 p = malloc(n);
1370 assert(malloc_usable_size(p) >= 256);
1372 */
1373 #if __STD_C
1374 size_t public_mUSABLe(Void_t*);
1375 #else
1376 size_t public_mUSABLe();
1377 #endif
1379 /*
1380 malloc_stats();
1381 Prints on stderr the amount of space obtained from the system (both
1382 via sbrk and mmap), the maximum amount (which may be more than
1383 current if malloc_trim and/or munmap got called), and the current
1384 number of bytes allocated via malloc (or realloc, etc) but not yet
1385 freed. Note that this is the number of bytes allocated, not the
1386 number requested. It will be larger than the number requested
1387 because of alignment and bookkeeping overhead. Because it includes
1388 alignment wastage as being in use, this figure may be greater than
1389 zero even when no user-level chunks are allocated.
1391 The reported current and maximum system memory can be inaccurate if
1392 a program makes other calls to system memory allocation functions
1393 (normally sbrk) outside of malloc.
1395 malloc_stats prints only the most commonly interesting statistics.
1396 More information can be obtained by calling mallinfo.
1398 */
1399 #if __STD_C
1400 void public_mSTATs();
1401 #else
1402 void public_mSTATs();
1403 #endif
1405 /* mallopt tuning options */
1407 /*
1408 M_MXFAST is the maximum request size used for "fastbins", special bins
1409 that hold returned chunks without consolidating their spaces. This
1410 enables future requests for chunks of the same size to be handled
1411 very quickly, but can increase fragmentation, and thus increase the
1412 overall memory footprint of a program.
1414 This malloc manages fastbins very conservatively yet still
1415 efficiently, so fragmentation is rarely a problem for values less
1416 than or equal to the default. The maximum supported value of MXFAST
1417 is 80. You wouldn't want it any higher than this anyway. Fastbins
1418 are designed especially for use with many small structs, objects or
1419 strings -- the default handles structs/objects/arrays with sizes up
1420 to 16 4byte fields, or small strings representing words, tokens,
1421 etc. Using fastbins for larger objects normally worsens
1422 fragmentation without improving speed.
1424 M_MXFAST is set in REQUEST size units. It is internally used in
1425 chunksize units, which adds padding and alignment. You can reduce
1426 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1427 algorithm to be a closer approximation of fifo-best-fit in all cases,
1428 not just for larger requests, but will generally cause it to be
1429 slower.
1430 */
1433 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1434 #ifndef M_MXFAST
1435 #define M_MXFAST 1
1436 #endif
1438 #ifndef DEFAULT_MXFAST
1439 #define DEFAULT_MXFAST 64
1440 #endif
1443 /*
1444 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1445 to keep before releasing via malloc_trim in free().
1447 Automatic trimming is mainly useful in long-lived programs.
1448 Because trimming via sbrk can be slow on some systems, and can
1449 sometimes be wasteful (in cases where programs immediately
1450 afterward allocate more large chunks) the value should be high
1451 enough so that your overall system performance would improve by
1452 releasing this much memory.
1454 The trim threshold and the mmap control parameters (see below)
1455 can be traded off with one another. Trimming and mmapping are
1456 two different ways of releasing unused memory back to the
1457 system. Between these two, it is often possible to keep
1458 system-level demands of a long-lived program down to a bare
1459 minimum. For example, in one test suite of sessions measuring
1460 the XF86 X server on Linux, using a trim threshold of 128K and a
1461 mmap threshold of 192K led to near-minimal long term resource
1462 consumption.
1464 If you are using this malloc in a long-lived program, it should
1465 pay to experiment with these values. As a rough guide, you
1466 might set to a value close to the average size of a process
1467 (program) running on your system. Releasing this much memory
1468 would allow such a process to run in memory. Generally, it's
1469 worth it to tune for trimming rather than memory mapping when a
1470 program undergoes phases where several large chunks are
1471 allocated and released in ways that can reuse each other's
1472 storage, perhaps mixed with phases where there are no such
1473 chunks at all. And in well-behaved long-lived programs,
1474 controlling release of large blocks via trimming versus mapping
1475 is usually faster.
1477 However, in most programs, these parameters serve mainly as
1478 protection against the system-level effects of carrying around
1479 massive amounts of unneeded memory. Since frequent calls to
1480 sbrk, mmap, and munmap otherwise degrade performance, the default
1481 parameters are set to relatively high values that serve only as
1482 safeguards.
1484 The trim value must be greater than page size to have any useful
1485 effect. To disable trimming completely, you can set to
1486 (unsigned long)(-1)
1488 Trim settings interact with fastbin (MXFAST) settings: Unless
1489 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1490 freeing a chunk with size less than or equal to MXFAST. Trimming is
1491 instead delayed until subsequent freeing of larger chunks. However,
1492 you can still force an attempted trim by calling malloc_trim.
1494 Also, trimming is not generally possible in cases where
1495 the main arena is obtained via mmap.
1497 Note that the trick some people use of mallocing a huge space and
1498 then freeing it at program startup, in an attempt to reserve system
1499 memory, doesn't have the intended effect under automatic trimming,
1500 since that memory will immediately be returned to the system.
1501 */
1503 #define M_TRIM_THRESHOLD -1
1505 #ifndef DEFAULT_TRIM_THRESHOLD
1506 #define DEFAULT_TRIM_THRESHOLD (256 * 1024)
1507 #endif
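/*
  Illustrative sketch: trimming can also be disabled at run time, as
  described above:

    mallopt(M_TRIM_THRESHOLD, -1);   // i.e. (unsigned long)(-1): never trim
*/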
1509 /*
1510 M_TOP_PAD is the amount of extra `padding' space to allocate or
1511 retain whenever sbrk is called. It is used in two ways internally:
1513 * When sbrk is called to extend the top of the arena to satisfy
1514 a new malloc request, this much padding is added to the sbrk
1515 request.
1517 * When malloc_trim is called automatically from free(),
1518 it is used as the `pad' argument.
1520 In both cases, the actual amount of padding is rounded
1521 so that the end of the arena is always a system page boundary.
1523 The main reason for using padding is to avoid calling sbrk so
1524 often. Having even a small pad greatly reduces the likelihood
1525 that nearly every malloc request during program start-up (or
1526 after trimming) will invoke sbrk, which needlessly wastes
1527 time.
1529 Automatic rounding-up to page-size units is normally sufficient
1530 to avoid measurable overhead, so the default is 0. However, in
1531 systems where sbrk is relatively slow, it can pay to increase
1532 this value, at the expense of carrying around more memory than
1533 the program needs.
1534 */
1536 #define M_TOP_PAD -2
1538 #ifndef DEFAULT_TOP_PAD
1539 #define DEFAULT_TOP_PAD (0)
1540 #endif
1542 /*
1543 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1544 to service a request. Requests of at least this size that cannot
1545 be allocated using already-existing space will be serviced via mmap.
1546 (If enough normal freed space already exists it is used instead.)
1548 Using mmap segregates relatively large chunks of memory so that
1549 they can be individually obtained and released from the host
1550 system. A request serviced through mmap is never reused by any
1551 other request (at least not directly; the system may just so
1552 happen to remap successive requests to the same locations).
1554 Segregating space in this way has the benefits that:
1556 1. Mmapped space can ALWAYS be individually released back
1557 to the system, which helps keep the system level memory
1558 demands of a long-lived program low.
1559 2. Mapped memory can never become `locked' between
1560 other chunks, as can happen with normally allocated chunks, which
1561 means that even trimming via malloc_trim would not release them.
1562 3. On some systems with "holes" in address spaces, mmap can obtain
1563 memory that sbrk cannot.
1565 However, it has the disadvantages that:
1567 1. The space cannot be reclaimed, consolidated, and then
1568 used to service later requests, as happens with normal chunks.
1569 2. It can lead to more wastage because of mmap page alignment
1570 requirements
1571 3. It causes malloc performance to be more dependent on host
1572 system memory management support routines which may vary in
1573 implementation quality and may impose arbitrary
1574 limitations. Generally, servicing a request via normal
1575 malloc steps is faster than going through a system's mmap.
1577 The advantages of mmap nearly always outweigh disadvantages for
1578 "large" chunks, but the value of "large" varies across systems. The
1579 default is an empirically derived value that works well in most
1580 systems.
1581 */
1583 #define M_MMAP_THRESHOLD -3
1585 #ifndef DEFAULT_MMAP_THRESHOLD
1586 #define DEFAULT_MMAP_THRESHOLD (256 * 1024)
1587 #endif
1589 /*
1590 M_MMAP_MAX is the maximum number of requests to simultaneously
1591 service using mmap. This parameter exists because
1592 . Some systems have a limited number of internal tables for
1593 use by mmap, and using more than a few of them may degrade
1594 performance.
1596 The default is set to a value that serves only as a safeguard.
1597 Setting to 0 disables use of mmap for servicing large requests. If
1598 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1599 to non-zero values in mallopt will fail.
1600 */
1602 #define M_MMAP_MAX -4
1604 #ifndef DEFAULT_MMAP_MAX
1605 #if HAVE_MMAP
1606 #define DEFAULT_MMAP_MAX (65536)
1607 #else
1608 #define DEFAULT_MMAP_MAX (0)
1609 #endif
1610 #endif
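/*
  Illustrative sketch (not part of the original source): how an
  application could tune the parameters above at run time through
  mallopt (public_mALLOPt in this file).  The specific values are
  arbitrary examples, not recommendations.
*/
#if 0
static void tune_malloc_example(void) {
  mallopt(M_TRIM_THRESHOLD, 128 * 1024);  /* trim when top slack exceeds 128K */
  mallopt(M_TOP_PAD,        64 * 1024);   /* pad each sbrk request by 64K     */
  mallopt(M_MMAP_THRESHOLD, 1024 * 1024); /* mmap requests of 1M and larger   */
  mallopt(M_MMAP_MAX,       0);           /* or disable mmap service entirely */
}
#endif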
1612 #ifdef __cplusplus
1613 }; /* end of extern "C" */
1614 #endif
1617 /* RN XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
1618 #endif
1620 /*
1621 ========================================================================
1622 To make a fully customizable malloc.h header file, cut everything
1623 above this line, put into file malloc.h, edit to suit, and #include it
1624 on the next line, as well as in programs that use this malloc.
1625 ========================================================================
1626 */
1628 /* #include "malloc.h" */
1630 /* --------------------- public wrappers ---------------------- */
1632 #ifdef USE_PUBLIC_MALLOC_WRAPPERS
1634 /* Declare all routines as internal */
1635 #if __STD_C
1636 static Void_t* mALLOc(size_t);
1637 static void fREe(Void_t*);
1638 static Void_t* rEALLOc(Void_t*, size_t);
1639 static Void_t* mEMALIGn(size_t, size_t);
1640 static Void_t* vALLOc(size_t);
1641 static Void_t* pVALLOc(size_t);
1642 static Void_t* cALLOc(size_t, size_t);
1643 static Void_t** iCALLOc(size_t, size_t, Void_t**);
1644 static Void_t** iCOMALLOc(size_t, size_t*, Void_t**);
1645 static void cFREe(Void_t*);
1646 static int mTRIm(size_t);
1647 static size_t mUSABLe(Void_t*);
1648 static void mSTATs();
1649 static int mALLOPt(int, int);
1650 static struct mallinfo mALLINFo(void);
1651 #else
1652 static Void_t* mALLOc();
1653 static void fREe();
1654 static Void_t* rEALLOc();
1655 static Void_t* mEMALIGn();
1656 static Void_t* vALLOc();
1657 static Void_t* pVALLOc();
1658 static Void_t* cALLOc();
1659 static Void_t** iCALLOc();
1660 static Void_t** iCOMALLOc();
1661 static void cFREe();
1662 static int mTRIm();
1663 static size_t mUSABLe();
1664 static void mSTATs();
1665 static int mALLOPt();
1666 static struct mallinfo mALLINFo();
1667 #endif
1669 /*
1670 MALLOC_PREACTION and MALLOC_POSTACTION should be
1671 defined to return 0 on success, and nonzero on failure.
1672 The return value of MALLOC_POSTACTION is currently ignored
1673 in wrapper functions since there is no reasonable default
1674 action to take on failure.
1675 */
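/*
  Minimal sketch (not part of the original source) of how an embedder
  could supply its own MALLOC_PREACTION/MALLOC_POSTACTION pair, here
  using GCC-style test-and-set builtins.  The lock word and helper names
  are invented for the example, and the block is left disabled so it
  does not clash with the definitions below; per the contract above,
  both macros must evaluate to 0 on success.
*/
#if 0
static volatile int malloc_lock_word;       /* 0 = unlocked, 1 = locked */

static int malloc_lock_acquire(void) {
  while (__sync_lock_test_and_set(&malloc_lock_word, 1))
    ;                                       /* spin until we observe 0 */
  return 0;                                 /* 0 == success */
}

static int malloc_lock_release(void) {
  __sync_lock_release(&malloc_lock_word);   /* store 0 with release semantics */
  return 0;
}

#define MALLOC_PREACTION   malloc_lock_acquire()
#define MALLOC_POSTACTION  malloc_lock_release()
#endif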
1678 #ifdef USE_MALLOC_LOCK
1680 #ifdef WIN32
1682 static int mALLOC_MUTEx;
1683 #define MALLOC_PREACTION slwait(&mALLOC_MUTEx)
1684 #define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)
1686 #else
1688 #include <pthread.h>
1690 static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
1692 #define MALLOC_PREACTION pthread_mutex_lock(&mALLOC_MUTEx)
1693 #define MALLOC_POSTACTION pthread_mutex_unlock(&mALLOC_MUTEx)
1695 #endif /* WIN32 */
1697 #else
1699 /* Substitute anything you like for these */
1701 #define MALLOC_PREACTION (0)
1702 #define MALLOC_POSTACTION (0)
1704 #endif /* USE_MALLOC_LOCK */
1706 Void_t* public_mALLOc(size_t bytes) {
1707 Void_t* m;
1708 if (MALLOC_PREACTION != 0) {
1709 return 0;
1711 m = mALLOc(bytes);
1712 if (MALLOC_POSTACTION != 0) {
1714 return m;
1717 void public_fREe(Void_t* m) {
1718 if (MALLOC_PREACTION != 0) {
1719 return;
1721 fREe(m);
1722 if (MALLOC_POSTACTION != 0) {
1726 Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
1727 if (MALLOC_PREACTION != 0) {
1728 return 0;
1730 m = rEALLOc(m, bytes);
1731 if (MALLOC_POSTACTION != 0) {
1733 return m;
1736 Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
1737 Void_t* m;
1738 if (MALLOC_PREACTION != 0) {
1739 return 0;
1741 m = mEMALIGn(alignment, bytes);
1742 if (MALLOC_POSTACTION != 0) {
1744 return m;
1747 Void_t* public_vALLOc(size_t bytes) {
1748 Void_t* m;
1749 if (MALLOC_PREACTION != 0) {
1750 return 0;
1752 m = vALLOc(bytes);
1753 if (MALLOC_POSTACTION != 0) {
1755 return m;
1758 Void_t* public_pVALLOc(size_t bytes) {
1759 Void_t* m;
1760 if (MALLOC_PREACTION != 0) {
1761 return 0;
1763 m = pVALLOc(bytes);
1764 if (MALLOC_POSTACTION != 0) {
1766 return m;
1769 Void_t* public_cALLOc(size_t n, size_t elem_size) {
1770 Void_t* m;
1771 if (MALLOC_PREACTION != 0) {
1772 return 0;
1774 m = cALLOc(n, elem_size);
1775 if (MALLOC_POSTACTION != 0) {
1777 return m;
1781 Void_t** public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks) {
1782 Void_t** m;
1783 if (MALLOC_PREACTION != 0) {
1784 return 0;
1786 m = iCALLOc(n, elem_size, chunks);
1787 if (MALLOC_POSTACTION != 0) {
1789 return m;
1792 Void_t** public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks) {
1793 Void_t** m;
1794 if (MALLOC_PREACTION != 0) {
1795 return 0;
1797 m = iCOMALLOc(n, sizes, chunks);
1798 if (MALLOC_POSTACTION != 0) {
1800 return m;
1803 void public_cFREe(Void_t* m) {
1804 if (MALLOC_PREACTION != 0) {
1805 return;
1807 cFREe(m);
1808 if (MALLOC_POSTACTION != 0) {
1812 int public_mTRIm(size_t s) {
1813 int result;
1814 if (MALLOC_PREACTION != 0) {
1815 return 0;
1817 result = mTRIm(s);
1818 if (MALLOC_POSTACTION != 0) {
1820 return result;
1823 size_t public_mUSABLe(Void_t* m) {
1824 size_t result;
1825 if (MALLOC_PREACTION != 0) {
1826 return 0;
1828 result = mUSABLe(m);
1829 if (MALLOC_POSTACTION != 0) {
1831 return result;
1834 void public_mSTATs() {
1835 if (MALLOC_PREACTION != 0) {
1836 return;
1838 mSTATs();
1839 if (MALLOC_POSTACTION != 0) {
1843 struct mallinfo public_mALLINFo() {
1844 struct mallinfo m;
1845 if (MALLOC_PREACTION != 0) {
1846 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1847 return nm;
1849 m = mALLINFo();
1850 if (MALLOC_POSTACTION != 0) {
1852 return m;
1855 int public_mALLOPt(int p, int v) {
1856 int result;
1857 if (MALLOC_PREACTION != 0) {
1858 return 0;
1860 result = mALLOPt(p, v);
1861 if (MALLOC_POSTACTION != 0) {
1863 return result;
1866 #endif
1870 /* ------------- Optional versions of memcopy ---------------- */
1873 #if USE_MEMCPY
1875 /*
1876 Note: memcpy is ONLY invoked with non-overlapping regions,
1877 so the (usually slower) memmove is not needed.
1878 */
1880 #define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1881 #define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1883 #else /* !USE_MEMCPY */
1885 /* Use Duff's device for good zeroing/copying performance. */
1887 #define MALLOC_ZERO(charp, nbytes) \
1888 do { \
1889 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1890 CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1891 long mcn; \
1892 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1893 switch (mctmp) { \
1894 case 0: for(;;) { *mzp++ = 0; \
1895 case 7: *mzp++ = 0; \
1896 case 6: *mzp++ = 0; \
1897 case 5: *mzp++ = 0; \
1898 case 4: *mzp++ = 0; \
1899 case 3: *mzp++ = 0; \
1900 case 2: *mzp++ = 0; \
1901 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1902 } \
1903 } while(0)
1905 #define MALLOC_COPY(dest,src,nbytes) \
1906 do { \
1907 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1908 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1909 CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1910 long mcn; \
1911 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1912 switch (mctmp) { \
1913 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1914 case 7: *mcdst++ = *mcsrc++; \
1915 case 6: *mcdst++ = *mcsrc++; \
1916 case 5: *mcdst++ = *mcsrc++; \
1917 case 4: *mcdst++ = *mcsrc++; \
1918 case 3: *mcdst++ = *mcsrc++; \
1919 case 2: *mcdst++ = *mcsrc++; \
1920 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1921 } \
1922 } while(0)
1924 #endif
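/*
  Readability sketch (not part of the original source): the fallback
  MALLOC_ZERO/MALLOC_COPY macros above unroll their loops eight words
  per pass using Duff's device.  The same zeroing logic, written as a
  plain function over unsigned longs:
*/
#if 0
static void duff_zero_words(unsigned long* p, unsigned long nwords) {
  unsigned long passes = (nwords + 7) / 8;  /* unrolled iterations */
  if (nwords == 0)
    return;
  switch (nwords % 8) {                     /* jump into the loop body */
  case 0: do { *p++ = 0;
  case 7:      *p++ = 0;
  case 6:      *p++ = 0;
  case 5:      *p++ = 0;
  case 4:      *p++ = 0;
  case 3:      *p++ = 0;
  case 2:      *p++ = 0;
  case 1:      *p++ = 0;
          } while (--passes > 0);
  }
}
#endif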
1926 /* ------------------ MMAP support ------------------ */
1929 #if HAVE_MMAP
1931 #ifndef LACKS_FCNTL_H
1932 #include <fcntl.h>
1933 #endif
1935 #ifndef LACKS_SYS_MMAN_H
1936 #include <sys/mman.h>
1937 #endif
1939 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1940 #define MAP_ANONYMOUS MAP_ANON
1941 #endif
1943 /*
1944 Nearly all versions of mmap support MAP_ANONYMOUS,
1945 so the following is unlikely to be needed, but is
1946 supplied just in case.
1947 */
1949 #ifndef MAP_ANONYMOUS
1951 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1953 #define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
1954 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1955 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
1956 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
1958 #else
1960 #define MMAP(addr, size, prot, flags) \
1961 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1963 #endif
1966 #endif /* HAVE_MMAP */
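/*
  Usage sketch (not part of the original source), only meaningful when
  HAVE_MMAP is set: obtain one anonymous, zero-filled region with the
  MMAP wrapper above and release it again.  The failure check mirrors
  the one used later in sYSMALLOc; `size` is assumed to already be a
  multiple of the page size, as the real callers arrange.
*/
#if 0
static int anonymous_map_example(size_t size) {
  char* mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
  if (mm == (char*)(MORECORE_FAILURE))
    return -1;                              /* mapping failed */
  /* ... mm now points at `size` bytes of zero-filled, page-aligned memory ... */
  return munmap(mm, size);                  /* hand the pages straight back */
}
#endif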
1969 /*
1970 ----------------------- Chunk representations -----------------------
1971 */
1974 /*
1975 This struct declaration is misleading (but accurate and necessary).
1976 It declares a "view" into memory allowing access to necessary
1977 fields at known offsets from a given base. See explanation below.
1978 */
1980 struct malloc_chunk {
1982 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1983 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1985 struct malloc_chunk* fd; /* double links -- used only if free. */
1986 struct malloc_chunk* bk;
1987 };
1990 typedef struct malloc_chunk* mchunkptr;
1992 /*
1993 malloc_chunk details:
1995 (The following includes lightly edited explanations by Colin Plumb.)
1997 Chunks of memory are maintained using a `boundary tag' method as
1998 described in e.g., Knuth or Standish. (See the paper by Paul
1999 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
2000 survey of such techniques.) Sizes of free chunks are stored both
2001 in the front of each chunk and at the end. This makes
2002 consolidating fragmented chunks into bigger chunks very fast. The
2003 size fields also hold bits representing whether chunks are free or
2004 in use.
2006 An allocated chunk looks like this:
2009 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2010 | Size of previous chunk, if allocated | |
2011 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2012 | Size of chunk, in bytes |P|
2013 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2014 | User data starts here... .
2015 . .
2016 . (malloc_usable_space() bytes) .
2017 . |
2018 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2019 | Size of chunk |
2020 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2023 Where "chunk" is the front of the chunk for the purpose of most of
2024 the malloc code, but "mem" is the pointer that is returned to the
2025 user. "Nextchunk" is the beginning of the next contiguous chunk.
2027 Chunks always begin on even word boundaries, so the mem portion
2028 (which is returned to the user) is also on an even word boundary, and
2029 thus at least double-word aligned.
2031 Free chunks are stored in circular doubly-linked lists, and look like this:
2033 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2034 | Size of previous chunk |
2035 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2036 `head:' | Size of chunk, in bytes |P|
2037 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2038 | Forward pointer to next chunk in list |
2039 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2040 | Back pointer to previous chunk in list |
2041 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2042 | Unused space (may be 0 bytes long) .
2043 . .
2044 . |
2045 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2046 `foot:' | Size of chunk, in bytes |
2047 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2049 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
2050 chunk size (which is always a multiple of two words), is an in-use
2051 bit for the *previous* chunk. If that bit is *clear*, then the
2052 word before the current chunk size contains the previous chunk
2053 size, and can be used to find the front of the previous chunk.
2054 The very first chunk allocated always has this bit set,
2055 preventing access to non-existent (or non-owned) memory. If
2056 prev_inuse is set for any given chunk, then you CANNOT determine
2057 the size of the previous chunk, and might even get a memory
2058 addressing fault when trying to do so.
2060 Note that the `foot' of the current chunk is actually represented
2061 as the prev_size of the NEXT chunk. This makes it easier to
2062 deal with alignments etc but can be very confusing when trying
2063 to extend or adapt this code.
2065 The two exceptions to all this are
2067 1. The special chunk `top' doesn't bother using the
2068 trailing size field since there is no next contiguous chunk
2069 that would have to index off it. After initialization, `top'
2070 is forced to always exist. If it would become less than
2071 MINSIZE bytes long, it is replenished.
2073 2. Chunks allocated via mmap, which have the second-lowest-order
2074 bit (IS_MMAPPED) set in their size fields. Because they are
2075 allocated one-by-one, each must contain its own trailing size field.
2077 */
2079 /*
2080 ---------- Size and alignment checks and conversions ----------
2081 */
2083 /* conversion from malloc headers to user pointers, and back */
2085 #define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
2086 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
2088 /* The smallest possible chunk */
2089 #define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
2091 /* The smallest size we can malloc is an aligned minimal chunk */
2093 #define MINSIZE \
2094 (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
2096 /* Check if m has acceptable alignment */
2098 #define aligned_OK(m) (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0)
2101 /*
2102 Check if a request is so large that it would wrap around zero when
2103 padded and aligned. To simplify some other code, the bound is made
2104 low enough so that adding MINSIZE will also not wrap around zero.
2105 */
2107 #define REQUEST_OUT_OF_RANGE(req) \
2108 ((CHUNK_SIZE_T)(req) >= \
2109 (CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-2 * MINSIZE))
2111 /* pad request bytes into a usable size -- internal version */
2113 #define request2size(req) \
2114 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
2115 MINSIZE : \
2116 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
2118 /* Same, except also perform argument check */
2120 #define checked_request2size(req, sz) \
2121 if (REQUEST_OUT_OF_RANGE(req)) { \
2122 MALLOC_FAILURE_ACTION; \
2123 return 0; \
2124 } \
2125 (sz) = request2size(req);
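/*
  Worked example (not part of the original source), assuming a 32-bit
  build where SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and hence MINSIZE == 16:

      request2size(1)  -> 1 + 4 + 7 = 12, below MINSIZE, so 16
      request2size(13) -> (13 + 4 + 7) & ~7 = 24
      request2size(24) -> (24 + 4 + 7) & ~7 = 32

  Only SIZE_SZ bytes of overhead are charged per request because an
  in-use chunk may also use the prev_size field of the following chunk
  as part of its own storage (see the chunk diagrams above).
*/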
2127 /*
2128 --------------- Physical chunk operations ---------------
2129 */
2132 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
2133 #define PREV_INUSE 0x1
2135 /* extract inuse bit of previous chunk */
2136 #define prev_inuse(p) ((p)->size & PREV_INUSE)
2139 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
2140 #define IS_MMAPPED 0x2
2142 /* check for mmap()'ed chunk */
2143 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
2145 /*
2146 Bits to mask off when extracting size
2148 Note: IS_MMAPPED is intentionally not masked off from size field in
2149 macros for which mmapped chunks should never be seen. This should
2150 cause helpful core dumps to occur if it is tried by accident by
2151 people extending or adapting this malloc.
2152 */
2153 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
2155 /* Get size, ignoring use bits */
2156 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
2159 /* Ptr to next physical malloc_chunk. */
2160 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
2162 /* Ptr to previous physical malloc_chunk */
2163 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
2165 /* Treat space at ptr + offset as a chunk */
2166 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
2168 /* extract p's inuse bit */
2169 #define inuse(p)\
2170 ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
2172 /* set/clear chunk as being inuse without otherwise disturbing */
2173 #define set_inuse(p)\
2174 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
2176 #define clear_inuse(p)\
2177 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
2180 /* check/set/clear inuse bits in known places */
2181 #define inuse_bit_at_offset(p, s)\
2182 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
2184 #define set_inuse_bit_at_offset(p, s)\
2185 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
2187 #define clear_inuse_bit_at_offset(p, s)\
2188 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
2191 /* Set size at head, without disturbing its use bit */
2192 #define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))
2194 /* Set size/use field */
2195 #define set_head(p, s) ((p)->size = (s))
2197 /* Set size at footer (only when chunk is not in use) */
2198 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
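/*
  Traversal sketch (not part of the original source): the macros above
  are all that is needed to walk physically adjacent chunks.  The loop
  below counts in-use chunks between `p` and the top chunk, much like
  the debugging checks later in this file; `p` must be a valid,
  non-mmapped chunk and `top` the arena's top chunk.
*/
#if 0
static int count_inuse_chunks(mchunkptr p, mchunkptr top) {
  int n = 0;
  while (p != top && chunksize(p) >= MINSIZE) {
    if (inuse(p))                 /* reads PREV_INUSE of the next chunk */
      n++;
    p = next_chunk(p);            /* advance by this chunk's size */
  }
  return n;
}
#endif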
2201 /*
2202 -------------------- Internal data structures --------------------
2204 All internal state is held in an instance of malloc_state defined
2205 below. There are no other static variables, except in two optional
2206 cases:
2207 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
2208 * If HAVE_MMAP is true, but mmap doesn't support
2209 MAP_ANONYMOUS, a dummy file descriptor for mmap.
2211 Beware of lots of tricks that minimize the total bookkeeping space
2212 requirements. The result is a little over 1K bytes (for 4-byte
2213 pointers and size_t).
2214 */
2216 /*
2217 Bins
2219 An array of bin headers for free chunks. Each bin is doubly
2220 linked. The bins are approximately proportionally (log) spaced.
2221 There are a lot of these bins (128). This may look excessive, but
2222 works very well in practice. Most bins hold sizes that are
2223 unusual as malloc request sizes, but are more usual for fragments
2224 and consolidated sets of chunks, which is what these bins hold, so
2225 they can be found quickly. All procedures maintain the invariant
2226 that no consolidated chunk physically borders another one, so each
2227 chunk in a list is known to be preceded and followed by either
2228 inuse chunks or the ends of memory.
2230 Chunks in bins are kept in size order, with ties going to the
2231 approximately least recently used chunk. Ordering isn't needed
2232 for the small bins, which all contain the same-sized chunks, but
2233 facilitates best-fit allocation for larger chunks. These lists
2234 are just sequential. Keeping them in order almost never requires
2235 enough traversal to warrant using fancier ordered data
2236 structures.
2238 Chunks of the same size are linked with the most
2239 recently freed at the front, and allocations are taken from the
2240 back. This results in LRU (FIFO) allocation order, which tends
2241 to give each chunk an equal opportunity to be consolidated with
2242 adjacent freed chunks, resulting in larger free chunks and less
2243 fragmentation.
2245 To simplify use in double-linked lists, each bin header acts
2246 as a malloc_chunk. This avoids special-casing for headers.
2247 But to conserve space and improve locality, we allocate
2248 only the fd/bk pointers of bins, and then use repositioning tricks
2249 to treat these as the fields of a malloc_chunk*.
2250 */
2252 typedef struct malloc_chunk* mbinptr;
2254 /* addressing -- note that bin_at(0) does not exist */
2255 #define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
2257 /* analog of ++bin */
2258 #define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
2260 /* Reminders about list directionality within bins */
2261 #define first(b) ((b)->fd)
2262 #define last(b) ((b)->bk)
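/*
  Illustrative note (not part of the original source): bin_at(m, i)
  points (SIZE_SZ << 1) bytes BEFORE &m->bins[i << 1], i.e. where a real
  chunk's prev_size and size fields would sit.  The fake chunk's fd and
  bk members therefore overlay bins[i << 1] and bins[(i << 1) + 1]
  (assuming the usual layout with no padding between the size fields and
  the pointers), so a bin header can sit on its own circular list and be
  manipulated with the same fd/bk operations as any real chunk, while
  costing only two words per bin instead of four.
*/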
2264 /* Take a chunk off a bin list */
2265 #define unlink(P, BK, FD) { \
2266 FD = P->fd; \
2267 BK = P->bk; \
2268 FD->bk = BK; \
2269 BK->fd = FD; \
2272 /*
2273 Indexing
2275 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
2276 8 bytes apart. Larger bins are approximately logarithmically spaced:
2278 64 bins of size 8
2279 32 bins of size 64
2280 16 bins of size 512
2281 8 bins of size 4096
2282 4 bins of size 32768
2283 2 bins of size 262144
2284 1 bin of size what's left
2286 The bins top out around 1MB because we expect to service large
2287 requests via mmap.
2288 */
2290 #define NBINS 96
2291 #define NSMALLBINS 32
2292 #define SMALLBIN_WIDTH 8
2293 #define MIN_LARGE_SIZE 256
2295 #define in_smallbin_range(sz) \
2296 ((CHUNK_SIZE_T)(sz) < (CHUNK_SIZE_T)MIN_LARGE_SIZE)
2298 #define smallbin_index(sz) (((unsigned)(sz)) >> 3)
2300 /*
2301 Compute index for size. We expect this to be inlined when
2302 compiled with optimization, else not, which works out well.
2303 */
2304 static int largebin_index(unsigned int sz) {
2305 unsigned int x = sz >> SMALLBIN_WIDTH;
2306 unsigned int m; /* bit position of highest set bit of m */
2308 if (x >= 0x10000) return NBINS-1;
2310 /* On intel, use BSRL instruction to find highest bit */
2311 #if defined(__GNUC__) && defined(i386)
2313 __asm__("bsrl %1,%0\n\t"
2314 : "=r" (m)
2315 : "g" (x));
2317 #else
2319 /*
2320 Based on branch-free nlz algorithm in chapter 5 of Henry
2321 S. Warren Jr's book "Hacker's Delight".
2322 */
2324 unsigned int n = ((x - 0x100) >> 16) & 8;
2325 x <<= n;
2326 m = ((x - 0x1000) >> 16) & 4;
2327 n += m;
2328 x <<= m;
2329 m = ((x - 0x4000) >> 16) & 2;
2330 n += m;
2331 x = (x << m) >> 14;
2332 m = 13 - n + (x & ~(x>>1));
2334 #endif
2336 /* Use next 2 bits to create finer-granularity bins */
2337 return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
2340 #define bin_index(sz) \
2341 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
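/*
  Worked example (not part of the original source):

      bin_index(40)  == smallbin_index(40)  == 40 >> 3 == 5
      bin_index(256) == largebin_index(256) == 32   (first large bin)
      bin_index(512) == largebin_index(512) == 36

  Chunk sizes are always multiples of 8, so smallbins 2..31 each hold
  exactly one size (16, 24, ..., 248), and the large bins above them get
  progressively wider, as described in the Indexing comment above.
*/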
2343 /*
2344 FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
2345 first bin that is maintained in sorted order. This must
2346 be the smallest size corresponding to a given bin.
2348 Normally, this should be MIN_LARGE_SIZE. But you can weaken
2349 best fit guarantees to sometimes speed up malloc by increasing value.
2350 Doing this means that malloc may choose a chunk that is
2351 non-best-fitting by up to the width of the bin.
2353 Some useful cutoff values:
2354 512 - all bins sorted
2355 2560 - leaves bins <= 64 bytes wide unsorted
2356 12288 - leaves bins <= 512 bytes wide unsorted
2357 65536 - leaves bins <= 4096 bytes wide unsorted
2358 262144 - leaves bins <= 32768 bytes wide unsorted
2359 -1 - no bins sorted (not recommended!)
2360 */
2362 #define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
2363 /* #define FIRST_SORTED_BIN_SIZE 65536 */
2365 /*
2366 Unsorted chunks
2368 All remainders from chunk splits, as well as all returned chunks,
2369 are first placed in the "unsorted" bin. They are then placed
2370 in regular bins after malloc gives them ONE chance to be used before
2371 binning. So, basically, the unsorted_chunks list acts as a queue,
2372 with chunks being placed on it in free (and malloc_consolidate),
2373 and taken off (to be either used or placed in bins) in malloc.
2374 */
2376 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2377 #define unsorted_chunks(M) (bin_at(M, 1))
2379 /*
2380 Top
2382 The top-most available chunk (i.e., the one bordering the end of
2383 available memory) is treated specially. It is never included in
2384 any bin, is used only if no other chunk is available, and is
2385 released back to the system if it is very large (see
2386 M_TRIM_THRESHOLD). Because top initially
2387 points to its own bin with initial zero size, thus forcing
2388 extension on the first malloc request, we avoid having any special
2389 code in malloc to check whether it even exists yet. But we still
2390 need to do so when getting memory from system, so we make
2391 initial_top treat the bin as a legal but unusable chunk during the
2392 interval between initialization and the first call to
2393 sYSMALLOc. (This is somewhat delicate, since it relies on
2394 the 2 preceding words to be zero during this interval as well.)
2395 */
2397 /* Conveniently, the unsorted bin can be used as dummy top on first call */
2398 #define initial_top(M) (unsorted_chunks(M))
2400 /*
2401 Binmap
2403 To help compensate for the large number of bins, a one-level index
2404 structure is used for bin-by-bin searching. `binmap' is a
2405 bitvector recording whether bins are definitely empty so they can
2406 be skipped over during traversals. The bits are NOT always
2407 cleared as soon as bins are empty, but instead only
2408 when they are noticed to be empty during traversal in malloc.
2409 */
2411 /* Conservatively use 32 bits per map word, even if on 64bit system */
2412 #define BINMAPSHIFT 5
2413 #define BITSPERMAP (1U << BINMAPSHIFT)
2414 #define BINMAPSIZE (NBINS / BITSPERMAP)
2416 #define idx2block(i) ((i) >> BINMAPSHIFT)
2417 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2419 #define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2420 #define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2421 #define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
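/*
  Worked example (not part of the original source): with BINMAPSHIFT == 5,
  bin 37 lives in map word idx2block(37) == 37 >> 5 == 1 under bit
  idx2bit(37) == 1 << (37 & 31) == 0x20.  mark_bin(m, 37) ORs that bit
  into m->binmap[1], unmark_bin clears it, and get_binmap tests it.
*/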
2423 /*
2424 Fastbins
2426 An array of lists holding recently freed small chunks. Fastbins
2427 are not doubly linked. It is faster to single-link them, and
2428 since chunks are never removed from the middles of these lists,
2429 double linking is not necessary. Also, unlike regular bins, they
2430 are not even processed in FIFO order (they use faster LIFO) since
2431 ordering doesn't much matter in the transient contexts in which
2432 fastbins are normally used.
2434 Chunks in fastbins keep their inuse bit set, so they cannot
2435 be consolidated with other free chunks. malloc_consolidate
2436 releases all chunks in fastbins and consolidates them with
2437 other free chunks.
2438 */
2440 typedef struct malloc_chunk* mfastbinptr;
2442 /* offset 2 to use otherwise unindexable first 2 bins */
2443 #define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
2445 /* The maximum fastbin request size we support */
2446 #define MAX_FAST_SIZE 80
2448 #define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
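/*
  Worked example (not part of the original source), again for a 32-bit
  build where request2size(MAX_FAST_SIZE) == 88:

      fastbin_index(16) == (16 >> 3) - 2 == 0    (smallest possible chunk)
      fastbin_index(88) == (88 >> 3) - 2 == 9

  so NFASTBINS == 10 and the fastbins cover chunk sizes 16, 24, ..., 88.
*/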
2450 /*
2451 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2452 that triggers automatic consolidation of possibly-surrounding
2453 fastbin chunks. This is a heuristic, so the exact value should not
2454 matter too much. It is defined at half the default trim threshold as a
2455 compromise heuristic to only attempt consolidation if it is likely
2456 to lead to trimming. However, it is not dynamically tunable, since
2457 consolidation reduces fragmentation surrounding large chunks even
2458 if trimming is not used.
2459 */
2461 #define FASTBIN_CONSOLIDATION_THRESHOLD \
2462 ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
2464 /*
2465 Since the lowest 2 bits in max_fast don't matter in size comparisons,
2466 they are used as flags.
2467 */
2469 /*
2470 ANYCHUNKS_BIT held in max_fast indicates that there may be any
2471 freed chunks at all. It is set true when entering a chunk into any
2472 bin.
2473 */
2475 #define ANYCHUNKS_BIT (1U)
2477 #define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT))
2478 #define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT)
2479 #define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT)
2481 /*
2482 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2483 some fastbin chunks. It is set true on entering a chunk into any
2484 fastbin, and cleared only in malloc_consolidate.
2485 */
2487 #define FASTCHUNKS_BIT (2U)
2489 #define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT))
2490 #define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2491 #define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT))
2493 /*
2494 Set value of max_fast.
2495 Use impossibly small value if 0.
2496 */
2498 #define set_max_fast(M, s) \
2499 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2500 ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
2502 #define get_max_fast(M) \
2503 ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
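/*
  Worked example (not part of the original source): on a 32-bit build,
  set_max_fast(M, 64) stores request2size(64) == 72, possibly ORed with
  the two flag bits above.  Because every request size nb is a multiple
  of 8, the test `nb <= av->max_fast` in malloc is unaffected: the flags
  can raise the stored value by at most 3, never enough to cross the
  next multiple of 8.
*/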
2506 /*
2507 morecore_properties is a status word holding dynamically discovered
2508 or controlled properties of the morecore function
2509 */
2511 #define MORECORE_CONTIGUOUS_BIT (1U)
2513 #define contiguous(M) \
2514 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT))
2515 #define noncontiguous(M) \
2516 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0)
2517 #define set_contiguous(M) \
2518 ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT)
2519 #define set_noncontiguous(M) \
2520 ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
2523 /*
2524 ----------- Internal state representation and initialization -----------
2525 */
2527 struct malloc_state {
2529 /* The maximum chunk size to be eligible for fastbin */
2530 INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
2532 /* Fastbins */
2533 mfastbinptr fastbins[NFASTBINS];
2535 /* Base of the topmost chunk -- not otherwise kept in a bin */
2536 mchunkptr top;
2538 /* The remainder from the most recent split of a small request */
2539 mchunkptr last_remainder;
2541 /* Normal bins packed as described above */
2542 mchunkptr bins[NBINS * 2];
2544 /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
2545 unsigned int binmap[BINMAPSIZE+1];
2547 /* Tunable parameters */
2548 CHUNK_SIZE_T trim_threshold;
2549 INTERNAL_SIZE_T top_pad;
2550 INTERNAL_SIZE_T mmap_threshold;
2552 /* Memory map support */
2553 int n_mmaps;
2554 int n_mmaps_max;
2555 int max_n_mmaps;
2557 /* Cache malloc_getpagesize */
2558 unsigned int pagesize;
2560 /* Track properties of MORECORE */
2561 unsigned int morecore_properties;
2563 /* Statistics */
2564 INTERNAL_SIZE_T mmapped_mem;
2565 INTERNAL_SIZE_T sbrked_mem;
2566 INTERNAL_SIZE_T max_sbrked_mem;
2567 INTERNAL_SIZE_T max_mmapped_mem;
2568 INTERNAL_SIZE_T max_total_mem;
2569 };
2571 typedef struct malloc_state *mstate;
2573 /*
2574 There is exactly one instance of this struct in this malloc.
2575 If you are adapting this malloc in a way that does NOT use a static
2576 malloc_state, you MUST explicitly zero-fill it before using. This
2577 malloc relies on the property that malloc_state is initialized to
2578 all zeroes (as is true of C statics).
2579 */
2581 static struct malloc_state av_; /* never directly referenced */
2583 /*
2584 All uses of av_ are via get_malloc_state().
2585 At most one "call" to get_malloc_state is made per invocation of
2586 the public versions of malloc and free, but other routines
2587 that in turn invoke malloc and/or free may call it more than once.
2588 Also, it is called in check* routines if DEBUG is set.
2589 */
2591 #define get_malloc_state() (&(av_))
2593 /*
2594 Initialize a malloc_state struct.
2596 This is called only from within malloc_consolidate, which needs to
2597 be called in the same contexts anyway. It is never called directly
2598 outside of malloc_consolidate because some optimizing compilers try
2599 to inline it at all call points, which turns out not to be an
2600 optimization at all. (Inlining it in malloc_consolidate is fine though.)
2601 */
2603 #if __STD_C
2604 static void malloc_init_state(mstate av)
2605 #else
2606 static void malloc_init_state(av) mstate av;
2607 #endif
2609 int i;
2610 mbinptr bin;
2612 /* Establish circular links for normal bins */
2613 for (i = 1; i < NBINS; ++i) {
2614 bin = bin_at(av,i);
2615 bin->fd = bin->bk = bin;
2618 av->top_pad = DEFAULT_TOP_PAD;
2619 av->n_mmaps_max = DEFAULT_MMAP_MAX;
2620 av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
2621 av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
2623 #if MORECORE_CONTIGUOUS
2624 set_contiguous(av);
2625 #else
2626 set_noncontiguous(av);
2627 #endif
2630 set_max_fast(av, DEFAULT_MXFAST);
2632 av->top = initial_top(av);
2633 av->pagesize = malloc_getpagesize;
2636 /*
2637 Other internal utilities operating on mstates
2638 */
2640 static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
2641 #ifndef MORECORE_CANNOT_TRIM
2642 static int sYSTRIm(size_t, mstate);
2643 #endif
2644 static void malloc_consolidate(mstate);
2645 static Void_t** iALLOc(size_t, size_t*, int, Void_t**);
2647 /*
2648 Debugging support
2650 These routines make a number of assertions about the states
2651 of data structures that should be true at all times. If any
2652 are not true, it's very likely that a user program has somehow
2653 trashed memory. (It's also possible that there is a coding error
2654 in malloc. In which case, please report it!)
2655 */
2657 #if ! DEBUG
2659 #define check_chunk(P)
2660 #define check_free_chunk(P)
2661 #define check_inuse_chunk(P)
2662 #define check_remalloced_chunk(P,N)
2663 #define check_malloced_chunk(P,N)
2664 #define check_malloc_state()
2666 #else
2667 #define check_chunk(P) do_check_chunk(P)
2668 #define check_free_chunk(P) do_check_free_chunk(P)
2669 #define check_inuse_chunk(P) do_check_inuse_chunk(P)
2670 #define check_remalloced_chunk(P,N) do_check_remalloced_chunk(P,N)
2671 #define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
2672 #define check_malloc_state() do_check_malloc_state()
2674 /*
2675 Properties of all chunks
2676 */
2678 #if __STD_C
2679 static void do_check_chunk(mchunkptr p)
2680 #else
2681 static void do_check_chunk(p) mchunkptr p;
2682 #endif
2684 mstate av = get_malloc_state();
2685 CHUNK_SIZE_T sz = chunksize(p);
2686 /* min and max possible addresses assuming contiguous allocation */
2687 char* max_address = (char*)(av->top) + chunksize(av->top);
2688 char* min_address = max_address - av->sbrked_mem;
2690 if (!chunk_is_mmapped(p)) {
2692 /* Has legal address ... */
2693 if (p != av->top) {
2694 if (contiguous(av)) {
2695 assert(((char*)p) >= min_address);
2696 assert(((char*)p + sz) <= ((char*)(av->top)));
2699 else {
2700 /* top size is always at least MINSIZE */
2701 assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
2702 /* top predecessor always marked inuse */
2703 assert(prev_inuse(p));
2707 else {
2708 #if HAVE_MMAP
2709 /* address is outside main heap */
2710 if (contiguous(av) && av->top != initial_top(av)) {
2711 assert(((char*)p) < min_address || ((char*)p) > max_address);
2713 /* chunk is page-aligned */
2714 assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
2715 /* mem is aligned */
2716 assert(aligned_OK(chunk2mem(p)));
2717 #else
2718 /* force an appropriate assert violation if debug set */
2719 assert(!chunk_is_mmapped(p));
2720 #endif
2724 /*
2725 Properties of free chunks
2726 */
2728 #if __STD_C
2729 static void do_check_free_chunk(mchunkptr p)
2730 #else
2731 static void do_check_free_chunk(p) mchunkptr p;
2732 #endif
2734 mstate av = get_malloc_state();
2736 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
2737 mchunkptr next = chunk_at_offset(p, sz);
2739 do_check_chunk(p);
2741 /* Chunk must claim to be free ... */
2742 assert(!inuse(p));
2743 assert (!chunk_is_mmapped(p));
2745 /* Unless a special marker, must have OK fields */
2746 if ((CHUNK_SIZE_T)(sz) >= MINSIZE)
2748 assert((sz & MALLOC_ALIGN_MASK) == 0);
2749 assert(aligned_OK(chunk2mem(p)));
2750 /* ... matching footer field */
2751 assert(next->prev_size == sz);
2752 /* ... and is fully consolidated */
2753 assert(prev_inuse(p));
2754 assert (next == av->top || inuse(next));
2756 /* ... and has minimally sane links */
2757 assert(p->fd->bk == p);
2758 assert(p->bk->fd == p);
2760 else /* markers are always of size SIZE_SZ */
2761 assert(sz == SIZE_SZ);
2764 /*
2765 Properties of inuse chunks
2766 */
2768 #if __STD_C
2769 static void do_check_inuse_chunk(mchunkptr p)
2770 #else
2771 static void do_check_inuse_chunk(p) mchunkptr p;
2772 #endif
2774 mstate av = get_malloc_state();
2775 mchunkptr next;
2776 do_check_chunk(p);
2778 if (chunk_is_mmapped(p))
2779 return; /* mmapped chunks have no next/prev */
2781 /* Check whether it claims to be in use ... */
2782 assert(inuse(p));
2784 next = next_chunk(p);
2786 /* ... and is surrounded by OK chunks.
2787 Since more things can be checked with free chunks than inuse ones,
2788 if an inuse chunk borders them and debug is on, it's worth doing them.
2789 */
2790 if (!prev_inuse(p)) {
2791 /* Note that we cannot even look at prev unless it is not inuse */
2792 mchunkptr prv = prev_chunk(p);
2793 assert(next_chunk(prv) == p);
2794 do_check_free_chunk(prv);
2797 if (next == av->top) {
2798 assert(prev_inuse(next));
2799 assert(chunksize(next) >= MINSIZE);
2801 else if (!inuse(next))
2802 do_check_free_chunk(next);
2805 /*
2806 Properties of chunks recycled from fastbins
2807 */
2809 #if __STD_C
2810 static void do_check_remalloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
2811 #else
2812 static void do_check_remalloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
2813 #endif
2815 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
2817 do_check_inuse_chunk(p);
2819 /* Legal size ... */
2820 assert((sz & MALLOC_ALIGN_MASK) == 0);
2821 assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
2822 /* ... and alignment */
2823 assert(aligned_OK(chunk2mem(p)));
2824 /* chunk is less than MINSIZE more than request */
2825 assert((long)(sz) - (long)(s) >= 0);
2826 assert((long)(sz) - (long)(s + MINSIZE) < 0);
2829 /*
2830 Properties of nonrecycled chunks at the point they are malloced
2831 */
2833 #if __STD_C
2834 static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
2835 #else
2836 static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
2837 #endif
2839 /* same as recycled case ... */
2840 do_check_remalloced_chunk(p, s);
2842 /*
2843 ... plus, must obey implementation invariant that prev_inuse is
2844 always true of any allocated chunk; i.e., that each allocated
2845 chunk borders either a previously allocated and still in-use
2846 chunk, or the base of its memory arena. This is ensured
2847 by making all allocations from the `lowest' part of any found
2848 chunk. This does not necessarily hold however for chunks
2849 recycled via fastbins.
2850 */
2852 assert(prev_inuse(p));
2856 /*
2857 Properties of malloc_state.
2859 This may be useful for debugging malloc, as well as detecting user
2860 programmer errors that somehow write into malloc_state.
2862 If you are extending or experimenting with this malloc, you can
2863 probably figure out how to hack this routine to print out or
2864 display chunk addresses, sizes, bins, and other instrumentation.
2865 */
2867 static void do_check_malloc_state()
2869 mstate av = get_malloc_state();
2870 int i;
2871 mchunkptr p;
2872 mchunkptr q;
2873 mbinptr b;
2874 unsigned int binbit;
2875 int empty;
2876 unsigned int idx;
2877 INTERNAL_SIZE_T size;
2878 CHUNK_SIZE_T total = 0;
2879 int max_fast_bin;
2881 /* internal size_t must be no wider than pointer type */
2882 assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
2884 /* alignment is a power of 2 */
2885 assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
2887 /* cannot run remaining checks until fully initialized */
2888 if (av->top == 0 || av->top == initial_top(av))
2889 return;
2891 /* pagesize is a power of 2 */
2892 assert((av->pagesize & (av->pagesize-1)) == 0);
2894 /* properties of fastbins */
2896 /* max_fast is in allowed range */
2897 assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));
2899 max_fast_bin = fastbin_index(av->max_fast);
2901 for (i = 0; i < NFASTBINS; ++i) {
2902 p = av->fastbins[i];
2904 /* all bins past max_fast are empty */
2905 if (i > max_fast_bin)
2906 assert(p == 0);
2908 while (p != 0) {
2909 /* each chunk claims to be inuse */
2910 do_check_inuse_chunk(p);
2911 total += chunksize(p);
2912 /* chunk belongs in this bin */
2913 assert(fastbin_index(chunksize(p)) == i);
2914 p = p->fd;
2918 if (total != 0)
2919 assert(have_fastchunks(av));
2920 else if (!have_fastchunks(av))
2921 assert(total == 0);
2923 /* check normal bins */
2924 for (i = 1; i < NBINS; ++i) {
2925 b = bin_at(av,i);
2927 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2928 if (i >= 2) {
2929 binbit = get_binmap(av,i);
2930 empty = last(b) == b;
2931 if (!binbit)
2932 assert(empty);
2933 else if (!empty)
2934 assert(binbit);
2937 for (p = last(b); p != b; p = p->bk) {
2938 /* each chunk claims to be free */
2939 do_check_free_chunk(p);
2940 size = chunksize(p);
2941 total += size;
2942 if (i >= 2) {
2943 /* chunk belongs in bin */
2944 idx = bin_index(size);
2945 assert(idx == i);
2946 /* lists are sorted */
2947 if ((CHUNK_SIZE_T) size >= (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
2948 assert(p->bk == b ||
2949 (CHUNK_SIZE_T)chunksize(p->bk) >=
2950 (CHUNK_SIZE_T)chunksize(p));
2953 /* chunk is followed by a legal chain of inuse chunks */
2954 for (q = next_chunk(p);
2955 (q != av->top && inuse(q) &&
2956 (CHUNK_SIZE_T)(chunksize(q)) >= MINSIZE);
2957 q = next_chunk(q))
2958 do_check_inuse_chunk(q);
2962 /* top chunk is OK */
2963 check_chunk(av->top);
2965 /* sanity checks for statistics */
2967 assert(total <= (CHUNK_SIZE_T)(av->max_total_mem));
2968 assert(av->n_mmaps >= 0);
2969 assert(av->n_mmaps <= av->max_n_mmaps);
2971 assert((CHUNK_SIZE_T)(av->sbrked_mem) <=
2972 (CHUNK_SIZE_T)(av->max_sbrked_mem));
2974 assert((CHUNK_SIZE_T)(av->mmapped_mem) <=
2975 (CHUNK_SIZE_T)(av->max_mmapped_mem));
2977 assert((CHUNK_SIZE_T)(av->max_total_mem) >=
2978 (CHUNK_SIZE_T)(av->mmapped_mem) + (CHUNK_SIZE_T)(av->sbrked_mem));
2980 #endif
2983 /* ----------- Routines dealing with system allocation -------------- */
2985 /*
2986 sysmalloc handles malloc cases requiring more memory from the system.
2987 On entry, it is assumed that av->top does not have enough
2988 space to service request for nb bytes, thus requiring that av->top
2989 be extended or replaced.
2990 */
2992 #if __STD_C
2993 static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
2994 #else
2995 static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
2996 #endif
2998 mchunkptr old_top; /* incoming value of av->top */
2999 INTERNAL_SIZE_T old_size; /* its size */
3000 char* old_end; /* its end address */
3002 long size; /* arg to first MORECORE or mmap call */
3003 char* brk; /* return value from MORECORE */
3005 long correction; /* arg to 2nd MORECORE call */
3006 char* snd_brk; /* 2nd return val */
3008 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
3009 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
3010 char* aligned_brk; /* aligned offset into brk */
3012 mchunkptr p; /* the allocated/returned chunk */
3013 mchunkptr remainder; /* remainder from allocation */
3014 CHUNK_SIZE_T remainder_size; /* its size */
3016 CHUNK_SIZE_T sum; /* for updating stats */
3018 size_t pagemask = av->pagesize - 1;
3020 /*
3021 If there is space available in fastbins, consolidate and retry
3022 malloc from scratch rather than getting memory from system. This
3023 can occur only if nb is in smallbin range so we didn't consolidate
3024 upon entry to malloc. It is much easier to handle this case here
3025 than in malloc proper.
3026 */
3028 if (have_fastchunks(av)) {
3029 assert(in_smallbin_range(nb));
3030 malloc_consolidate(av);
3031 return mALLOc(nb - MALLOC_ALIGN_MASK);
3035 #if HAVE_MMAP
3037 /*
3038 If have mmap, and the request size meets the mmap threshold, and
3039 the system supports mmap, and there are few enough currently
3040 allocated mmapped regions, try to directly map this request
3041 rather than expanding top.
3042 */
3044 if ((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) &&
3045 (av->n_mmaps < av->n_mmaps_max)) {
3047 char* mm; /* return value from mmap call*/
3049 /*
3050 Round up size to nearest page. For mmapped chunks, the overhead
3051 is one SIZE_SZ unit larger than for normal chunks, because there
3052 is no following chunk whose prev_size field could be used.
3053 */
3054 size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
3056 /* Don't try if size wraps around 0 */
3057 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3059 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3061 if (mm != (char*)(MORECORE_FAILURE)) {
3063 /*
3064 The offset to the start of the mmapped region is stored
3065 in the prev_size field of the chunk. This allows us to adjust
3066 returned start address to meet alignment requirements here
3067 and in memalign(), and still be able to compute proper
3068 address argument for later munmap in free() and realloc().
3069 */
3071 front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
3072 if (front_misalign > 0) {
3073 correction = MALLOC_ALIGNMENT - front_misalign;
3074 p = (mchunkptr)(mm + correction);
3075 p->prev_size = correction;
3076 set_head(p, (size - correction) |IS_MMAPPED);
3078 else {
3079 p = (mchunkptr)mm;
3080 p->prev_size = 0;
3081 set_head(p, size|IS_MMAPPED);
3084 /* update statistics */
3086 if (++av->n_mmaps > av->max_n_mmaps)
3087 av->max_n_mmaps = av->n_mmaps;
3089 sum = av->mmapped_mem += size;
3090 if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
3091 av->max_mmapped_mem = sum;
3092 sum += av->sbrked_mem;
3093 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3094 av->max_total_mem = sum;
3096 check_chunk(p);
3098 return chunk2mem(p);
3102 #endif
3104 /* Record incoming configuration of top */
3106 old_top = av->top;
3107 old_size = chunksize(old_top);
3108 old_end = (char*)(chunk_at_offset(old_top, old_size));
3110 brk = snd_brk = (char*)(MORECORE_FAILURE);
3112 /*
3113 If not the first time through, we require old_size to be
3114 at least MINSIZE and to have prev_inuse set.
3115 */
3117 assert((old_top == initial_top(av) && old_size == 0) ||
3118 ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
3119 prev_inuse(old_top)));
3121 /* Precondition: not enough current space to satisfy nb request */
3122 assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));
3124 /* Precondition: all fastbins are consolidated */
3125 assert(!have_fastchunks(av));
3128 /* Request enough space for nb + pad + overhead */
3130 size = nb + av->top_pad + MINSIZE;
3132 /*
3133 If contiguous, we can subtract out existing space that we hope to
3134 combine with new space. We add it back later only if
3135 we don't actually get contiguous space.
3136 */
3138 if (contiguous(av))
3139 size -= old_size;
3141 /*
3142 Round to a multiple of page size.
3143 If MORECORE is not contiguous, this ensures that we only call it
3144 with whole-page arguments. And if MORECORE is contiguous and
3145 this is not first time through, this preserves page-alignment of
3146 previous calls. Otherwise, we correct to page-align below.
3147 */
3149 size = (size + pagemask) & ~pagemask;
3151 /*
3152 Don't try to call MORECORE if argument is so big as to appear
3153 negative. Note that since mmap takes size_t arg, it may succeed
3154 below even if we cannot call MORECORE.
3155 */
3157 if (size > 0)
3158 brk = (char*)(MORECORE(size));
3160 /*
3161 If have mmap, try using it as a backup when MORECORE fails or
3162 cannot be used. This is worth doing on systems that have "holes" in
3163 address space, so sbrk cannot extend to give contiguous space, but
3164 space is available elsewhere. Note that we ignore mmap max count
3165 and threshold limits, since the space will not be used as a
3166 segregated mmap region.
3167 */
3169 #if HAVE_MMAP
3170 if (brk == (char*)(MORECORE_FAILURE)) {
3172 /* Cannot merge with old top, so add its size back in */
3173 if (contiguous(av))
3174 size = (size + old_size + pagemask) & ~pagemask;
3176 /* If we are relying on mmap as backup, then use larger units */
3177 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE))
3178 size = MMAP_AS_MORECORE_SIZE;
3180 /* Don't try if size wraps around 0 */
3181 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
3183 brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3185 if (brk != (char*)(MORECORE_FAILURE)) {
3187 /* We do not need, and cannot use, another sbrk call to find end */
3188 snd_brk = brk + size;
3190 /*
3191 Record that we no longer have a contiguous sbrk region.
3192 After the first time mmap is used as backup, we do not
3193 ever rely on contiguous space since this could incorrectly
3194 bridge regions.
3195 */
3196 set_noncontiguous(av);
3200 #endif
3202 if (brk != (char*)(MORECORE_FAILURE)) {
3203 av->sbrked_mem += size;
3205 /*
3206 If MORECORE extends previous space, we can likewise extend top size.
3207 */
3209 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
3210 set_head(old_top, (size + old_size) | PREV_INUSE);
3213 /*
3214 Otherwise, make adjustments:
3216 * If the first time through or noncontiguous, we need to call sbrk
3217 just to find out where the end of memory lies.
3219 * We need to ensure that all returned chunks from malloc will meet
3220 MALLOC_ALIGNMENT
3222 * If there was an intervening foreign sbrk, we need to adjust sbrk
3223 request size to account for fact that we will not be able to
3224 combine new space with existing space in old_top.
3226 * Almost all systems internally allocate whole pages at a time, in
3227 which case we might as well use the whole last page of request.
3228 So we allocate enough more memory to hit a page boundary now,
3229 which in turn causes future contiguous calls to page-align.
3230 */
3232 else {
3233 front_misalign = 0;
3234 end_misalign = 0;
3235 correction = 0;
3236 aligned_brk = brk;
3238 /*
3239 If MORECORE returns an address lower than we have seen before,
3240 we know it isn't really contiguous. This and some subsequent
3241 checks help cope with non-conforming MORECORE functions and
3242 the presence of "foreign" calls to MORECORE from outside of
3243 malloc or by other threads. We cannot guarantee to detect
3244 these in all cases, but cope with the ones we do detect.
3245 */
3246 if (contiguous(av) && old_size != 0 && brk < old_end) {
3247 set_noncontiguous(av);
3250 /* handle contiguous cases */
3251 if (contiguous(av)) {
3253 /*
3254 We can tolerate forward non-contiguities here (usually due
3255 to foreign calls) but treat them as part of our space for
3256 stats reporting.
3257 */
3258 if (old_size != 0)
3259 av->sbrked_mem += brk - old_end;
3261 /* Guarantee alignment of first new chunk made from this space */
3263 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
3264 if (front_misalign > 0) {
3266 /*
3267 Skip over some bytes to arrive at an aligned position.
3268 We don't need to specially mark these wasted front bytes.
3269 They will never be accessed anyway because
3270 prev_inuse of av->top (and any chunk created from its start)
3271 is always true after initialization.
3272 */
3274 correction = MALLOC_ALIGNMENT - front_misalign;
3275 aligned_brk += correction;
3278 /*
3279 If this isn't adjacent to existing space, then we will not
3280 be able to merge with old_top space, so must add to 2nd request.
3281 */
3283 correction += old_size;
3285 /* Extend the end address to hit a page boundary */
3286 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3287 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
3289 assert(correction >= 0);
3290 snd_brk = (char*)(MORECORE(correction));
3292 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3293 /*
3294 If can't allocate correction, try to at least find out current
3295 brk. It might be enough to proceed without failing.
3296 */
3297 correction = 0;
3298 snd_brk = (char*)(MORECORE(0));
3300 else if (snd_brk < brk) {
3301 /*
3302 If the second call gives noncontiguous space even though
3303 it says it won't, the only course of action is to ignore
3304 results of second call, and conservatively estimate where
3305 the first call left us. Also set noncontiguous, so this
3306 won't happen again, leaving at most one hole.
3308 Note that this check is intrinsically incomplete. Because
3309 MORECORE is allowed to give more space than we ask for,
3310 there is no reliable way to detect a noncontiguity
3311 producing a forward gap for the second call.
3312 */
3313 snd_brk = brk + size;
3314 correction = 0;
3315 set_noncontiguous(av);
3320 /* handle non-contiguous cases */
3321 else {
3322 /* MORECORE/mmap must correctly align */
3323 assert(aligned_OK(chunk2mem(brk)));
3325 /* Find out current end of memory */
3326 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3327 snd_brk = (char*)(MORECORE(0));
3328 av->sbrked_mem += snd_brk - brk - size;
3332 /* Adjust top based on results of second sbrk */
3333 if (snd_brk != (char*)(MORECORE_FAILURE)) {
3334 av->top = (mchunkptr)aligned_brk;
3335 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3336 av->sbrked_mem += correction;
3338 /*
3339 If not the first time through, we either have a
3340 gap due to foreign sbrk or a non-contiguous region. Insert a
3341 double fencepost at old_top to prevent consolidation with space
3342 we don't own. These fenceposts are artificial chunks that are
3343 marked as inuse and are in any case too small to use. We need
3344 two to make sizes and alignments work out.
3345 */
3347 if (old_size != 0) {
3348 /*
3349 Shrink old_top to insert fenceposts, keeping size a
3350 multiple of MALLOC_ALIGNMENT. We know there is at least
3351 enough space in old_top to do this.
3352 */
3353 old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3354 set_head(old_top, old_size | PREV_INUSE);
3356 /*
3357 Note that the following assignments completely overwrite
3358 old_top when old_size was previously MINSIZE. This is
3359 intentional. We need the fencepost, even if old_top otherwise gets
3360 lost.
3361 */
3362 chunk_at_offset(old_top, old_size )->size =
3363 SIZE_SZ|PREV_INUSE;
3365 chunk_at_offset(old_top, old_size + SIZE_SZ)->size =
3366 SIZE_SZ|PREV_INUSE;
3368 /*
3369 If possible, release the rest, suppressing trimming.
3370 */
3371 if (old_size >= MINSIZE) {
3372 INTERNAL_SIZE_T tt = av->trim_threshold;
3373 av->trim_threshold = (INTERNAL_SIZE_T)(-1);
3374 fREe(chunk2mem(old_top));
3375 av->trim_threshold = tt;
3381 /* Update statistics */
3382 sum = av->sbrked_mem;
3383 if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
3384 av->max_sbrked_mem = sum;
3386 sum += av->mmapped_mem;
3387 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
3388 av->max_total_mem = sum;
3390 check_malloc_state();
3392 /* finally, do the allocation */
3394 p = av->top;
3395 size = chunksize(p);
3397 /* check that one of the above allocation paths succeeded */
3398 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
3399 remainder_size = size - nb;
3400 remainder = chunk_at_offset(p, nb);
3401 av->top = remainder;
3402 set_head(p, nb | PREV_INUSE);
3403 set_head(remainder, remainder_size | PREV_INUSE);
3404 check_malloced_chunk(p, nb);
3405 return chunk2mem(p);
3410 /* catch all failure paths */
3411 MALLOC_FAILURE_ACTION;
3412 return 0;
3418 #ifndef MORECORE_CANNOT_TRIM
3419 /*
3420 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3421 to the system (via negative arguments to sbrk) if there is unused
3422 memory at the `high' end of the malloc pool. It is called
3423 automatically by free() when top space exceeds the trim
3424 threshold. It is also called by the public malloc_trim routine. It
3425 returns 1 if it actually released any memory, else 0.
3426 */
3428 #if __STD_C
3429 static int sYSTRIm(size_t pad, mstate av)
3430 #else
3431 static int sYSTRIm(pad, av) size_t pad; mstate av;
3432 #endif
3434 long top_size; /* Amount of top-most memory */
3435 long extra; /* Amount to release */
3436 long released; /* Amount actually released */
3437 char* current_brk; /* address returned by pre-check sbrk call */
3438 char* new_brk; /* address returned by post-check sbrk call */
3439 size_t pagesz;
3441 pagesz = av->pagesize;
3442 top_size = chunksize(av->top);
3444 /* Release in pagesize units, keeping at least one page */
3445 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
3447 if (extra > 0) {
3449 /*
3450 Only proceed if end of memory is where we last set it.
3451 This avoids problems if there were foreign sbrk calls.
3452 */
3453 current_brk = (char*)(MORECORE(0));
3454 if (current_brk == (char*)(av->top) + top_size) {
3456 /*
3457 Attempt to release memory. We ignore MORECORE return value,
3458 and instead call again to find out where new end of memory is.
3459 This avoids problems if the first call releases less than we asked,
3460 or if failure somehow altered the brk value. (We could still
3461 encounter problems if it altered brk in some very bad way,
3462 but the only thing we can do is adjust anyway, which will cause
3463 some downstream failure.)
3464 */
3466 MORECORE(-extra);
3467 new_brk = (char*)(MORECORE(0));
3469 if (new_brk != (char*)MORECORE_FAILURE) {
3470 released = (long)(current_brk - new_brk);
3472 if (released != 0) {
3473 /* Success. Adjust top. */
3474 av->sbrked_mem -= released;
3475 set_head(av->top, (top_size - released) | PREV_INUSE);
3476 check_malloc_state();
3477 return 1;
3482 return 0;
3484 #endif
3486 /*
3487 ------------------------------ malloc ------------------------------
3488 */
3491 #if __STD_C
3492 Void_t* mALLOc(size_t bytes)
3493 #else
3494 Void_t* mALLOc(bytes) size_t bytes;
3495 #endif
3497 mstate av = get_malloc_state();
3499 INTERNAL_SIZE_T nb; /* normalized request size */
3500 unsigned int idx; /* associated bin index */
3501 mbinptr bin; /* associated bin */
3502 mfastbinptr* fb; /* associated fastbin */
3504 mchunkptr victim; /* inspected/selected chunk */
3505 INTERNAL_SIZE_T size; /* its size */
3506 int victim_index; /* its bin index */
3508 mchunkptr remainder; /* remainder from a split */
3509 CHUNK_SIZE_T remainder_size; /* its size */
3511 unsigned int block; /* bit map traverser */
3512 unsigned int bit; /* bit map traverser */
3513 unsigned int map; /* current word of binmap */
3515 mchunkptr fwd; /* misc temp for linking */
3516 mchunkptr bck; /* misc temp for linking */
3518 /*
3519 Convert request size to internal form by adding SIZE_SZ bytes
3520 overhead plus possibly more to obtain necessary alignment and/or
3521 to obtain a size of at least MINSIZE, the smallest allocatable
3522 size. Also, checked_request2size traps (returning 0) request sizes
3523 that are so large that they wrap around zero when padded and
3524 aligned.
3525 */
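/*
  Roughly what the conversion amounts to, as a sketch under the assumed
  values SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and MINSIZE == 16 (the
  real checked_request2size macro, defined earlier in this file, also
  rejects requests that would wrap past zero):

      // bytes = 13:  nb = (13 + 4 + 7) & ~7 = 24   add overhead, align up
      // bytes = 1:   nb = (1 + 4 + 7) & ~7  = 8    -> raised to MINSIZE (16)
*/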
3527 checked_request2size(bytes, nb);
3529 /*
3530 Bypass search if no frees yet
3531 */
3532 if (!have_anychunks(av)) {
3533 if (av->max_fast == 0) /* initialization check */
3534 malloc_consolidate(av);
3535 goto use_top;
3538 /*
3539 If the size qualifies as a fastbin, first check corresponding bin.
3540 */
3542 if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) {
3543 fb = &(av->fastbins[(fastbin_index(nb))]);
3544 if ( (victim = *fb) != 0) {
3545 *fb = victim->fd;
3546 check_remalloced_chunk(victim, nb);
3547 return chunk2mem(victim);
3551 /*
3552 If a small request, check regular bin. Since these "smallbins"
3553 hold one size each, no searching within bins is necessary.
3554 (For a large request, we need to wait until unsorted chunks are
3555 processed to find best fit. But for small ones, fits are exact
3556 anyway, so we can check now, which is faster.)
3557 */
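/*
  Each small bin is a circular doubly-linked list whose header acts as
  a sentinel, and last(bin) (i.e. bin->bk) is its least recently used
  entry. A generic sketch of the splice performed below (illustrative
  names, not the macros used in this file):

      // victim = bin->bk;          take from the tail
      // if (victim != bin) {       the bin was non-empty
      //     bck = victim->bk;
      //     bin->bk = bck;         unlink victim in O(1)
      //     bck->fd = bin;
      // }
*/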
3559 if (in_smallbin_range(nb)) {
3560 idx = smallbin_index(nb);
3561 bin = bin_at(av,idx);
3563 if ( (victim = last(bin)) != bin) {
3564 bck = victim->bk;
3565 set_inuse_bit_at_offset(victim, nb);
3566 bin->bk = bck;
3567 bck->fd = bin;
3569 check_malloced_chunk(victim, nb);
3570 return chunk2mem(victim);
3574 /*
3575 If this is a large request, consolidate fastbins before continuing.
3576 While it might look excessive to kill all fastbins before
3577 even seeing if there is space available, this avoids
3578 fragmentation problems normally associated with fastbins.
3579 Also, in practice, programs tend to have runs of either small or
3580 large requests, but less often mixtures, so consolidation is not
3581 invoked all that often in most programs. And the programs that
3582 it is called frequently in otherwise tend to fragment.
3583 */
3585 else {
3586 idx = largebin_index(nb);
3587 if (have_fastchunks(av))
3588 malloc_consolidate(av);
3591 /*
3592 Process recently freed or remaindered chunks, taking one only if
3593 it is an exact fit, or, if this is a small request, the chunk is the remainder from
3594 the most recent non-exact fit. Place other traversed chunks in
3595 bins. Note that this step is the only place in any routine where
3596 chunks are placed in bins.
3597 */
3599 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
3600 bck = victim->bk;
3601 size = chunksize(victim);
3603 /*
3604 If a small request, try to use last remainder if it is the
3605 only chunk in unsorted bin. This helps promote locality for
3606 runs of consecutive small requests. This is the only
3607 exception to best-fit, and applies only when there is
3608 no exact fit for a small chunk.
3609 */
3611 if (in_smallbin_range(nb) &&
3612 bck == unsorted_chunks(av) &&
3613 victim == av->last_remainder &&
3614 (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
3616 /* split and reattach remainder */
3617 remainder_size = size - nb;
3618 remainder = chunk_at_offset(victim, nb);
3619 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3620 av->last_remainder = remainder;
3621 remainder->bk = remainder->fd = unsorted_chunks(av);
3623 set_head(victim, nb | PREV_INUSE);
3624 set_head(remainder, remainder_size | PREV_INUSE);
3625 set_foot(remainder, remainder_size);
3627 check_malloced_chunk(victim, nb);
3628 return chunk2mem(victim);
3631 /* remove from unsorted list */
3632 unsorted_chunks(av)->bk = bck;
3633 bck->fd = unsorted_chunks(av);
3635 /* Take now instead of binning if exact fit */
3637 if (size == nb) {
3638 set_inuse_bit_at_offset(victim, size);
3639 check_malloced_chunk(victim, nb);
3640 return chunk2mem(victim);
3643 /* place chunk in bin */
3645 if (in_smallbin_range(size)) {
3646 victim_index = smallbin_index(size);
3647 bck = bin_at(av, victim_index);
3648 fwd = bck->fd;
3650 else {
3651 victim_index = largebin_index(size);
3652 bck = bin_at(av, victim_index);
3653 fwd = bck->fd;
3655 if (fwd != bck) {
3656 /* if smaller than smallest, place first */
3657 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
3658 fwd = bck;
3659 bck = bck->bk;
3661 else if ((CHUNK_SIZE_T)(size) >=
3662 (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
3664 /* maintain large bins in sorted order */
3665 size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
3666 while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size))
3667 fwd = fwd->fd;
3668 bck = fwd->bk;
3673 mark_bin(av, victim_index);
3674 victim->bk = bck;
3675 victim->fd = fwd;
3676 fwd->bk = victim;
3677 bck->fd = victim;
3680 /*
3681 If a large request, scan through the chunks of current bin to
3682 find one that fits. (This will be the smallest that fits unless
3683 FIRST_SORTED_BIN_SIZE has been changed from default.) This is
3684 the only step where an unbounded number of chunks might be
3685 scanned without doing anything useful with them. However the
3686 lists tend to be short.
3687 */
3689 if (!in_smallbin_range(nb)) {
3690 bin = bin_at(av, idx);
3692 for (victim = last(bin); victim != bin; victim = victim->bk) {
3693 size = chunksize(victim);
3695 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
3696 remainder_size = size - nb;
3697 unlink(victim, bck, fwd);
3699 /* Exhaust */
3700 if (remainder_size < MINSIZE) {
3701 set_inuse_bit_at_offset(victim, size);
3702 check_malloced_chunk(victim, nb);
3703 return chunk2mem(victim);
3705 /* Split */
3706 else {
3707 remainder = chunk_at_offset(victim, nb);
3708 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3709 remainder->bk = remainder->fd = unsorted_chunks(av);
3710 set_head(victim, nb | PREV_INUSE);
3711 set_head(remainder, remainder_size | PREV_INUSE);
3712 set_foot(remainder, remainder_size);
3713 check_malloced_chunk(victim, nb);
3714 return chunk2mem(victim);
3720 /*
3721 Search for a chunk by scanning bins, starting with next largest
3722 bin. This search is strictly by best-fit; i.e., the smallest
3723 (with ties going to approximately the least recently used) chunk
3724 that fits is selected.
3726 The binmap lets the search skip over empty bins without inspecting each one.
3727 */
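/*
  The binmap is a bitmap with one bit per bin; a set bit means the bin
  is probably non-empty. A minimal sketch of the skip logic used below
  (assuming 32-bit map words, as elsewhere in this file):

      // map = av->binmap[block];
      // if (bit > map || bit == 0) {          no set bit at or above 'bit'
      //     do {
      //         if (++block >= BINMAPSIZE)    all remaining bins empty
      //             goto use_top;
      //     } while ((map = av->binmap[block]) == 0);
      //     bin = bin_at(av, block << BINMAPSHIFT);
      //     bit = 1;                          restart at the block's first bin
      // }
      // while ((bit & map) == 0) { bin = next_bin(bin); bit <<= 1; }

  A bit may be stale (set although the bin is empty); that case is
  detected when the bin is inspected, and the bit is cleared then.
*/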
3729 ++idx;
3730 bin = bin_at(av,idx);
3731 block = idx2block(idx);
3732 map = av->binmap[block];
3733 bit = idx2bit(idx);
3735 for (;;) {
3737 /* Skip rest of block if there are no more set bits in this block. */
3738 if (bit > map || bit == 0) {
3739 do {
3740 if (++block >= BINMAPSIZE) /* out of bins */
3741 goto use_top;
3742 } while ( (map = av->binmap[block]) == 0);
3744 bin = bin_at(av, (block << BINMAPSHIFT));
3745 bit = 1;
3748 /* Advance to bin with set bit. There must be one. */
3749 while ((bit & map) == 0) {
3750 bin = next_bin(bin);
3751 bit <<= 1;
3752 assert(bit != 0);
3755 /* Inspect the bin. It is likely to be non-empty */
3756 victim = last(bin);
3758 /* If a false alarm (empty bin), clear the bit. */
3759 if (victim == bin) {
3760 av->binmap[block] = map &= ~bit; /* Write through */
3761 bin = next_bin(bin);
3762 bit <<= 1;
3765 else {
3766 size = chunksize(victim);
3768 /* We know the first chunk in this bin is big enough to use. */
3769 assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
3771 remainder_size = size - nb;
3773 /* unlink */
3774 bck = victim->bk;
3775 bin->bk = bck;
3776 bck->fd = bin;
3778 /* Exhaust */
3779 if (remainder_size < MINSIZE) {
3780 set_inuse_bit_at_offset(victim, size);
3781 check_malloced_chunk(victim, nb);
3782 return chunk2mem(victim);
3785 /* Split */
3786 else {
3787 remainder = chunk_at_offset(victim, nb);
3789 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3790 remainder->bk = remainder->fd = unsorted_chunks(av);
3791 /* advertise as last remainder */
3792 if (in_smallbin_range(nb))
3793 av->last_remainder = remainder;
3795 set_head(victim, nb | PREV_INUSE);
3796 set_head(remainder, remainder_size | PREV_INUSE);
3797 set_foot(remainder, remainder_size);
3798 check_malloced_chunk(victim, nb);
3799 return chunk2mem(victim);
3804 use_top:
3805 /*
3806 If large enough, split off the chunk bordering the end of memory
3807 (held in av->top). Note that this is in accord with the best-fit
3808 search rule. In effect, av->top is treated as larger (and thus
3809 less well fitting) than any other available chunk since it can
3810 be extended to be as large as necessary (up to system
3811 limitations).
3813 We require that av->top always exists (i.e., has size >=
3814 MINSIZE) after initialization, so if it would otherwise be
3815 exhausted by the current request, it is replenished. (The main
3816 reason for ensuring it exists is that we may need MINSIZE space
3817 to put in fenceposts in sysmalloc.)
3818 */
3820 victim = av->top;
3821 size = chunksize(victim);
3823 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
3824 remainder_size = size - nb;
3825 remainder = chunk_at_offset(victim, nb);
3826 av->top = remainder;
3827 set_head(victim, nb | PREV_INUSE);
3828 set_head(remainder, remainder_size | PREV_INUSE);
3830 check_malloced_chunk(victim, nb);
3831 return chunk2mem(victim);
3834 /*
3835 If no space in top, relay to handle system-dependent cases
3836 */
3837 return sYSMALLOc(nb, av);
3840 /*
3841 ------------------------------ free ------------------------------
3842 */
3844 #if __STD_C
3845 void fREe(Void_t* mem)
3846 #else
3847 void fREe(mem) Void_t* mem;
3848 #endif
3850 mstate av = get_malloc_state();
3852 mchunkptr p; /* chunk corresponding to mem */
3853 INTERNAL_SIZE_T size; /* its size */
3854 mfastbinptr* fb; /* associated fastbin */
3855 mchunkptr nextchunk; /* next contiguous chunk */
3856 INTERNAL_SIZE_T nextsize; /* its size */
3857 int nextinuse; /* true if nextchunk is used */
3858 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
3859 mchunkptr bck; /* misc temp for linking */
3860 mchunkptr fwd; /* misc temp for linking */
3862 /* free(0) has no effect */
3863 if (mem != 0) {
3864 p = mem2chunk(mem);
3865 size = chunksize(p);
3867 check_inuse_chunk(p);
3869 /*
3870 If eligible, place chunk on a fastbin so it can be found
3871 and used quickly in malloc.
3872 */
3874 if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)
3876 #if TRIM_FASTBINS
3877 /*
3878 If TRIM_FASTBINS set, don't place chunks
3879 bordering top into fastbins
3880 */
3881 && (chunk_at_offset(p, size) != av->top)
3882 #endif
3883 ) {
3885 set_fastchunks(av);
3886 fb = &(av->fastbins[fastbin_index(size)]);
3887 p->fd = *fb;
3888 *fb = p;
3891 /*
3892 Consolidate other non-mmapped chunks as they arrive.
3893 */
3895 else if (!chunk_is_mmapped(p)) {
3896 set_anychunks(av);
3898 nextchunk = chunk_at_offset(p, size);
3899 nextsize = chunksize(nextchunk);
3901 /* consolidate backward */
3902 if (!prev_inuse(p)) {
3903 prevsize = p->prev_size;
3904 size += prevsize;
3905 p = chunk_at_offset(p, -((long) prevsize));
3906 unlink(p, bck, fwd);
3909 if (nextchunk != av->top) {
3910 /* get and clear inuse bit */
3911 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
3912 set_head(nextchunk, nextsize);
3914 /* consolidate forward */
3915 if (!nextinuse) {
3916 unlink(nextchunk, bck, fwd);
3917 size += nextsize;
3920 /*
3921 Place the chunk in unsorted chunk list. Chunks are
3922 not placed into regular bins until after they have
3923 been given one chance to be used in malloc.
3924 */
3926 bck = unsorted_chunks(av);
3927 fwd = bck->fd;
3928 p->bk = bck;
3929 p->fd = fwd;
3930 bck->fd = p;
3931 fwd->bk = p;
3933 set_head(p, size | PREV_INUSE);
3934 set_foot(p, size);
3936 check_free_chunk(p);
3939 /*
3940 If the chunk borders the current high end of memory,
3941 consolidate into top
3942 */
3944 else {
3945 size += nextsize;
3946 set_head(p, size | PREV_INUSE);
3947 av->top = p;
3948 check_chunk(p);
3951 /*
3952 If freeing a large space, consolidate possibly-surrounding
3953 chunks. Then, if the total unused topmost memory exceeds trim
3954 threshold, ask malloc_trim to reduce top.
3956 Unless max_fast is 0, we don't know if there are fastbins
3957 bordering top, so we cannot tell for sure whether threshold
3958 has been reached unless fastbins are consolidated. But we
3959 don't want to consolidate on each free. As a compromise,
3960 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
3961 is reached.
3962 */
3964 if ((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
3965 if (have_fastchunks(av))
3966 malloc_consolidate(av);
3968 #ifndef MORECORE_CANNOT_TRIM
3969 if ((CHUNK_SIZE_T)(chunksize(av->top)) >=
3970 (CHUNK_SIZE_T)(av->trim_threshold))
3971 sYSTRIm(av->top_pad, av);
3972 #endif
3976 /*
3977 If the chunk was allocated via mmap, release via munmap()
3978 Note that if HAVE_MMAP is false but chunk_is_mmapped is
3979 true, then the user must have overwritten memory. There's nothing
3980 we can do to catch this error unless DEBUG is set, in which case
3981 check_inuse_chunk (above) will have triggered an error.
3982 */
3984 else {
3985 #if HAVE_MMAP
3986 int ret;
3987 INTERNAL_SIZE_T offset = p->prev_size;
3988 av->n_mmaps--;
3989 av->mmapped_mem -= (size + offset);
3990 ret = munmap((char*)p - offset, size + offset);
3991 /* munmap returns non-zero on failure */
3992 assert(ret == 0);
3993 #endif
3998 /*
3999 ------------------------- malloc_consolidate -------------------------
4001 malloc_consolidate is a specialized version of free() that tears
4002 down chunks held in fastbins. Free itself cannot be used for this
4003 purpose since, among other things, it might place chunks back onto
4004 fastbins. So, instead, we need to use a minor variant of the same
4005 code.
4007 Also, because this routine needs to be called the first time through
4008 malloc anyway, it turns out to be the perfect place to trigger
4009 initialization code.
4010 */
4012 #if __STD_C
4013 static void malloc_consolidate(mstate av)
4014 #else
4015 static void malloc_consolidate(av) mstate av;
4016 #endif
4018 mfastbinptr* fb; /* current fastbin being consolidated */
4019 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4020 mchunkptr p; /* current chunk being consolidated */
4021 mchunkptr nextp; /* next chunk to consolidate */
4022 mchunkptr unsorted_bin; /* bin header */
4023 mchunkptr first_unsorted; /* chunk to link to */
4025 /* These have same use as in free() */
4026 mchunkptr nextchunk;
4027 INTERNAL_SIZE_T size;
4028 INTERNAL_SIZE_T nextsize;
4029 INTERNAL_SIZE_T prevsize;
4030 int nextinuse;
4031 mchunkptr bck;
4032 mchunkptr fwd;
4034 /*
4035 If max_fast is 0, we know that av hasn't
4036 yet been initialized, in which case do so below
4037 */
4039 if (av->max_fast != 0) {
4040 clear_fastchunks(av);
4042 unsorted_bin = unsorted_chunks(av);
4044 /*
4045 Remove each chunk from fast bin and consolidate it, placing it
4046 then in unsorted bin. Among other reasons for doing this,
4047 placing in unsorted bin avoids needing to calculate actual bins
4048 until malloc is sure that chunks aren't immediately going to be
4049 reused anyway.
4050 */
4052 maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
4053 fb = &(av->fastbins[0]);
4054 do {
4055 if ( (p = *fb) != 0) {
4056 *fb = 0;
4058 do {
4059 check_inuse_chunk(p);
4060 nextp = p->fd;
4062 /* Slightly streamlined version of consolidation code in free() */
4063 size = p->size & ~PREV_INUSE;
4064 nextchunk = chunk_at_offset(p, size);
4065 nextsize = chunksize(nextchunk);
4067 if (!prev_inuse(p)) {
4068 prevsize = p->prev_size;
4069 size += prevsize;
4070 p = chunk_at_offset(p, -((long) prevsize));
4071 unlink(p, bck, fwd);
4074 if (nextchunk != av->top) {
4075 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4076 set_head(nextchunk, nextsize);
4078 if (!nextinuse) {
4079 size += nextsize;
4080 unlink(nextchunk, bck, fwd);
4083 first_unsorted = unsorted_bin->fd;
4084 unsorted_bin->fd = p;
4085 first_unsorted->bk = p;
4087 set_head(p, size | PREV_INUSE);
4088 p->bk = unsorted_bin;
4089 p->fd = first_unsorted;
4090 set_foot(p, size);
4093 else {
4094 size += nextsize;
4095 set_head(p, size | PREV_INUSE);
4096 av->top = p;
4099 } while ( (p = nextp) != 0);
4102 } while (fb++ != maxfb);
4104 else {
4105 malloc_init_state(av);
4106 check_malloc_state();
4110 /*
4111 ------------------------------ realloc ------------------------------
4112 */
4115 #if __STD_C
4116 Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
4117 #else
4118 Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
4119 #endif
4121 mstate av = get_malloc_state();
4123 INTERNAL_SIZE_T nb; /* padded request size */
4125 mchunkptr oldp; /* chunk corresponding to oldmem */
4126 INTERNAL_SIZE_T oldsize; /* its size */
4128 mchunkptr newp; /* chunk to return */
4129 INTERNAL_SIZE_T newsize; /* its size */
4130 Void_t* newmem; /* corresponding user mem */
4132 mchunkptr next; /* next contiguous chunk after oldp */
4134 mchunkptr remainder; /* extra space at end of newp */
4135 CHUNK_SIZE_T remainder_size; /* its size */
4137 mchunkptr bck; /* misc temp for linking */
4138 mchunkptr fwd; /* misc temp for linking */
4140 CHUNK_SIZE_T copysize; /* bytes to copy */
4141 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4142 INTERNAL_SIZE_T* s; /* copy source */
4143 INTERNAL_SIZE_T* d; /* copy destination */
4146 #ifdef REALLOC_ZERO_BYTES_FREES
4147 if (bytes == 0) {
4148 fREe(oldmem);
4149 return 0;
4151 #endif
4153 /* realloc of null is supposed to be same as malloc */
4154 if (oldmem == 0) return mALLOc(bytes);
4156 checked_request2size(bytes, nb);
4158 oldp = mem2chunk(oldmem);
4159 oldsize = chunksize(oldp);
4161 check_inuse_chunk(oldp);
4163 if (!chunk_is_mmapped(oldp)) {
4165 if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb)) {
4166 /* already big enough; split below */
4167 newp = oldp;
4168 newsize = oldsize;
4171 else {
4172 next = chunk_at_offset(oldp, oldsize);
4174 /* Try to expand forward into top */
4175 if (next == av->top &&
4176 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4177 (CHUNK_SIZE_T)(nb + MINSIZE)) {
4178 set_head_size(oldp, nb);
4179 av->top = chunk_at_offset(oldp, nb);
4180 set_head(av->top, (newsize - nb) | PREV_INUSE);
4181 return chunk2mem(oldp);
4184 /* Try to expand forward into next chunk; split off remainder below */
4185 else if (next != av->top &&
4186 !inuse(next) &&
4187 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
4188 (CHUNK_SIZE_T)(nb)) {
4189 newp = oldp;
4190 unlink(next, bck, fwd);
4193 /* allocate, copy, free */
4194 else {
4195 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
4196 if (newmem == 0)
4197 return 0; /* propagate failure */
4199 newp = mem2chunk(newmem);
4200 newsize = chunksize(newp);
4202 /*
4203 Avoid copy if newp is next chunk after oldp.
4204 */
4205 if (newp == next) {
4206 newsize += oldsize;
4207 newp = oldp;
4209 else {
4210 /*
4211 Unroll copy of <= 36 bytes (72 if 8byte sizes)
4212 We know that contents have an odd number of
4213 INTERNAL_SIZE_T-sized words; minimally 3.
4214 */
4216 copysize = oldsize - SIZE_SZ;
4217 s = (INTERNAL_SIZE_T*)(oldmem);
4218 d = (INTERNAL_SIZE_T*)(newmem);
4219 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4220 assert(ncopies >= 3);
4222 if (ncopies > 9)
4223 MALLOC_COPY(d, s, copysize);
4225 else {
4226 *(d+0) = *(s+0);
4227 *(d+1) = *(s+1);
4228 *(d+2) = *(s+2);
4229 if (ncopies > 4) {
4230 *(d+3) = *(s+3);
4231 *(d+4) = *(s+4);
4232 if (ncopies > 6) {
4233 *(d+5) = *(s+5);
4234 *(d+6) = *(s+6);
4235 if (ncopies > 8) {
4236 *(d+7) = *(s+7);
4237 *(d+8) = *(s+8);
4243 fREe(oldmem);
4244 check_inuse_chunk(newp);
4245 return chunk2mem(newp);
4250 /* If possible, free extra space in old or extended chunk */
4252 assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));
4254 remainder_size = newsize - nb;
4256 if (remainder_size < MINSIZE) { /* not enough extra to split off */
4257 set_head_size(newp, newsize);
4258 set_inuse_bit_at_offset(newp, newsize);
4260 else { /* split remainder */
4261 remainder = chunk_at_offset(newp, nb);
4262 set_head_size(newp, nb);
4263 set_head(remainder, remainder_size | PREV_INUSE);
4264 /* Mark remainder as inuse so free() won't complain */
4265 set_inuse_bit_at_offset(remainder, remainder_size);
4266 fREe(chunk2mem(remainder));
4269 check_inuse_chunk(newp);
4270 return chunk2mem(newp);
4273 /*
4274 Handle mmap cases
4275 */
4277 else {
4278 #if HAVE_MMAP
4280 #if HAVE_MREMAP
4281 INTERNAL_SIZE_T offset = oldp->prev_size;
4282 size_t pagemask = av->pagesize - 1;
4283 char *cp;
4284 CHUNK_SIZE_T sum;
4286 /* Note the extra SIZE_SZ overhead */
4287 newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
4289 /* don't need to remap if still within same page */
4290 if (oldsize == newsize - offset)
4291 return oldmem;
4293 cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
4295 if (cp != (char*)MORECORE_FAILURE) {
4297 newp = (mchunkptr)(cp + offset);
4298 set_head(newp, (newsize - offset)|IS_MMAPPED);
4300 assert(aligned_OK(chunk2mem(newp)));
4301 assert((newp->prev_size == offset));
4303 /* update statistics */
4304 sum = av->mmapped_mem += newsize - oldsize;
4305 if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
4306 av->max_mmapped_mem = sum;
4307 sum += av->sbrked_mem;
4308 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
4309 av->max_total_mem = sum;
4311 return chunk2mem(newp);
4313 #endif
4315 /* Note the extra SIZE_SZ overhead. */
4316 if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ))
4317 newmem = oldmem; /* do nothing */
4318 else {
4319 /* Must alloc, copy, free. */
4320 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
4321 if (newmem != 0) {
4322 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
4323 fREe(oldmem);
4326 return newmem;
4328 #else
4329 /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
4330 check_malloc_state();
4331 MALLOC_FAILURE_ACTION;
4332 return 0;
4333 #endif
4337 /*
4338 ------------------------------ memalign ------------------------------
4339 */
4341 #if __STD_C
4342 Void_t* mEMALIGn(size_t alignment, size_t bytes)
4343 #else
4344 Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
4345 #endif
4347 INTERNAL_SIZE_T nb; /* padded request size */
4348 char* m; /* memory returned by malloc call */
4349 mchunkptr p; /* corresponding chunk */
4350 char* brk; /* alignment point within p */
4351 mchunkptr newp; /* chunk to return */
4352 INTERNAL_SIZE_T newsize; /* its size */
4353 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4354 mchunkptr remainder; /* spare room at end to split off */
4355 CHUNK_SIZE_T remainder_size; /* its size */
4356 INTERNAL_SIZE_T size;
4358 /* If need less alignment than we give anyway, just relay to malloc */
4360 if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
4362 /* Otherwise, ensure that it is at least a minimum chunk size */
4364 if (alignment < MINSIZE) alignment = MINSIZE;
4366 /* Make sure alignment is a power of 2 (in case MINSIZE is not). */
4367 if ((alignment & (alignment - 1)) != 0) {
4368 size_t a = MALLOC_ALIGNMENT * 2;
4369 while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
4370 alignment = a;
4373 checked_request2size(bytes, nb);
4375 /*
4376 Strategy: find a spot within that chunk that meets the alignment
4377 request, and then possibly free the leading and trailing space.
4378 */
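/*
  The heart of the strategy is plain pointer arithmetic: over-allocate
  by alignment + MINSIZE, then round the returned address up to the
  next multiple of the alignment. A sketch with assumed example values:

      // m       = ...result of malloc(nb + alignment + MINSIZE)...
      // aligned = (char *)(((PTR_UINT)(m + alignment - 1)) &
      //                    -((signed long) alignment));
      // e.g. m == 0x1004, alignment == 0x100  ->  aligned == 0x1100

  If the gap in front of the aligned spot is smaller than MINSIZE it
  cannot be handed back as a free chunk, so the code below bumps
  forward by one more alignment unit in that case.
*/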
4381 /* Call malloc with worst case padding to hit alignment. */
4383 m = (char*)(mALLOc(nb + alignment + MINSIZE));
4385 if (m == 0) return 0; /* propagate failure */
4387 p = mem2chunk(m);
4389 if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */
4391 /*
4392 Find an aligned spot inside chunk. Since we need to give back
4393 leading space in a chunk of at least MINSIZE, if the first
4394 calculation places us at a spot with less than MINSIZE leader,
4395 we can move to the next aligned spot -- we've allocated enough
4396 total room so that this is always possible.
4397 */
4399 brk = (char*)mem2chunk((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
4400 -((signed long) alignment)));
4401 if ((CHUNK_SIZE_T)(brk - (char*)(p)) < MINSIZE)
4402 brk += alignment;
4404 newp = (mchunkptr)brk;
4405 leadsize = brk - (char*)(p);
4406 newsize = chunksize(p) - leadsize;
4408 /* For mmapped chunks, just adjust offset */
4409 if (chunk_is_mmapped(p)) {
4410 newp->prev_size = p->prev_size + leadsize;
4411 set_head(newp, newsize|IS_MMAPPED);
4412 return chunk2mem(newp);
4415 /* Otherwise, give back leader, use the rest */
4416 set_head(newp, newsize | PREV_INUSE);
4417 set_inuse_bit_at_offset(newp, newsize);
4418 set_head_size(p, leadsize);
4419 fREe(chunk2mem(p));
4420 p = newp;
4422 assert (newsize >= nb &&
4423 (((PTR_UINT)(chunk2mem(p))) % alignment) == 0);
4426 /* Also give back spare room at the end */
4427 if (!chunk_is_mmapped(p)) {
4428 size = chunksize(p);
4429 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
4430 remainder_size = size - nb;
4431 remainder = chunk_at_offset(p, nb);
4432 set_head(remainder, remainder_size | PREV_INUSE);
4433 set_head_size(p, nb);
4434 fREe(chunk2mem(remainder));
4438 check_inuse_chunk(p);
4439 return chunk2mem(p);
4442 /*
4443 ------------------------------ calloc ------------------------------
4444 */
4446 #if __STD_C
4447 Void_t* cALLOc(size_t n_elements, size_t elem_size)
4448 #else
4449 Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
4450 #endif
4452 mchunkptr p;
4453 CHUNK_SIZE_T clearsize;
4454 CHUNK_SIZE_T nclears;
4455 INTERNAL_SIZE_T* d;
4457 Void_t* mem = mALLOc(n_elements * elem_size);
4459 if (mem != 0) {
4460 p = mem2chunk(mem);
4462 if (!chunk_is_mmapped(p))
4464 /*
4465 Unroll clear of <= 36 bytes (72 if 8byte sizes)
4466 We know that contents have an odd number of
4467 INTERNAL_SIZE_T-sized words; minimally 3.
4468 */
4470 d = (INTERNAL_SIZE_T*)mem;
4471 clearsize = chunksize(p) - SIZE_SZ;
4472 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
4473 assert(nclears >= 3);
4475 if (nclears > 9)
4476 MALLOC_ZERO(d, clearsize);
4478 else {
4479 *(d+0) = 0;
4480 *(d+1) = 0;
4481 *(d+2) = 0;
4482 if (nclears > 4) {
4483 *(d+3) = 0;
4484 *(d+4) = 0;
4485 if (nclears > 6) {
4486 *(d+5) = 0;
4487 *(d+6) = 0;
4488 if (nclears > 8) {
4489 *(d+7) = 0;
4490 *(d+8) = 0;
4496 #if ! MMAP_CLEARS
4497 else
4499 d = (INTERNAL_SIZE_T*)mem;
4500 /*
4501 Note the additional SIZE_SZ
4502 */
4503 clearsize = chunksize(p) - 2*SIZE_SZ;
4504 MALLOC_ZERO(d, clearsize);
4506 #endif
4508 return mem;
4511 /*
4512 ------------------------------ cfree ------------------------------
4513 */
4515 #if __STD_C
4516 void cFREe(Void_t *mem)
4517 #else
4518 void cFREe(mem) Void_t *mem;
4519 #endif
4521 fREe(mem);
4524 /*
4525 ------------------------- independent_calloc -------------------------
4526 */
4528 #if __STD_C
4529 Void_t** iCALLOc(size_t n_elements, size_t elem_size, Void_t* chunks[])
4530 #else
4531 Void_t** iCALLOc(n_elements, elem_size, chunks) size_t n_elements; size_t elem_size; Void_t* chunks[];
4532 #endif
4534 size_t sz = elem_size; /* serves as 1-element array */
4535 /* opts arg of 3 means all elements are same size, and should be cleared */
4536 return iALLOc(n_elements, &sz, 3, chunks);
4539 /*
4540 ------------------------- independent_comalloc -------------------------
4541 */
4543 #if __STD_C
4544 Void_t** iCOMALLOc(size_t n_elements, size_t sizes[], Void_t* chunks[])
4545 #else
4546 Void_t** iCOMALLOc(n_elements, sizes, chunks) size_t n_elements; size_t sizes[]; Void_t* chunks[];
4547 #endif
4549 return iALLOc(n_elements, sizes, 0, chunks);
4553 /*
4554 ------------------------------ ialloc ------------------------------
4555 ialloc provides common support for independent_X routines, handling all of
4556 the combinations that can result.
4558 The opts arg has:
4559 bit 0 set if all elements are same size (using sizes[0])
4560 bit 1 set if elements should be zeroed
4561 */
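/*
  A hedged usage sketch of the two public wrappers above, assuming the
  conventional dlmalloc public names independent_calloc and
  independent_comalloc (the struct and field names here are invented
  for illustration):

      // size_t sizes[3] = { sizeof(struct hdr),      one header...
      //                     sizeof(struct rec),      ...plus two records,
      //                     sizeof(struct rec) };    carved from one chunk
      // void  *ptrs[3];
      //
      // if (independent_comalloc(3, sizes, ptrs) != 0) {
      //     struct hdr *h  = ptrs[0];
      //     struct rec *r0 = ptrs[1];
      //     struct rec *r1 = ptrs[2];
      //     ...use them; each may later be free()d individually...
      // }
      //
      // independent_calloc(n, size, ptrs) is the all-same-size,
      // zero-filled variant (opts == 3 in the iCALLOc wrapper above).
*/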
4564 #if __STD_C
4565 static Void_t** iALLOc(size_t n_elements,
4566 size_t* sizes,
4567 int opts,
4568 Void_t* chunks[])
4569 #else
4570 static Void_t** iALLOc(n_elements, sizes, opts, chunks) size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
4571 #endif
4573 mstate av = get_malloc_state();
4574 INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */
4575 INTERNAL_SIZE_T contents_size; /* total size of elements */
4576 INTERNAL_SIZE_T array_size; /* request size of pointer array */
4577 Void_t* mem; /* malloced aggregate space */
4578 mchunkptr p; /* corresponding chunk */
4579 INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
4580 Void_t** marray; /* either "chunks" or malloced ptr array */
4581 mchunkptr array_chunk; /* chunk for malloced ptr array */
4582 int mmx; /* to disable mmap */
4583 INTERNAL_SIZE_T size;
4584 size_t i;
4586 /* Ensure initialization */
4587 if (av->max_fast == 0) malloc_consolidate(av);
4589 /* compute array length, if needed */
4590 if (chunks != 0) {
4591 if (n_elements == 0)
4592 return chunks; /* nothing to do */
4593 marray = chunks;
4594 array_size = 0;
4596 else {
4597 /* if empty req, must still return chunk representing empty array */
4598 if (n_elements == 0)
4599 return (Void_t**) mALLOc(0);
4600 marray = 0;
4601 array_size = request2size(n_elements * (sizeof(Void_t*)));
4604 /* compute total element size */
4605 if (opts & 0x1) { /* all-same-size */
4606 element_size = request2size(*sizes);
4607 contents_size = n_elements * element_size;
4609 else { /* add up all the sizes */
4610 element_size = 0;
4611 contents_size = 0;
4612 for (i = 0; i != n_elements; ++i)
4613 contents_size += request2size(sizes[i]);
4616 /* subtract out alignment bytes from total to minimize overallocation */
4617 size = contents_size + array_size - MALLOC_ALIGN_MASK;
4619 /*
4620 Allocate the aggregate chunk.
4621 But first disable mmap so malloc won't use it, since
4622 we would not be able to later free/realloc space internal
4623 to a segregated mmap region.
4624 */
4625 mmx = av->n_mmaps_max; /* disable mmap */
4626 av->n_mmaps_max = 0;
4627 mem = mALLOc(size);
4628 av->n_mmaps_max = mmx; /* reset mmap */
4629 if (mem == 0)
4630 return 0;
4632 p = mem2chunk(mem);
4633 assert(!chunk_is_mmapped(p));
4634 remainder_size = chunksize(p);
4636 if (opts & 0x2) { /* optionally clear the elements */
4637 MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
4640 /* If not provided, allocate the pointer array as final part of chunk */
4641 if (marray == 0) {
4642 array_chunk = chunk_at_offset(p, contents_size);
4643 marray = (Void_t**) (chunk2mem(array_chunk));
4644 set_head(array_chunk, (remainder_size - contents_size) | PREV_INUSE);
4645 remainder_size = contents_size;
4648 /* split out elements */
4649 for (i = 0; ; ++i) {
4650 marray[i] = chunk2mem(p);
4651 if (i != n_elements-1) {
4652 if (element_size != 0)
4653 size = element_size;
4654 else
4655 size = request2size(sizes[i]);
4656 remainder_size -= size;
4657 set_head(p, size | PREV_INUSE);
4658 p = chunk_at_offset(p, size);
4660 else { /* the final element absorbs any overallocation slop */
4661 set_head(p, remainder_size | PREV_INUSE);
4662 break;
4666 #if DEBUG
4667 if (marray != chunks) {
4668 /* final element must have exactly exhausted chunk */
4669 if (element_size != 0)
4670 assert(remainder_size == element_size);
4671 else
4672 assert(remainder_size == request2size(sizes[i]));
4673 check_inuse_chunk(mem2chunk(marray));
4676 for (i = 0; i != n_elements; ++i)
4677 check_inuse_chunk(mem2chunk(marray[i]));
4678 #endif
4680 return marray;
4684 /*
4685 ------------------------------ valloc ------------------------------
4686 */
4688 #if __STD_C
4689 Void_t* vALLOc(size_t bytes)
4690 #else
4691 Void_t* vALLOc(bytes) size_t bytes;
4692 #endif
4694 /* Ensure initialization */
4695 mstate av = get_malloc_state();
4696 if (av->max_fast == 0) malloc_consolidate(av);
4697 return mEMALIGn(av->pagesize, bytes);
4700 /*
4701 ------------------------------ pvalloc ------------------------------
4702 */
4705 #if __STD_C
4706 Void_t* pVALLOc(size_t bytes)
4707 #else
4708 Void_t* pVALLOc(bytes) size_t bytes;
4709 #endif
4711 mstate av = get_malloc_state();
4712 size_t pagesz;
4714 /* Ensure initialization */
4715 if (av->max_fast == 0) malloc_consolidate(av);
4716 pagesz = av->pagesize;
4717 return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
4721 /*
4722 ------------------------------ malloc_trim ------------------------------
4723 */
4725 #if __STD_C
4726 int mTRIm(size_t pad)
4727 #else
4728 int mTRIm(pad) size_t pad;
4729 #endif
4731 mstate av = get_malloc_state();
4732 /* Ensure initialization/consolidation */
4733 malloc_consolidate(av);
4735 #ifndef MORECORE_CANNOT_TRIM
4736 return sYSTRIm(pad, av);
4737 #else
4738 return 0;
4739 #endif
4743 /*
4744 ------------------------- malloc_usable_size -------------------------
4745 */
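/*
  Usage note (a sketch; malloc_usable_size is the conventional public
  name for mUSABLe, via the #define block near the top of this file):

      // char  *p = malloc(13);
      // size_t n = malloc_usable_size(p);   at least 13; the full
      //                                     payload of the chunk
      // ...all n bytes may safely be used...
      // malloc_usable_size(0) == 0
*/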
4747 #if __STD_C
4748 size_t mUSABLe(Void_t* mem)
4749 #else
4750 size_t mUSABLe(mem) Void_t* mem;
4751 #endif
4753 mchunkptr p;
4754 if (mem != 0) {
4755 p = mem2chunk(mem);
4756 if (chunk_is_mmapped(p))
4757 return chunksize(p) - 2*SIZE_SZ;
4758 else if (inuse(p))
4759 return chunksize(p) - SIZE_SZ;
4761 return 0;
4764 /*
4765 ------------------------------ mallinfo ------------------------------
4766 */
4768 struct mallinfo mALLINFo()
4770 mstate av = get_malloc_state();
4771 struct mallinfo mi;
4772 int i;
4773 mbinptr b;
4774 mchunkptr p;
4775 INTERNAL_SIZE_T avail;
4776 INTERNAL_SIZE_T fastavail;
4777 int nblocks;
4778 int nfastblocks;
4780 /* Ensure initialization */
4781 if (av->top == 0) malloc_consolidate(av);
4783 check_malloc_state();
4785 /* Account for top */
4786 avail = chunksize(av->top);
4787 nblocks = 1; /* top always exists */
4789 /* traverse fastbins */
4790 nfastblocks = 0;
4791 fastavail = 0;
4793 for (i = 0; i < NFASTBINS; ++i) {
4794 for (p = av->fastbins[i]; p != 0; p = p->fd) {
4795 ++nfastblocks;
4796 fastavail += chunksize(p);
4800 avail += fastavail;
4802 /* traverse regular bins */
4803 for (i = 1; i < NBINS; ++i) {
4804 b = bin_at(av, i);
4805 for (p = last(b); p != b; p = p->bk) {
4806 ++nblocks;
4807 avail += chunksize(p);
4811 mi.smblks = nfastblocks;
4812 mi.ordblks = nblocks;
4813 mi.fordblks = avail;
4814 mi.uordblks = av->sbrked_mem - avail;
4815 mi.arena = av->sbrked_mem;
4816 mi.hblks = av->n_mmaps;
4817 mi.hblkhd = av->mmapped_mem;
4818 mi.fsmblks = fastavail;
4819 mi.keepcost = chunksize(av->top);
4820 mi.usmblks = av->max_total_mem;
4821 return mi;
4824 /*
4825 ------------------------------ malloc_stats ------------------------------
4826 */
4828 void mSTATs()
4830 struct mallinfo mi = mALLINFo();
4832 #ifdef WIN32
4834 CHUNK_SIZE_T free, reserved, committed;
4835 vminfo (&free, &reserved, &committed);
4836 fprintf(stderr, "free bytes = %10lu\n",
4837 free);
4838 fprintf(stderr, "reserved bytes = %10lu\n",
4839 reserved);
4840 fprintf(stderr, "committed bytes = %10lu\n",
4841 committed);
4843 #endif
4845 /* RN XXX */
4846 printf("max system bytes = %10lu\n",
4847 (CHUNK_SIZE_T)(mi.usmblks));
4848 printf("system bytes = %10lu\n",
4849 (CHUNK_SIZE_T)(mi.arena + mi.hblkhd));
4850 printf("in use bytes = %10lu\n",
4851 (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
4853 #ifdef WIN32
4855 CHUNK_SIZE_T kernel, user;
4856 if (cpuinfo (TRUE, &kernel, &user)) {
4857 fprintf(stderr, "kernel ms = %10lu\n",
4858 kernel);
4859 fprintf(stderr, "user ms = %10lu\n",
4860 user);
4863 #endif
4867 /*
4868 ------------------------------ mallopt ------------------------------
4869 */
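/*
  Example tuning calls (a sketch; mallopt is the conventional public
  name for mALLOPt, and the M_* codes are the ones defined near the
  top of this file):

      // mallopt(M_MXFAST, 32);           only requests up to ~32 bytes
      //                                  are served from fastbins
      // mallopt(M_TOP_PAD, 64*1024);     ask MORECORE for 64 KiB of
      //                                  extra slack on each extension
      // mallopt(M_TRIM_THRESHOLD, 128*1024);   accepted, but has no
      //                                  effect here because this build
      //                                  defines MORECORE_CANNOT_TRIM
      //
      // Each call returns 1 on success and 0 for an unknown parameter
      // or an out-of-range value (e.g. M_MXFAST above MAX_FAST_SIZE).
*/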
4871 #if __STD_C
4872 int mALLOPt(int param_number, int value)
4873 #else
4874 int mALLOPt(param_number, value) int param_number; int value;
4875 #endif
4877 mstate av = get_malloc_state();
4878 /* Ensure initialization/consolidation */
4879 malloc_consolidate(av);
4881 switch(param_number) {
4882 case M_MXFAST:
4883 if (value >= 0 && value <= MAX_FAST_SIZE) {
4884 set_max_fast(av, value);
4885 return 1;
4887 else
4888 return 0;
4890 case M_TRIM_THRESHOLD:
4891 av->trim_threshold = value;
4892 return 1;
4894 case M_TOP_PAD:
4895 av->top_pad = value;
4896 return 1;
4898 case M_MMAP_THRESHOLD:
4899 av->mmap_threshold = value;
4900 return 1;
4902 case M_MMAP_MAX:
4903 #if !HAVE_MMAP
4904 if (value != 0)
4905 return 0;
4906 #endif
4907 av->n_mmaps_max = value;
4908 return 1;
4910 default:
4911 return 0;
4916 /*
4917 -------------------- Alternative MORECORE functions --------------------
4918 */
4921 /*
4922 General Requirements for MORECORE.
4924 The MORECORE function must have the following properties:
4926 If MORECORE_CONTIGUOUS is false:
4928 * MORECORE must allocate in multiples of pagesize. It will
4929 only be called with arguments that are multiples of pagesize.
4931 * MORECORE(0) must return an address that is at least
4932 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
4934 else (i.e. If MORECORE_CONTIGUOUS is true):
4936 * Consecutive calls to MORECORE with positive arguments
4937 return increasing addresses, indicating that space has been
4938 contiguously extended.
4940 * MORECORE need not allocate in multiples of pagesize.
4941 Calls to MORECORE need not have args of multiples of pagesize.
4943 * MORECORE need not page-align.
4945 In either case:
4947 * MORECORE may allocate more memory than requested. (Or even less,
4948 but this will generally result in a malloc failure.)
4950 * MORECORE must not allocate memory when given argument zero, but
4951 instead return one past the end address of memory from previous
4952 nonzero call. This malloc does NOT call MORECORE(0)
4953 until at least one call with positive arguments is made, so
4954 the initial value returned is not important.
4956 * Even though consecutive calls to MORECORE need not return contiguous
4957 addresses, it must be OK for malloc'ed chunks to span multiple
4958 regions in those cases where they do happen to be contiguous.
4960 * MORECORE need not handle negative arguments -- it may instead
4961 just return MORECORE_FAILURE when given negative arguments.
4962 Negative arguments are always multiples of pagesize. MORECORE
4963 must not misinterpret negative args as large positive unsigned
4964 args. You can suppress all such calls from even occurring by defining
4965 MORECORE_CANNOT_TRIM.
4967 There is some variation across systems about the type of the
4968 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
4969 actually be size_t, because sbrk supports negative args, so it is
4970 normally the signed type of the same width as size_t (sometimes
4971 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
4972 matter though. Internally, we use "long" as arguments, which should
4973 work across all reasonable possibilities.
4975 Additionally, if MORECORE ever returns failure for a positive
4976 request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
4977 system allocator. This is a useful backup strategy for systems with
4978 holes in address spaces -- in this case sbrk cannot contiguously
4979 expand the heap, but mmap may be able to map noncontiguous space.
4981 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
4982 a function that always returns MORECORE_FAILURE.
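For instance, a minimal sketch (not active code in this file):

    static void *failing_morecore(long increment)
    {
        return (void *) MORECORE_FAILURE;   // never extend the heap
    }
    #define MORECORE failing_morecore

With HAVE_MMAP enabled, every request would then be served by mmap.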
4984 Malloc only has limited ability to detect failures of MORECORE
4985 to supply contiguous space when it says it can. In particular,
4986 multithreaded programs that do not use locks may result in
4987 race conditions across calls to MORECORE that result in gaps
4988 that cannot be detected as such, and subsequent corruption.
4990 If you are using this malloc with something other than sbrk (or its
4991 emulation) to supply memory regions, you probably want to set
4992 MORECORE_CONTIGUOUS as false. As an example, here is a custom
4993 allocator kindly contributed for pre-OSX macOS. It uses virtually
4994 but not necessarily physically contiguous non-paged memory (locked
4995 in, present and won't get swapped out). You can use it by
4996 uncommenting this section, adding some #includes, and setting up the
4997 appropriate defines above:
4999 #define MORECORE osMoreCore
5000 #define MORECORE_CONTIGUOUS 0
5002 There is also a shutdown routine that should somehow be called for
5003 cleanup upon program exit.
5005 #define MAX_POOL_ENTRIES 100
5006 #define MINIMUM_MORECORE_SIZE (64 * 1024)
5007 static int next_os_pool;
5008 void *our_os_pools[MAX_POOL_ENTRIES];
5010 void *osMoreCore(int size)
5012 void *ptr = 0;
5013 static void *sbrk_top = 0;
5015 if (size > 0)
5017 if (size < MINIMUM_MORECORE_SIZE)
5018 size = MINIMUM_MORECORE_SIZE;
5019 if (CurrentExecutionLevel() == kTaskLevel)
5020 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5021 if (ptr == 0)
5023 return (void *) MORECORE_FAILURE;
5025 // save ptrs so they can be freed during cleanup
5026 our_os_pools[next_os_pool] = ptr;
5027 next_os_pool++;
5028 ptr = (void *) ((((CHUNK_SIZE_T) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5029 sbrk_top = (char *) ptr + size;
5030 return ptr;
5032 else if (size < 0)
5034 // we don't currently support shrink behavior
5035 return (void *) MORECORE_FAILURE;
5037 else
5039 return sbrk_top;
5043 // cleanup any allocated memory pools
5044 // called as last thing before shutting down driver
5046 void osCleanupMem(void)
5048 void **ptr;
5050 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5051 if (*ptr)
5053 PoolDeallocate(*ptr);
5054 *ptr = 0;
5058 */
5061 /*
5062 --------------------------------------------------------------
5064 Emulation of sbrk for win32.
5065 Donated by J. Walter <Walter@GeNeSys-e.de>.
5066 For additional information about this code, and malloc on Win32, see
5067 http://www.genesys-e.de/jwalter/
5068 */
5071 #ifdef WIN32
5073 #ifdef _DEBUG
5074 /* #define TRACE */
5075 #endif
5077 /* Support for USE_MALLOC_LOCK */
5078 #ifdef USE_MALLOC_LOCK
5080 /* Wait for spin lock */
5081 static int slwait (int *sl) {
5082 while (InterlockedCompareExchange ((void **) sl, (void *) 1, (void *) 0) != 0)
5083 Sleep (0);
5084 return 0;
5087 /* Release spin lock */
5088 static int slrelease (int *sl) {
5089 InterlockedExchange (sl, 0);
5090 return 0;
5093 #ifdef NEEDED
5094 /* Spin lock for emulation code */
5095 static int g_sl;
5096 #endif
5098 #endif /* USE_MALLOC_LOCK */
5100 /* getpagesize for windows */
5101 static long getpagesize (void) {
5102 static long g_pagesize = 0;
5103 if (! g_pagesize) {
5104 SYSTEM_INFO system_info;
5105 GetSystemInfo (&system_info);
5106 g_pagesize = system_info.dwPageSize;
5108 return g_pagesize;
5110 static long getregionsize (void) {
5111 static long g_regionsize = 0;
5112 if (! g_regionsize) {
5113 SYSTEM_INFO system_info;
5114 GetSystemInfo (&system_info);
5115 g_regionsize = system_info.dwAllocationGranularity;
5117 return g_regionsize;
5120 /* A region list entry */
5121 typedef struct _region_list_entry {
5122 void *top_allocated;
5123 void *top_committed;
5124 void *top_reserved;
5125 long reserve_size;
5126 struct _region_list_entry *previous;
5127 } region_list_entry;
5129 /* Allocate and link a region entry in the region list */
5130 static int region_list_append (region_list_entry **last, void *base_reserved, long reserve_size) {
5131 region_list_entry *next = HeapAlloc (GetProcessHeap (), 0, sizeof (region_list_entry));
5132 if (! next)
5133 return FALSE;
5134 next->top_allocated = (char *) base_reserved;
5135 next->top_committed = (char *) base_reserved;
5136 next->top_reserved = (char *) base_reserved + reserve_size;
5137 next->reserve_size = reserve_size;
5138 next->previous = *last;
5139 *last = next;
5140 return TRUE;
5142 /* Free and unlink the last region entry from the region list */
5143 static int region_list_remove (region_list_entry **last) {
5144 region_list_entry *previous = (*last)->previous;
5145 if (! HeapFree (GetProcessHeap (), sizeof (region_list_entry), *last))
5146 return FALSE;
5147 *last = previous;
5148 return TRUE;
5151 #define CEIL(size,to) (((size)+(to)-1)&~((to)-1))
5152 #define FLOOR(size,to) ((size)&~((to)-1))
5154 #define SBRK_SCALE 0
5155 /* #define SBRK_SCALE 1 */
5156 /* #define SBRK_SCALE 2 */
5157 /* #define SBRK_SCALE 4 */
5159 /* sbrk for windows */
5160 static void *sbrk (long size) {
5161 static long g_pagesize, g_my_pagesize;
5162 static long g_regionsize, g_my_regionsize;
5163 static region_list_entry *g_last;
5164 void *result = (void *) MORECORE_FAILURE;
5165 #ifdef TRACE
5166 printf ("sbrk %d\n", size);
5167 #endif
5168 #if defined (USE_MALLOC_LOCK) && defined (NEEDED)
5169 /* Wait for spin lock */
5170 slwait (&g_sl);
5171 #endif
5172 /* First time initialization */
5173 if (! g_pagesize) {
5174 g_pagesize = getpagesize ();
5175 g_my_pagesize = g_pagesize << SBRK_SCALE;
5177 if (! g_regionsize) {
5178 g_regionsize = getregionsize ();
5179 g_my_regionsize = g_regionsize << SBRK_SCALE;
5181 if (! g_last) {
5182 if (! region_list_append (&g_last, 0, 0))
5183 goto sbrk_exit;
5185 /* Assert invariants */
5186 assert (g_last);
5187 assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
5188 g_last->top_allocated <= g_last->top_committed);
5189 assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
5190 g_last->top_committed <= g_last->top_reserved &&
5191 (unsigned) g_last->top_committed % g_pagesize == 0);
5192 assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
5193 assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
5194 /* Allocation requested? */
5195 if (size >= 0) {
5196 /* Allocation size is the requested size */
5197 long allocate_size = size;
5198 /* Compute the size to commit */
5199 long to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
5200 /* Do we reach the commit limit? */
5201 if (to_commit > 0) {
5202 /* Round size to commit */
5203 long commit_size = CEIL (to_commit, g_my_pagesize);
5204 /* Compute the size to reserve */
5205 long to_reserve = (char *) g_last->top_committed + commit_size - (char *) g_last->top_reserved;
5206 /* Do we reach the reserve limit? */
5207 if (to_reserve > 0) {
5208 /* Compute the remaining size to commit in the current region */
5209 long remaining_commit_size = (char *) g_last->top_reserved - (char *) g_last->top_committed;
5210 if (remaining_commit_size > 0) {
5211 /* Assert preconditions */
5212 assert ((unsigned) g_last->top_committed % g_pagesize == 0);
5213 assert (0 < remaining_commit_size && remaining_commit_size % g_pagesize == 0); {
5214 /* Commit this */
5215 void *base_committed = VirtualAlloc (g_last->top_committed, remaining_commit_size,
5216 MEM_COMMIT, PAGE_READWRITE);
5217 /* Check returned pointer for consistency */
5218 if (base_committed != g_last->top_committed)
5219 goto sbrk_exit;
5220 /* Assert postconditions */
5221 assert ((unsigned) base_committed % g_pagesize == 0);
5222 #ifdef TRACE
5223 printf ("Commit %p %d\n", base_committed, remaining_commit_size);
5224 #endif
5225 /* Adjust the regions commit top */
5226 g_last->top_committed = (char *) base_committed + remaining_commit_size;
5228 } {
5229 /* Now we are going to search and reserve. */
5230 int contiguous = -1;
5231 int found = FALSE;
5232 MEMORY_BASIC_INFORMATION memory_info;
5233 void *base_reserved;
5234 long reserve_size;
5235 do {
5236 /* Assume contiguous memory */
5237 contiguous = TRUE;
5238 /* Round size to reserve */
5239 reserve_size = CEIL (to_reserve, g_my_regionsize);
5240 /* Start with the current region's top */
5241 memory_info.BaseAddress = g_last->top_reserved;
5242 /* Assert preconditions */
5243 assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
5244 assert (0 < reserve_size && reserve_size % g_regionsize == 0);
5245 while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
5246 /* Assert postconditions */
5247 assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
5248 #ifdef TRACE
5249 printf ("Query %p %d %s\n", memory_info.BaseAddress, memory_info.RegionSize,
5250 memory_info.State == MEM_FREE ? "FREE":
5251 (memory_info.State == MEM_RESERVE ? "RESERVED":
5252 (memory_info.State == MEM_COMMIT ? "COMMITTED": "?")));
5253 #endif
5254 /* Region is free, well aligned and big enough: we are done */
5255 if (memory_info.State == MEM_FREE &&
5256 (unsigned) memory_info.BaseAddress % g_regionsize == 0 &&
5257 memory_info.RegionSize >= (unsigned) reserve_size) {
5258 found = TRUE;
5259 break;
5261 /* From now on we can't get contiguous memory! */
5262 contiguous = FALSE;
5263 /* Recompute size to reserve */
5264 reserve_size = CEIL (allocate_size, g_my_regionsize);
5265 memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
5266 /* Assert preconditions */
5267 assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
5268 assert (0 < reserve_size && reserve_size % g_regionsize == 0);
5270 /* Search failed? */
5271 if (! found)
5272 goto sbrk_exit;
5273 /* Assert preconditions */
5274 assert ((unsigned) memory_info.BaseAddress % g_regionsize == 0);
5275 assert (0 < reserve_size && reserve_size % g_regionsize == 0);
5276 /* Try to reserve this */
5277 base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size,
5278 MEM_RESERVE, PAGE_NOACCESS);
5279 if (! base_reserved) {
5280 int rc = GetLastError ();
5281 if (rc != ERROR_INVALID_ADDRESS)
5282 goto sbrk_exit;
5284 /* A null pointer signals (hopefully) a race condition with another thread. */
5285 /* In this case, we try again. */
5286 } while (! base_reserved);
5287 /* Check returned pointer for consistency */
5288 if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress)
5289 goto sbrk_exit;
5290 /* Assert postconditions */
5291 assert ((unsigned) base_reserved % g_regionsize == 0);
5292 #ifdef TRACE
5293 printf ("Reserve %p %d\n", base_reserved, reserve_size);
5294 #endif
5295 /* Did we get contiguous memory? */
5296 if (contiguous) {
5297 long start_size = (char *) g_last->top_committed - (char *) g_last->top_allocated;
5298 /* Adjust allocation size */
5299 allocate_size -= start_size;
5300 /* Adjust the regions allocation top */
5301 g_last->top_allocated = g_last->top_committed;
5302 /* Recompute the size to commit */
5303 to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
5304 /* Round size to commit */
5305 commit_size = CEIL (to_commit, g_my_pagesize);
5307 /* Append the new region to the list */
5308 if (! region_list_append (&g_last, base_reserved, reserve_size))
5309 goto sbrk_exit;
5310 /* Didn't we get contiguous memory? */
5311 if (! contiguous) {
5312 /* Recompute the size to commit */
5313 to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
5314 /* Round size to commit */
5315 commit_size = CEIL (to_commit, g_my_pagesize);
5319 /* Assert preconditions */
5320 assert ((unsigned) g_last->top_committed % g_pagesize == 0);
5321 assert (0 < commit_size && commit_size % g_pagesize == 0); {
5322 /* Commit this */
5323 void *base_committed = VirtualAlloc (g_last->top_committed, commit_size,
5324 MEM_COMMIT, PAGE_READWRITE);
5325 /* Check returned pointer for consistency */
5326 if (base_committed != g_last->top_committed)
5327 goto sbrk_exit;
5328 /* Assert postconditions */
5329 assert ((unsigned) base_committed % g_pagesize == 0);
5330 #ifdef TRACE
5331 printf ("Commit %p %d\n", base_committed, commit_size);
5332 #endif
5333 /* Adjust the regions commit top */
5334 g_last->top_committed = (char *) base_committed + commit_size;
5337 /* Adjust the regions allocation top */
5338 g_last->top_allocated = (char *) g_last->top_allocated + allocate_size;
5339 result = (char *) g_last->top_allocated - size;
5340 /* Deallocation requested? */
5341 } else if (size < 0) {
5342 long deallocate_size = - size;
5343 /* As long as we have a region to release */
5344 while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) {
5345 /* Get the size to release */
5346 long release_size = g_last->reserve_size;
5347 /* Get the base address */
5348 void *base_reserved = (char *) g_last->top_reserved - release_size;
5349 /* Assert preconditions */
5350 assert ((unsigned) base_reserved % g_regionsize == 0);
5351 assert (0 < release_size && release_size % g_regionsize == 0); {
5352 /* Release this */
5353 int rc = VirtualFree (base_reserved, 0,
5354 MEM_RELEASE);
5355 /* Check returned code for consistency */
5356 if (! rc)
5357 goto sbrk_exit;
5358 #ifdef TRACE
5359 printf ("Release %p %d\n", base_reserved, release_size);
5360 #endif
5362 /* Adjust deallocation size */
5363 deallocate_size -= (char *) g_last->top_allocated - (char *) base_reserved;
5364 /* Remove the old region from the list */
5365 if (! region_list_remove (&g_last))
5366 goto sbrk_exit;
5367 } {
5368 /* Compute the size to decommit */
5369 long to_decommit = (char *) g_last->top_committed - ((char *) g_last->top_allocated - deallocate_size);
5370 if (to_decommit >= g_my_pagesize) {
5371 /* Compute the size to decommit */
5372 long decommit_size = FLOOR (to_decommit, g_my_pagesize);
5373 /* Compute the base address */
5374 void *base_committed = (char *) g_last->top_committed - decommit_size;
5375 /* Assert preconditions */
5376 assert ((unsigned) base_committed % g_pagesize == 0);
5377 assert (0 < decommit_size && decommit_size % g_pagesize == 0); {
5378 /* Decommit this */
5379 int rc = VirtualFree ((char *) base_committed, decommit_size,
5380 MEM_DECOMMIT);
5381 /* Check returned code for consistency */
5382 if (! rc)
5383 goto sbrk_exit;
5384 #ifdef TRACE
5385 printf ("Decommit %p %d\n", base_committed, decommit_size);
5386 #endif
5388 /* Adjust deallocation size and regions commit and allocate top */
5389 deallocate_size -= (char *) g_last->top_allocated - (char *) base_committed;
5390 g_last->top_committed = base_committed;
5391 g_last->top_allocated = base_committed;
5394 /* Adjust regions allocate top */
5395 g_last->top_allocated = (char *) g_last->top_allocated - deallocate_size;
5396 /* Check for underflow */
5397 if ((char *) g_last->top_reserved - g_last->reserve_size > (char *) g_last->top_allocated ||
5398 g_last->top_allocated > g_last->top_committed) {
5399 /* Adjust regions allocate top */
5400 g_last->top_allocated = (char *) g_last->top_reserved - g_last->reserve_size;
5401 goto sbrk_exit;
5403 result = g_last->top_allocated;
5405 /* Assert invariants */
5406 assert (g_last);
5407 assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
5408 g_last->top_allocated <= g_last->top_committed);
5409 assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
5410 g_last->top_committed <= g_last->top_reserved &&
5411 (unsigned) g_last->top_committed % g_pagesize == 0);
5412 assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
5413 assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
5415 sbrk_exit:
5416 #if defined (USE_MALLOC_LOCK) && defined (NEEDED)
5417 /* Release spin lock */
5418 slrelease (&g_sl);
5419 #endif
5420 return result;
5423 /* mmap for windows */
5424 static void *mmap (void *ptr, long size, long prot, long type, long handle, long arg) {
5425 static long g_pagesize;
5426 static long g_regionsize;
5427 #ifdef TRACE
5428 printf ("mmap %d\n", size);
5429 #endif
5430 #if defined (USE_MALLOC_LOCK) && defined (NEEDED)
5431 /* Wait for spin lock */
5432 slwait (&g_sl);
5433 #endif
5434 /* First time initialization */
5435 if (! g_pagesize)
5436 g_pagesize = getpagesize ();
5437 if (! g_regionsize)
5438 g_regionsize = getregionsize ();
5439 /* Assert preconditions */
5440 assert ((unsigned) ptr % g_regionsize == 0);
5441 assert (size % g_pagesize == 0);
5442 /* Allocate this */
5443 ptr = VirtualAlloc (ptr, size,
5444 MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, PAGE_READWRITE);
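/* MEM_RESERVE | MEM_COMMIT reserves and commits the range in one call;
   MEM_TOP_DOWN requests the highest available address, presumably so that
   mmap-backed chunks stay clear of the upward-growing sbrk-emulation
   regions. */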
5445 if (! ptr) {
5446 ptr = (void *) MORECORE_FAILURE;
5447 goto mmap_exit;
5448 }
5449 /* Assert postconditions */
5450 assert ((unsigned) ptr % g_regionsize == 0);
5451 #ifdef TRACE
5452 printf ("Commit %p %d\n", ptr, size);
5453 #endif
5454 mmap_exit:
5455 #if defined (USE_MALLOC_LOCK) && defined (NEEDED)
5456 /* Release spin lock */
5457 slrelease (&g_sl);
5458 #endif
5459 return ptr;
5460 }
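/*
 * Illustrative sketch (not part of the original source): how a caller
 * might use this mmap emulation.  Only ptr and size are honoured; the
 * prot/type/handle/arg parameters exist for signature compatibility and
 * are ignored, and failure is reported as MORECORE_FAILURE rather than a
 * POSIX MAP_FAILED value.  The 64 KB request size below is hypothetical.
 */
#if 0
static void *example_mmap_usage (void)
{
    void *p = mmap (0, 64 * 1024, 0, 0, -1, 0);   /* hypothetical request */
    if (p == (void *) MORECORE_FAILURE)
        return 0;                     /* VirtualAlloc could not satisfy it */
    return p;                         /* committed, read/write, region-aligned */
}
#endif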
5462 /* munmap for windows */
5463 static long munmap (void *ptr, long size) {
5464 static long g_pagesize;
5465 static long g_regionsize;
5466 int rc = MUNMAP_FAILURE;
5467 #ifdef TRACE
5468 printf ("munmap %p %d\n", ptr, size);
5469 #endif
5470 #if defined (USE_MALLOC_LOCK) && defined (NEEDED)
5471 /* Wait for spin lock */
5472 slwait (&g_sl);
5473 #endif
5474 /* First time initialization */
5475 if (! g_pagesize)
5476 g_pagesize = getpagesize ();
5477 if (! g_regionsize)
5478 g_regionsize = getregionsize ();
5479 /* Assert preconditions */
5480 assert ((unsigned) ptr % g_regionsize == 0);
5481 assert (size % g_pagesize == 0);
5482 /* Free this */
5483 if (! VirtualFree (ptr, 0,
5484 MEM_RELEASE))
5485 goto munmap_exit;
5486 rc = 0;
5487 #ifdef TRACE
5488 printf ("Release %p %d\n", ptr, size);
5489 #endif
5490 munmap_exit:
5491 #if defined (USE_MALLOC_LOCK) && defined (NEEDED)
5492 /* Release spin lock */
5493 slrelease (&g_sl);
5494 #endif
5495 return rc;
5496 }
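/* Note: VirtualFree with MEM_RELEASE requires a size of 0 and a pointer
   that is the exact base address returned by VirtualAlloc, so this munmap
   emulation can only unmap whole mappings, never partial ranges.  It
   returns 0 on success (as POSIX munmap does) and MUNMAP_FAILURE
   otherwise. */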
5498 static void vminfo (CHUNK_SIZE_T *free, CHUNK_SIZE_T *reserved, CHUNK_SIZE_T *committed) {
5499 MEMORY_BASIC_INFORMATION memory_info;
5500 memory_info.BaseAddress = 0;
5501 *free = *reserved = *committed = 0;
5502 while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
5503 switch (memory_info.State) {
5504 case MEM_FREE:
5505 *free += memory_info.RegionSize;
5506 break;
5507 case MEM_RESERVE:
5508 *reserved += memory_info.RegionSize;
5509 break;
5510 case MEM_COMMIT:
5511 *committed += memory_info.RegionSize;
5512 break;
5513 }
5514 memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
5515 }
5516 }
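/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller of vminfo.  Because vminfo walks the entire address space with
 * VirtualQuery and tallies RegionSize by state, the totals describe the
 * whole process, not just memory owned by this allocator.
 */
#if 0
static void example_vminfo_report (void)
{
    CHUNK_SIZE_T free, reserved, committed;
    vminfo (&free, &reserved, &committed);
    printf ("free=%lu reserved=%lu committed=%lu\n",
            (unsigned long) free, (unsigned long) reserved,
            (unsigned long) committed);
}
#endif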
5518 static int cpuinfo (int whole, CHUNK_SIZE_T *kernel, CHUNK_SIZE_T *user) {
5519 if (whole) {
5520 __int64 creation64, exit64, kernel64, user64;
5521 int rc = GetProcessTimes (GetCurrentProcess (),
5522 (FILETIME *) &creation64,
5523 (FILETIME *) &exit64,
5524 (FILETIME *) &kernel64,
5525 (FILETIME *) &user64);
5526 if (! rc) {
5527 *kernel = 0;
5528 *user = 0;
5529 return FALSE;
5530 }
5531 *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
5532 *user = (CHUNK_SIZE_T) (user64 / 10000);
5533 return TRUE;
5534 } else {
5535 __int64 creation64, exit64, kernel64, user64;
5536 int rc = GetThreadTimes (GetCurrentThread (),
5537 (FILETIME *) &creation64,
5538 (FILETIME *) &exit64,
5539 (FILETIME *) &kernel64,
5540 (FILETIME *) &user64);
5541 if (! rc) {
5542 *kernel = 0;
5543 *user = 0;
5544 return FALSE;
5545 }
5546 *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
5547 *user = (CHUNK_SIZE_T) (user64 / 10000);
5548 return TRUE;
5549 }
5550 }
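/* Note: GetProcessTimes/GetThreadTimes report times as FILETIME values,
   i.e. counts of 100-nanosecond intervals; dividing by 10000 converts
   them to milliseconds (e.g. 150000 ticks / 10000 = 15 ms).  The casts
   above assume FILETIME and __int64 share the same 64-bit layout. */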
5552 #endif /* WIN32 */
5554 /* ------------------------------------------------------------
5555 History:
5556 V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
5557 * Fix malloc_state bitmap array misdeclaration
5559 V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
5560 * Allow tuning of FIRST_SORTED_BIN_SIZE
5561 * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
5562 * Better detection and support for non-contiguousness of MORECORE.
5563 Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
5564 * Bypass most of malloc if no frees. Thanks to Emery Berger.
5565 * Fix freeing of old top non-contiguous chunk in sysmalloc.
5566 * Raised default trim and map thresholds to 256K.
5567 * Fix mmap-related #defines. Thanks to Lubos Lunak.
5568 * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
5569 * Branch-free bin calculation
5570 * Default trim and mmap thresholds now 256K.
5572 V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
5573 * Introduce independent_comalloc and independent_calloc.
5574 Thanks to Michael Pachos for motivation and help.
5575 * Make optional .h file available
5576 * Allow > 2GB requests on 32bit systems.
5577 * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5578 Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5579 and Anonymous.
5580 * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5581 helping test this.)
5582 * memalign: check alignment arg
5583 * realloc: don't try to shift chunks backwards, since this
5584 leads to more fragmentation in some programs and doesn't
5585 seem to help in any others.
5586 * Collect all cases in malloc requiring system memory into sYSMALLOc
5587 * Use mmap as backup to sbrk
5588 * Place all internal state in malloc_state
5589 * Introduce fastbins (although similar to 2.5.1)
5590 * Many minor tunings and cosmetic improvements
5591 * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5592 * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5593 Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5594 * Include errno.h to support default failure action.
5596 V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
5597 * return null for negative arguments
5598 * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5599 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5600 (e.g. WIN32 platforms)
5601 * Cleanup header file inclusion for WIN32 platforms
5602 * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5603 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5604 memory allocation routines
5605 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5606 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5607 usage of 'assert' in non-WIN32 code
5608 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5609 avoid infinite loop
5610 * Always call 'fREe()' rather than 'free()'
5612 V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
5613 * Fixed ordering problem with boundary-stamping
5615 V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
5616 * Added pvalloc, as recommended by H.J. Liu
5617 * Added 64bit pointer support mainly from Wolfram Gloger
5618 * Added anonymously donated WIN32 sbrk emulation
5619 * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5620 * malloc_extend_top: fix mask error that caused wastage after
5621 foreign sbrks
5622 * Add linux mremap support code from HJ Liu
5624 V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
5625 * Integrated most documentation with the code.
5626 * Add support for mmap, with help from
5627 Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5628 * Use last_remainder in more cases.
5629 * Pack bins using idea from colin@nyx10.cs.du.edu
5630 * Use ordered bins instead of best-fit threshold
5631 * Eliminate block-local decls to simplify tracing and debugging.
5632 * Support another case of realloc via move into top
5633 * Fix error occurring when initial sbrk_base not word-aligned.
5634 * Rely on page size for units instead of SBRK_UNIT to
5635 avoid surprises about sbrk alignment conventions.
5636 * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5637 (raymond@es.ele.tue.nl) for the suggestion.
5638 * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5639 * More precautions for cases where other routines call sbrk,
5640 courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5641 * Added macros etc., allowing use in linux libc from
5642 H.J. Lu (hjl@gnu.ai.mit.edu)
5643 * Inverted this history list
5645 V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
5646 * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5647 * Removed all preallocation code since under current scheme
5648 the work required to undo bad preallocations exceeds
5649 the work saved in good cases for most test programs.
5650 * No longer use return list or unconsolidated bins since
5651 no scheme using them consistently outperforms those that don't
5652 given above changes.
5653 * Use best fit for very large chunks to prevent some worst-cases.
5654 * Added some support for debugging
5656 V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
5657 * Removed footers when chunks are in use. Thanks to
5658 Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5660 V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
5661 * Added malloc_trim, with help from Wolfram Gloger
5662 (wmglo@Dent.MED.Uni-Muenchen.DE).
5664 V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
5666 V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
5667 * realloc: try to expand in both directions
5668 * malloc: swap order of clean-bin strategy;
5669 * realloc: only conditionally expand backwards
5670 * Try not to scavenge used bins
5671 * Use bin counts as a guide to preallocation
5672 * Occasionally bin return list chunks in first scan
5673 * Add a few optimizations from colin@nyx10.cs.du.edu
5675 V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
5676 * faster bin computation & slightly different binning
5677 * merged all consolidations to one part of malloc proper
5678 (eliminating old malloc_find_space & malloc_clean_bin)
5679 * Scan 2 returns chunks (not just 1)
5680 * Propagate failure in realloc if malloc returns 0
5681 * Add stuff to allow compilation on non-ANSI compilers
5682 from kpv@research.att.com
5684 V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
5685 * removed potential for odd address access in prev_chunk
5686 * removed dependency on getpagesize.h
5687 * misc cosmetics and a bit more internal documentation
5688 * anticosmetics: mangled names in macros to evade debugger strangeness
5689 * tested on sparc, hp-700, dec-mips, rs6000
5690 with gcc & native cc (hp, dec only) allowing
5691 Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5693 Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
5694 * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5695 structure of old version, but most details differ.)
5697 */