ia64/xen-unstable

changeset 787:75d9daa46c7a

bitkeeper revision 1.478.1.1 (3f815149k7sE-z_IK6MG5eHi34m-Qg)

Minimal guest OS (based on some old code from Keir)
author rneugeba@wyvis.research
date Mon Oct 06 11:26:01 2003 +0000 (2003-10-06)
parents b45bc774c22c
children 3e6f62b7d409
files .bk-to-hg .hg-to-bk .rootkeys mini-os/Makefile mini-os/README mini-os/entry.S mini-os/events.c mini-os/h/events.h mini-os/h/hypervisor.h mini-os/h/lib.h mini-os/h/list.h mini-os/h/mm.h mini-os/h/os.h mini-os/h/time.h mini-os/h/types.h mini-os/head.S mini-os/hypervisor.c mini-os/kernel.c mini-os/lib/malloc.c mini-os/lib/math.c mini-os/lib/printf.c mini-os/lib/string.c mini-os/mm.c mini-os/time.c mini-os/traps.c mini-os/vmlinux.lds
line diff
     1.1 --- a/.bk-to-hg	Fri Oct 03 16:36:21 2003 +0000
     1.2 +++ b/.bk-to-hg	Mon Oct 06 11:26:01 2003 +0000
     1.3 @@ -1,5 +1,13 @@
     1.4  #!/bin/sh -x
     1.5  set -e
     1.6 +test -L mini-os/h/hypervisor-ifs/block.h
     1.7 +rm      mini-os/h/hypervisor-ifs/block.h
     1.8 +test -L mini-os/h/hypervisor-ifs/hypervisor-if.h
     1.9 +rm      mini-os/h/hypervisor-ifs/hypervisor-if.h
    1.10 +test -L mini-os/h/hypervisor-ifs/kbd.h
    1.11 +rm      mini-os/h/hypervisor-ifs/kbd.h
    1.12 +test -L mini-os/h/hypervisor-ifs/network.h
    1.13 +rm      mini-os/h/hypervisor-ifs/network.h
    1.14  test -L xenolinux-sparse
    1.15  rm      xenolinux-sparse
    1.16  (find -depth -type d -print | xargs -r rmdir 2>/dev/null) || true
     2.1 --- a/.hg-to-bk	Fri Oct 03 16:36:21 2003 +0000
     2.2 +++ b/.hg-to-bk	Mon Oct 06 11:26:01 2003 +0000
     2.3 @@ -1,5 +1,12 @@
     2.4  #!/bin/sh -x
     2.5  set -e
     2.6 +mkdir -p mini-os
     2.7 +mkdir -p mini-os/h
     2.8 +mkdir -p mini-os/h/hypervisor-ifs
     2.9 +ln -s ../../../xen/include/hypervisor-ifs/block.h mini-os/h/hypervisor-ifs/block.h
    2.10 +ln -s ../../../xen/include/hypervisor-ifs/hypervisor-if.h mini-os/h/hypervisor-ifs/hypervisor-if.h
    2.11 +ln -s ../../../xen/include/hypervisor-ifs/kbd.h mini-os/h/hypervisor-ifs/kbd.h
    2.12 +ln -s ../../../xen/include/hypervisor-ifs/network.h mini-os/h/hypervisor-ifs/network.h
    2.13  ln -s xenolinux-2.4.22-sparse xenolinux-sparse
    2.14  (find -depth -type d -print | xargs -r rmdir 2>/dev/null) || true
    2.15  exit 0
     3.1 --- a/.rootkeys	Fri Oct 03 16:36:21 2003 +0000
     3.2 +++ b/.rootkeys	Mon Oct 06 11:26:01 2003 +0000
     3.3 @@ -6,6 +6,33 @@ 3eb788d6Kleck_Cut0ouGneviGzliQ Makefile
     3.4  3f5ef5a24IaQasQE2tyMxrfxskMmvw README
     3.5  3f5ef5a2l4kfBYSQTUaOyyD76WROZQ README.CD
     3.6  3f69d8abYB1vMyD_QVDvzxy5Zscf1A TODO
     3.7 +3f815144d1vI2777JI-dO4wk49Iw7g mini-os/Makefile
     3.8 +3f815144zTnCV5591ulIJQrpe5b-5Q mini-os/README
     3.9 +3f815144wiiDekmfMl9LIPIvhR83Uw mini-os/entry.S
    3.10 +3f815144r7AHj8GPvc3Nl1L9OSsWIg mini-os/events.c
    3.11 +3f815144h-Chna6E38yo40jqU95G1Q mini-os/h/events.h
    3.12 +3f815144oqr2OlUDzE2GfkKX5Hcxqg mini-os/h/hypervisor-ifs/block.h
    3.13 +3f8151443nGXvfUTFG67VXOIH8P4lg mini-os/h/hypervisor-ifs/hypervisor-if.h
    3.14 +3f81514417ZlYqiRdM_AHPy7G11htA mini-os/h/hypervisor-ifs/kbd.h
    3.15 +3f815144J3ZfU5am03Td7Wjfrz30qQ mini-os/h/hypervisor-ifs/network.h
    3.16 +3f8151445bYdgThGHQPeOW49PsrJ_A mini-os/h/hypervisor.h
    3.17 +3f815144f2Vg3qb6tiwt2VZad-DWsg mini-os/h/lib.h
    3.18 +3f815144iqXtdYup_pyfPSmDZuvZcg mini-os/h/list.h
    3.19 +3f81514437EzzRWAnZl4_Ej1oznMjg mini-os/h/mm.h
    3.20 +3f815144nbSjjT1h4m99-QPbeSWY0Q mini-os/h/os.h
    3.21 +3f815144L1t0AevJt2JDXPegv6JTrw mini-os/h/time.h
    3.22 +3f815144UxddtL0ICCKisN-NDHNFaA mini-os/h/types.h
    3.23 +3f815145W2mamPMclRLOzm5B38vWUQ mini-os/head.S
    3.24 +3f815145LqcH11TCEZbAvcjarckkJw mini-os/hypervisor.c
    3.25 +3f815145vwnmxhCwN7dMRWv_XFtXbg mini-os/kernel.c
    3.26 +3f8151451k5emQAlRe80JdIvfSN4VA mini-os/lib/malloc.c
    3.27 +3f815145Mb9WSKjOPsYTLsPIvPyy4Q mini-os/lib/math.c
    3.28 +3f8151454rEuPjN74V2Bcu65RLnM-Q mini-os/lib/printf.c
    3.29 +3f815145MQZrUJV0iRmTK2KIhwB2wg mini-os/lib/string.c
    3.30 +3f815145CB8XdPUqsmhAjSDFuwOoqA mini-os/mm.c
    3.31 +3f815145vGYx1WY79voKkZB9yKwJKQ mini-os/time.c
    3.32 +3f815145xlKBAQmal9oces3G_Mvxqw mini-os/traps.c
    3.33 +3f815145AYE58Kpmsj5U7oHDpVDZJA mini-os/vmlinux.lds
    3.34  3f776bd1Hy9rn69ntXBhPReUFw9IEA tools/Makefile
    3.35  3e6377b24eQqYMsDi9XrFkIgTzZ47A tools/balloon/Makefile
    3.36  3e6377d6eiFjF1hHIS6JEIOFk62xSA tools/balloon/README
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/mini-os/Makefile	Mon Oct 06 11:26:01 2003 +0000
     4.3 @@ -0,0 +1,40 @@
     4.4 +
     4.5 +CC := gcc
     4.6 +LD := ld
     4.7 +# Linker should relocate monitor to this address
     4.8 +MONITOR_BASE := 0xE0100000
     4.9 +CFLAGS  := -fno-builtin -O3 -Wall -Ih/
    4.10 +
    4.11 +TARGET := image.final
    4.12 +
    4.13 +LOBJS:= lib/malloc.o lib/math.o lib/printf.o lib/string.o 
    4.14 +OBJS := entry.o kernel.o traps.o hypervisor.o mm.o events.o time.o ${LOBJS}
    4.15 +
    4.16 +HINTF := h/hypervisor-ifs/hypervisor-if.h
    4.17 +HDRS :=  h/os.h h/types.h h/hypervisor.h h/mm.h h/events.h h/time.h h/lib.h $(HINTF)
    4.18 +
    4.19 +default: $(TARGET)
    4.20 +
    4.21 +$(TARGET): head.o $(OBJS)
    4.22 +	# Image will load at 0xC0000000. First bytes from head.o
    4.23 +	#$(LD) -N -Ttext 0xC0000000 head.o $(OBJS) -o image.elf
    4.24 +	$(LD) -N -T vmlinux.lds head.o $(OBJS) -o image.elf
    4.25 +	# Guest OS header -- first 8 bytes are identifier 'XenoGues'.
    4.26 +	echo -e -n 'XenoGues' >$@ 
    4.27 +	# Guest OS header -- next 4 bytes are load address (0xC0000000).
    4.28 +	echo -e -n '\000\000\000\300' >>$@
    4.29 +	# Create a raw bag of bytes from the ELF image.
    4.30 +	objcopy -O binary -R .note -R .comment image.elf image.raw
    4.31 +	# Guest OS header is immediately followed by raw OS image.
    4.32 +	cat image.raw >>$@
    4.33 +	#gzip -f -9 $@
    4.34 +
    4.35 +clean:
    4.36 +	rm -f *.o *~ core image.elf image.raw image.final image.final.gz
    4.37 +
    4.38 +%.o: %.c $(HDRS) Makefile
    4.39 +	$(CC) $(CFLAGS) -c $< -o $@
    4.40 +
    4.41 +%.o: %.S $(HDRS) Makefile
    4.42 +	$(CC) $(CFLAGS) -D__ASSEMBLY__ -c $< -o $@
    4.43 +
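
The Makefile above documents the guest image layout: an 8-byte 'XenoGues' magic, then a 4-byte little-endian load address (0xC0000000 here), then the raw image converted from ELF. A minimal host-side sketch of parsing that header, assuming a hypothetical check_image_header() helper and the host C library (not part of this changeset):

    #include <stdio.h>
    #include <string.h>

    /* Sketch: validate the header the Makefile prepends to image.final. */
    static int check_image_header(const unsigned char *hdr)
    {
        unsigned long load_addr;

        if ( memcmp(hdr, "XenoGues", 8) != 0 )
            return -1;                        /* not a Xen guest image */

        /* bytes 8-11: load address, least-significant byte first */
        load_addr =  (unsigned long)hdr[8]         |
                    ((unsigned long)hdr[9]  << 8)  |
                    ((unsigned long)hdr[10] << 16) |
                    ((unsigned long)hdr[11] << 24);

        printf("guest image loads at 0x%08lx\n", load_addr);  /* 0xC0000000 */
        return 0;
    }
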
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/mini-os/README	Mon Oct 06 11:26:01 2003 +0000
     5.3 @@ -0,0 +1,35 @@
     5.4 + Minimal OS
     5.5 + ----------
     5.6 +
     5.7 +This shows some of the stuff that any guest OS will have to set up.
     5.8 +
     5.9 +This includes:
    5.10 +
    5.11 + * installing a virtual exception table
    5.12 + * handling virtual exceptions
    5.13 + * handling asynchronous events
    5.14 + * enabling/disabling async events
    5.15 + * parsing start_info struct at start-of-day
    5.16 + * registering virtual interrupt handlers (for timer interrupts)
    5.17 + * a simple page and memory allocator
    5.18 + * minimal libc support
    5.19 +
    5.20 +Stuff it doesn't show:
    5.21 + 
    5.22 + * modifying page tables
    5.23 + * network code
    5.24 + * block-device code
    5.25 +
    5.26 +
    5.27 +- to build it just type make.
    5.28 +
    5.29 +- copy image.final somewhere where dom0 can access it
    5.30 +
    5.31 +- in dom0
    5.32 +  # xi_create 16000 test
    5.33 +    <domid>
    5.34 +  # xi_build <domid> image.final 0
    5.35 +  # xi_start <domid>
    5.36 +
    5.37 +this prints out a bunch of stuff and then, every 1000 timer interrupts, the
    5.38 +current system time.
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/mini-os/entry.S	Mon Oct 06 11:26:01 2003 +0000
     6.3 @@ -0,0 +1,348 @@
     6.4 +/*
     6.5 + *  linux/arch/i386/entry.S
     6.6 + *
     6.7 + *  Copyright (C) 1991, 1992  Linus Torvalds
     6.8 + *
     6.9 + *  Adjusted for XenoLinux use by K A Fraser
    6.10 + *  Adjusted for Xen minimal os by R Neugebauer
    6.11 + */
    6.12 +
    6.13 +        
    6.14 +#include <os.h>
    6.15 +        
    6.16 +EBX		= 0x00
    6.17 +ECX		= 0x04
    6.18 +EDX		= 0x08
    6.19 +ESI		= 0x0C
    6.20 +EDI		= 0x10
    6.21 +EBP		= 0x14
    6.22 +EAX		= 0x18
    6.23 +DS		= 0x1C
    6.24 +ES		= 0x20
    6.25 +ORIG_EAX	= 0x24
    6.26 +EIP		= 0x28
    6.27 +CS		= 0x2C
    6.28 +EFLAGS		= 0x30
    6.29 +OLDESP		= 0x34
    6.30 +OLDSS		= 0x38
    6.31 +
    6.32 +CF_MASK		= 0x00000001
    6.33 +IF_MASK		= 0x00000200
    6.34 +NT_MASK		= 0x00004000
    6.35 +
    6.36 +/* Declare a globally-visible label */
    6.37 +#define ENTRY(X) .globl X ; X :
    6.38 +
    6.39 +/* A Linux hangover. Just ignore it. */
    6.40 +#define SYMBOL_NAME(X) X
    6.41 +        
    6.42 +#define SAVE_ALL \
    6.43 +	cld; \
    6.44 +	pushl %es; \
    6.45 +	pushl %ds; \
    6.46 +	pushl %eax; \
    6.47 +	pushl %ebp; \
    6.48 +	pushl %edi; \
    6.49 +	pushl %esi; \
    6.50 +	pushl %edx; \
    6.51 +	pushl %ecx; \
    6.52 +	pushl %ebx; \
    6.53 +	movl $(__KERNEL_DS),%edx; \
    6.54 +	movl %edx,%ds; \
    6.55 +	movl %edx,%es;
    6.56 +
    6.57 +#define RESTORE_ALL	\
    6.58 +	popl %ebx;	\
    6.59 +	popl %ecx;	\
    6.60 +	popl %edx;	\
    6.61 +	popl %esi;	\
    6.62 +	popl %edi;	\
    6.63 +	popl %ebp;	\
    6.64 +	popl %eax;	\
    6.65 +1:	popl %ds;	\
    6.66 +2:	popl %es;	\
    6.67 +	addl $4,%esp;	\
    6.68 +3:	iret;		\
    6.69 +.section .fixup,"ax";	\
    6.70 +4:	movl $0,(%esp);	\
    6.71 +	jmp 1b;		\
    6.72 +5:	movl $0,(%esp);	\
    6.73 +	jmp 2b;		\
    6.74 +6:	pushl %ss;	\
    6.75 +	popl %ds;	\
    6.76 +	pushl %ss;	\
    6.77 +	popl %es;	\
    6.78 +	pushl $11;	\
    6.79 +        call do_exit;	\
    6.80 +.previous;		\
    6.81 +.section __ex_table,"a";\
    6.82 +	.align 4;	\
    6.83 +	.long 1b,4b;	\
    6.84 +	.long 2b,5b;	\
    6.85 +	.long 3b,6b;	\
    6.86 +.previous
    6.87 +
    6.88 +ENTRY(divide_error)
    6.89 +	pushl $0		# no error code
    6.90 +	pushl $ SYMBOL_NAME(do_divide_error)
    6.91 +	.align 4
    6.92 +error_code:
    6.93 +	pushl %ds
    6.94 +	pushl %eax
    6.95 +	xorl %eax,%eax
    6.96 +	pushl %ebp
    6.97 +	pushl %edi
    6.98 +	pushl %esi
    6.99 +	pushl %edx
   6.100 +	decl %eax			# eax = -1
   6.101 +	pushl %ecx
   6.102 +	pushl %ebx
   6.103 +	cld
   6.104 +	movl %es,%ecx
   6.105 +	movl ORIG_EAX(%esp), %esi	# get the error code
   6.106 +	movl ES(%esp), %edi		# get the function address
   6.107 +	movl %eax, ORIG_EAX(%esp)
   6.108 +	movl %ecx, ES(%esp)
   6.109 +	movl %esp,%edx
   6.110 +	pushl %esi			# push the error code
   6.111 +	pushl %edx			# push the pt_regs pointer
   6.112 +	movl $(__KERNEL_DS),%edx
   6.113 +	movl %edx,%ds
   6.114 +	movl %edx,%es
   6.115 +	call *%edi
   6.116 +	addl $8,%esp
   6.117 +
   6.118 +# These are the tests Linux makes before exiting the OS back to userland.
    6.119 +# At this point preemption may occur, or signals may get delivered.
   6.120 +ret_to_user_tests:
   6.121 +#        cmpl $0,need_resched(%ebx)
   6.122 +#        jne reschedule
   6.123 +#        cmpl $0,sigpending(%ebx)
   6.124 +#        je   safesti
   6.125 +        jmp safesti
   6.126 +               
   6.127 +        
   6.128 +ret_from_exception:
   6.129 +        movb CS(%esp),%cl
   6.130 +	    test $2,%cl          # slow return to ring 2 or 3
   6.131 +	    jne  ret_to_user_tests
   6.132 +        RESTORE_ALL
   6.133 +
   6.134 +# A note on the "critical region" in our callback handler.
   6.135 +# We want to avoid stacking callback handlers due to events occurring
   6.136 +# during handling of the last event. To do this, we keep events disabled
    6.137 +# until we've done all processing. HOWEVER, we must enable events before
    6.138 +# popping the stack frame (can't be done atomically) and so it would still
   6.139 +# be possible to get enough handler activations to overflow the stack.
    6.140 +# Although unlikely, bugs of that kind are hard to track down, so we'd
   6.141 +# like to avoid the possibility.
   6.142 +# So, on entry to the handler we detect whether we interrupted an
   6.143 +# existing activation in its critical region -- if so, we pop the current
   6.144 +# activation and restart the handler using the previous one.
   6.145 +ENTRY(hypervisor_callback)
   6.146 +        pushl %eax
   6.147 +        SAVE_ALL
   6.148 +        movl EIP(%esp),%eax
   6.149 +        cmpl $scrit,%eax
   6.150 +        jb   11f
   6.151 +        cmpl $ecrit,%eax
   6.152 +        jb   critical_region_fixup
   6.153 +11:     push %esp
   6.154 +        call do_hypervisor_callback
   6.155 +        add  $4,%esp
   6.156 +        movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
   6.157 +        xorl %eax,%eax
   6.158 +        movb CS(%esp),%cl
   6.159 +    	test $2,%cl          # slow return to ring 2 or 3
   6.160 +	    jne  ret_to_user_tests
   6.161 +safesti:btsl $31,4(%esi)     # reenable event callbacks
   6.162 +scrit:  /**** START OF CRITICAL REGION ****/
   6.163 +        cmpl %eax,(%esi)
   6.164 +        jne  14f              # process more events if necessary...
   6.165 +        RESTORE_ALL
   6.166 +14:     btrl %eax,4(%esi)
   6.167 +        jmp  11b
   6.168 +ecrit:  /**** END OF CRITICAL REGION ****/
   6.169 +# [How we do the fixup]. We want to merge the current stack frame with the
   6.170 +# just-interrupted frame. How we do this depends on where in the critical
   6.171 +# region the interrupted handler was executing, and so how many saved
   6.172 +# registers are in each frame. We do this quickly using the lookup table
   6.173 +# 'critical_fixup_table'. For each byte offset in the critical region, it
   6.174 +# provides the number of bytes which have already been popped from the
   6.175 +# interrupted stack frame. 
   6.176 +critical_region_fixup:
   6.177 +        addl $critical_fixup_table-scrit,%eax
   6.178 +        movzbl (%eax),%eax    # %eax contains num bytes popped
   6.179 +        mov  %esp,%esi
   6.180 +        add  %eax,%esi        # %esi points at end of src region
   6.181 +        mov  %esp,%edi
   6.182 +        add  $0x34,%edi       # %edi points at end of dst region
   6.183 +        mov  %eax,%ecx
    6.184 +        shr  $2,%ecx          # convert bytes to words
   6.185 +        je   16f              # skip loop if nothing to copy
   6.186 +15:     subl $4,%esi          # pre-decrementing copy loop
   6.187 +        subl $4,%edi
   6.188 +        movl (%esi),%eax
   6.189 +        movl %eax,(%edi)
   6.190 +        loop 15b
   6.191 +16:     movl %edi,%esp        # final %edi is top of merged stack
   6.192 +        jmp  11b
   6.193 +         
   6.194 +critical_fixup_table:        
   6.195 +        .byte 0x00,0x00                       # cmpl %eax,(%esi)
   6.196 +        .byte 0x00,0x00                       # jne  14f
   6.197 +        .byte 0x00                            # pop  %ebx
   6.198 +        .byte 0x04                            # pop  %ecx
   6.199 +        .byte 0x08                            # pop  %edx
   6.200 +        .byte 0x0c                            # pop  %esi
   6.201 +        .byte 0x10                            # pop  %edi
   6.202 +        .byte 0x14                            # pop  %ebp
   6.203 +        .byte 0x18                            # pop  %eax
   6.204 +        .byte 0x1c                            # pop  %ds
   6.205 +        .byte 0x20                            # pop  %es
   6.206 +        .byte 0x24,0x24,0x24                  # add  $4,%esp
   6.207 +        .byte 0x28                            # iret
   6.208 +        .byte 0x00,0x00,0x00,0x00,0x00        # btrl $31,4(%esi)
   6.209 +        .byte 0x00,0x00                       # jmp  11b
   6.210 +       
   6.211 +# Hypervisor uses this for application faults while it executes.
   6.212 +ENTRY(failsafe_callback)
   6.213 +1:      pop  %ds
   6.214 +2:      pop  %es
   6.215 +3:      pop  %fs
   6.216 +4:      pop  %gs
   6.217 +5:      iret
   6.218 +.section .fixup,"ax";	\
   6.219 +6:	movl $0,(%esp);	\
   6.220 +	jmp 1b;		\
   6.221 +7:	movl $0,(%esp);	\
   6.222 +	jmp 2b;		\
   6.223 +8:	movl $0,(%esp);	\
   6.224 +	jmp 3b;		\
   6.225 +9:	movl $0,(%esp);	\
   6.226 +	jmp 4b;		\
   6.227 +10:	pushl %ss;	\
   6.228 +	popl %ds;	\
   6.229 +	pushl %ss;	\
   6.230 +	popl %es;	\
   6.231 +	pushl $11;	\
   6.232 +	call do_exit;	\
   6.233 +.previous;		\
   6.234 +.section __ex_table,"a";\
   6.235 +	.align 4;	\
   6.236 +	.long 1b,6b;	\
   6.237 +	.long 2b,7b;	\
   6.238 +	.long 3b,8b;	\
   6.239 +	.long 4b,9b;	\
   6.240 +	.long 5b,10b;	\
   6.241 +.previous
   6.242 +                
   6.243 +ENTRY(coprocessor_error)
   6.244 +	pushl $0
   6.245 +	pushl $ SYMBOL_NAME(do_coprocessor_error)
   6.246 +	jmp error_code
   6.247 +
   6.248 +ENTRY(simd_coprocessor_error)
   6.249 +	pushl $0
   6.250 +	pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
   6.251 +	jmp error_code
   6.252 +
   6.253 +ENTRY(device_not_available)
   6.254 +	pushl $-1		# mark this as an int
   6.255 +	SAVE_ALL
   6.256 +	#call SYMBOL_NAME(math_state_restore)
   6.257 +	jmp ret_from_exception
   6.258 +
   6.259 +ENTRY(debug)
   6.260 +	pushl $0
   6.261 +	pushl $ SYMBOL_NAME(do_debug)
   6.262 +	jmp error_code
   6.263 +
   6.264 +ENTRY(int3)
   6.265 +	pushl $0
   6.266 +	pushl $ SYMBOL_NAME(do_int3)
   6.267 +	jmp error_code
   6.268 +
   6.269 +ENTRY(overflow)
   6.270 +	pushl $0
   6.271 +	pushl $ SYMBOL_NAME(do_overflow)
   6.272 +	jmp error_code
   6.273 +
   6.274 +ENTRY(bounds)
   6.275 +	pushl $0
   6.276 +	pushl $ SYMBOL_NAME(do_bounds)
   6.277 +	jmp error_code
   6.278 +
   6.279 +ENTRY(invalid_op)
   6.280 +	pushl $0
   6.281 +	pushl $ SYMBOL_NAME(do_invalid_op)
   6.282 +	jmp error_code
   6.283 +
   6.284 +ENTRY(coprocessor_segment_overrun)
   6.285 +	pushl $0
   6.286 +	pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
   6.287 +	jmp error_code
   6.288 +
   6.289 +ENTRY(double_fault)
   6.290 +	pushl $ SYMBOL_NAME(do_double_fault)
   6.291 +	jmp error_code
   6.292 +
   6.293 +ENTRY(invalid_TSS)
   6.294 +	pushl $ SYMBOL_NAME(do_invalid_TSS)
   6.295 +	jmp error_code
   6.296 +
   6.297 +ENTRY(segment_not_present)
   6.298 +	pushl $ SYMBOL_NAME(do_segment_not_present)
   6.299 +	jmp error_code
   6.300 +
   6.301 +ENTRY(stack_segment)
   6.302 +	pushl $ SYMBOL_NAME(do_stack_segment)
   6.303 +	jmp error_code
   6.304 +
   6.305 +ENTRY(general_protection)
   6.306 +	pushl $ SYMBOL_NAME(do_general_protection)
   6.307 +	jmp error_code
   6.308 +
   6.309 +ENTRY(alignment_check)
   6.310 +	pushl $ SYMBOL_NAME(do_alignment_check)
   6.311 +	jmp error_code
   6.312 +
   6.313 +# This handler is special, because it gets an extra value on its stack,
   6.314 +# which is the linear faulting address.
   6.315 +ENTRY(page_fault)
   6.316 +	pushl %ds
   6.317 +	pushl %eax
   6.318 +	xorl %eax,%eax
   6.319 +	pushl %ebp
   6.320 +	pushl %edi
   6.321 +	pushl %esi
   6.322 +	pushl %edx
   6.323 +	decl %eax			# eax = -1
   6.324 +	pushl %ecx
   6.325 +	pushl %ebx
   6.326 +	cld
   6.327 +	movl %es,%ecx
   6.328 +	movl ORIG_EAX(%esp), %esi	# get the error code
   6.329 +	movl ES(%esp), %edi		# get the faulting address
   6.330 +	movl %eax, ORIG_EAX(%esp)
   6.331 +	movl %ecx, ES(%esp)
   6.332 +	movl %esp,%edx
   6.333 +        pushl %edi                      # push the faulting address
   6.334 +	pushl %esi			# push the error code
   6.335 +	pushl %edx			# push the pt_regs pointer
   6.336 +	movl $(__KERNEL_DS),%edx
   6.337 +	movl %edx,%ds
   6.338 +	movl %edx,%es
   6.339 +	call SYMBOL_NAME(do_page_fault)
   6.340 +	addl $12,%esp
   6.341 +	jmp ret_from_exception
   6.342 +
   6.343 +ENTRY(machine_check)
   6.344 +	pushl $0
   6.345 +	pushl $ SYMBOL_NAME(do_machine_check)
   6.346 +	jmp error_code
   6.347 +
   6.348 +ENTRY(spurious_interrupt_bug)
   6.349 +	pushl $0
   6.350 +	pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
   6.351 +	jmp error_code
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/mini-os/events.c	Mon Oct 06 11:26:01 2003 +0000
     7.3 @@ -0,0 +1,106 @@
     7.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
     7.5 + ****************************************************************************
     7.6 + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
     7.7 + ****************************************************************************
     7.8 + *
     7.9 + *        File: events.c
    7.10 + *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
    7.11 + *     Changes: 
    7.12 + *              
    7.13 + *        Date: Jul 2003
    7.14 + * 
    7.15 + * Environment: Xen Minimal OS
    7.16 + * Description: Deal with events
    7.17 + *
    7.18 + ****************************************************************************
    7.19 + * $Id: c-insert.c,v 1.7 2002/11/08 16:04:34 rn Exp $
    7.20 + ****************************************************************************
    7.21 + */
    7.22 +
    7.23 +#include <os.h>
    7.24 +#include <hypervisor.h>
    7.25 +#include <events.h>
    7.26 +#include <lib.h>
    7.27 +
    7.28 +static ev_action_t ev_actions[NR_EVS];
    7.29 +void default_handler(int ev, struct pt_regs *regs);
    7.30 +
    7.31 +
    7.32 +/*
    7.33 + * demux events to different handlers
    7.34 + */
    7.35 +asmlinkage unsigned int do_event(int ev, struct pt_regs *regs)
    7.36 +{
    7.37 +    ev_action_t  *action;
    7.38 +
    7.39 +    if (ev >= NR_EVS) {
    7.40 +        printk("Large event number %d\n", ev);
    7.41 +        return 0;
    7.42 +    }
    7.43 +
    7.44 +    action = &ev_actions[ev];
    7.45 +    action->count++;
    7.46 +    ack_hypervisor_event(ev);
    7.47 +
    7.48 +    if (!action->handler)
    7.49 +        goto out;
    7.50 +    
    7.51 +    if (action->status & EVS_DISABLED)
    7.52 +        goto out;
    7.53 +    
    7.54 +    /* call the handler */
    7.55 +    action->handler(ev, regs);
    7.56 +    
    7.57 + out:
    7.58 +    return 1;
    7.59 +
    7.60 +}
    7.61 +
    7.62 +/*
    7.63 + * add a handler
    7.64 + */
    7.65 +unsigned int add_ev_action( int ev, void (*handler)(int, struct pt_regs *) )
    7.66 +{
    7.67 +    if (ev_actions[ev].handler) {
    7.68 +        printk ("event[%d] already handled by %p", ev, ev_actions[ev].handler);
    7.69 +        return 0;
    7.70 +    }
    7.71 +
    7.72 +    ev_actions[ev].handler = handler;
    7.73 +    return 1;
    7.74 +}
    7.75 +
    7.76 +unsigned int enable_ev_action( int ev )
    7.77 +{
    7.78 +    if (!ev_actions[ev].handler) {
    7.79 +        printk ("enable event[%d], no handler installed", ev);
    7.80 +        return 0;
    7.81 +    }
    7.82 +    ev_actions[ev].status &= ~EVS_DISABLED;
    7.83 +    return 1;
    7.84 +}
    7.85 +
    7.86 +unsigned int disable_ev_action( int ev )
    7.87 +{
    7.88 +    ev_actions[ev].status |= EVS_DISABLED;
    7.89 +    return 1;
    7.90 +}
    7.91 +
    7.92 +/*
    7.93 + * initially all events are without a handler and disabled
    7.94 + */
    7.95 +void init_events(void)
    7.96 +{
    7.97 +    int i;
    7.98 +
     7.99 +    /* initialise event handlers */
   7.100 +    for ( i = 0; i < NR_EVS; i++ )
   7.101 +    {
   7.102 +        ev_actions[i].status  = EVS_DISABLED;
   7.103 +        ev_actions[i].handler = NULL;
   7.104 +    }
   7.105 +}
   7.106 +
   7.107 +void default_handler(int ev, struct pt_regs *regs) {
   7.108 +    printk("X[%d] ", ev);
   7.109 +}
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/mini-os/h/events.h	Mon Oct 06 11:26:01 2003 +0000
     8.3 @@ -0,0 +1,53 @@
     8.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
     8.5 + ****************************************************************************
     8.6 + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
     8.7 + ****************************************************************************
     8.8 + *
     8.9 + *        File: events.h
    8.10 + *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
    8.11 + *     Changes: 
    8.12 + *              
    8.13 + *        Date: Jul 2003
    8.14 + * 
    8.15 + * Environment: Xen Minimal OS
    8.16 + * Description: deal with events
    8.17 + *
    8.18 + ****************************************************************************
    8.19 + * $Id: h-insert.h,v 1.4 2002/11/08 16:03:55 rn Exp $
    8.20 + ****************************************************************************
    8.21 + */
    8.22 +
    8.23 +#ifndef _EVENTS_H_
    8.24 +#define _EVENTS_H_
    8.25 +
    8.26 +/* _EVENT_* are defined in hypervisor-if.h  */
    8.27 +#define EV_BLKDEV _EVENT_BLKDEV
    8.28 +#define EV_TIMER  _EVENT_TIMER
    8.29 +#define EV_DIE    _EVENT_DIE
    8.30 +#define EV_DEBUG  _EVENT_DEBUG
    8.31 +#define EV_NET    _EVENT_NET
    8.32 +#define EV_PS2    _EVENT_PS2
    8.33 +
    8.34 +#define NR_EVS (sizeof(HYPERVISOR_shared_info->events) * 8)
    8.35 +
    8.36 +/* ev handler status */
    8.37 +#define EVS_INPROGRESS	1	/* Event handler active - do not enter! */
    8.38 +#define EVS_DISABLED	2	/* Event disabled - do not enter! */
    8.39 +#define EVS_PENDING	    4	/* Event pending - replay on enable */
    8.40 +#define EVS_REPLAY	    8	/* Event has been replayed but not acked yet */
    8.41 +
     8.42 +/* this represents an event handler. Chaining or sharing is not allowed */
    8.43 +typedef struct _ev_action_t {
    8.44 +	void (*handler)(int, struct pt_regs *);
    8.45 +    unsigned int status;		/* IRQ status */
    8.46 +    u32 count;
    8.47 +} ev_action_t;
    8.48 +
    8.49 +/* prototypes */
    8.50 +unsigned int do_event(int ev, struct pt_regs *regs);
    8.51 +unsigned int add_ev_action( int ev, void (*handler)(int, struct pt_regs *) );
    8.52 +unsigned int enable_ev_action( int ev );
    8.53 +unsigned int disable_ev_action( int ev );
    8.54 +void init_events(void);
    8.55 +
    8.56 +#endif /* _EVENTS_H_ */
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/mini-os/h/hypervisor.h	Mon Oct 06 11:26:01 2003 +0000
     9.3 @@ -0,0 +1,266 @@
     9.4 +
     9.5 +/******************************************************************************
     9.6 + * hypervisor.h
     9.7 + * 
     9.8 + * Linux-specific hypervisor handling.
     9.9 + * 
    9.10 + * Adjusted by R Neugebauer for Xen minimal OS
    9.11 + *
    9.12 + * Copyright (c) 2002, K A Fraser
    9.13 + */
    9.14 +
    9.15 +#ifndef _HYPERVISOR_H_
    9.16 +#define _HYPERVISOR_H_
    9.17 +
    9.18 +#include <types.h>
    9.19 +
    9.20 +/* include the hypervisor interface */
    9.21 +#include <hypervisor-ifs/network.h>
    9.22 +#include <hypervisor-ifs/block.h>
    9.23 +#include <hypervisor-ifs/hypervisor-if.h>
    9.24 +
    9.25 +
    9.26 +/*
    9.27 + * a placeholder for the start of day information passed up from the hypervisor
    9.28 + */
    9.29 +union start_info_union
    9.30 +{
    9.31 +    start_info_t start_info;
    9.32 +    char padding[512];
    9.33 +};
    9.34 +extern union start_info_union start_info_union;
    9.35 +#define start_info (start_info_union.start_info)
    9.36 +
    9.37 +
    9.38 +/* hypervisor.c */
    9.39 +void do_hypervisor_callback(struct pt_regs *regs);
    9.40 +void enable_hypervisor_event(unsigned int ev);
    9.41 +void disable_hypervisor_event(unsigned int ev);
    9.42 +void ack_hypervisor_event(unsigned int ev);
    9.43 +
    9.44 +/*
    9.45 + * Assembler stubs for hyper-calls.
    9.46 + */
    9.47 +
    9.48 +static inline int HYPERVISOR_set_trap_table(trap_info_t *table)
    9.49 +{
    9.50 +    int ret;
    9.51 +    __asm__ __volatile__ (
    9.52 +        TRAP_INSTR
    9.53 +        : "=a" (ret) : "0" (__HYPERVISOR_set_trap_table),
    9.54 +        "b" (table) );
    9.55 +
    9.56 +    return ret;
    9.57 +}
    9.58 +
    9.59 +static inline int HYPERVISOR_pt_update(page_update_request_t *req, int count)
    9.60 +{
    9.61 +    int ret;
    9.62 +    __asm__ __volatile__ (
    9.63 +        TRAP_INSTR
    9.64 +        : "=a" (ret) : "0" (__HYPERVISOR_pt_update), 
    9.65 +        "b" (req), "c" (count) );
    9.66 +
    9.67 +    return ret;
    9.68 +}
    9.69 +
    9.70 +static inline int HYPERVISOR_console_write(const char *str, int count)
    9.71 +{
    9.72 +    int ret;
    9.73 +    __asm__ __volatile__ (
    9.74 +        TRAP_INSTR
    9.75 +        : "=a" (ret) : "0" (__HYPERVISOR_console_write), 
    9.76 +        "b" (str), "c" (count) );
    9.77 +
    9.78 +
    9.79 +    return ret;
    9.80 +}
    9.81 +
    9.82 +static inline int HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
    9.83 +{
    9.84 +    int ret;
    9.85 +    __asm__ __volatile__ (
    9.86 +        TRAP_INSTR
    9.87 +        : "=a" (ret) : "0" (__HYPERVISOR_set_gdt), 
    9.88 +        "b" (frame_list), "c" (entries) );
    9.89 +
    9.90 +
    9.91 +    return ret;
    9.92 +}
    9.93 +
    9.94 +static inline int HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
    9.95 +{
    9.96 +    int ret;
    9.97 +    __asm__ __volatile__ (
    9.98 +        TRAP_INSTR
    9.99 +        : "=a" (ret) : "0" (__HYPERVISOR_stack_switch),
   9.100 +        "b" (ss), "c" (esp) : "memory" );
   9.101 +
   9.102 +    return ret;
   9.103 +}
   9.104 +
   9.105 +static inline int HYPERVISOR_set_callbacks(
   9.106 +    unsigned long event_selector, unsigned long event_address,
   9.107 +    unsigned long failsafe_selector, unsigned long failsafe_address)
   9.108 +{
   9.109 +    int ret;
   9.110 +    __asm__ __volatile__ (
   9.111 +        TRAP_INSTR
   9.112 +        : "=a" (ret) : "0" (__HYPERVISOR_set_callbacks),
   9.113 +        "b" (event_selector), "c" (event_address), 
   9.114 +        "d" (failsafe_selector), "S" (failsafe_address) : "memory" );
   9.115 +
   9.116 +    return ret;
   9.117 +}
   9.118 +
   9.119 +static inline int HYPERVISOR_net_update(void)
   9.120 +{
   9.121 +    int ret;
   9.122 +    __asm__ __volatile__ (
   9.123 +        TRAP_INSTR
   9.124 +        : "=a" (ret) : "0" (__HYPERVISOR_net_update) );
   9.125 +
   9.126 +    return ret;
   9.127 +}
   9.128 +
   9.129 +static inline int HYPERVISOR_fpu_taskswitch(void)
   9.130 +{
   9.131 +    int ret;
   9.132 +    __asm__ __volatile__ (
   9.133 +        TRAP_INSTR
   9.134 +        : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) );
   9.135 +
   9.136 +    return ret;
   9.137 +}
   9.138 +
   9.139 +static inline int HYPERVISOR_yield(void)
   9.140 +{
   9.141 +    int ret;
   9.142 +    __asm__ __volatile__ (
   9.143 +        TRAP_INSTR
   9.144 +        : "=a" (ret) : "0" (__HYPERVISOR_yield) );
   9.145 +
   9.146 +    return ret;
   9.147 +}
   9.148 +
   9.149 +static inline int HYPERVISOR_exit(void)
   9.150 +{
   9.151 +    int ret;
   9.152 +    __asm__ __volatile__ (
   9.153 +        TRAP_INSTR
   9.154 +        : "=a" (ret) : "0" (__HYPERVISOR_exit) );
   9.155 +
   9.156 +    return ret;
   9.157 +}
   9.158 +
   9.159 +static inline int HYPERVISOR_dom0_op(void *dom0_op)
   9.160 +{
   9.161 +    int ret;
   9.162 +    __asm__ __volatile__ (
   9.163 +        TRAP_INSTR
   9.164 +        : "=a" (ret) : "0" (__HYPERVISOR_dom0_op),
   9.165 +        "b" (dom0_op) : "memory" );
   9.166 +
   9.167 +    return ret;
   9.168 +}
   9.169 +
   9.170 +static inline int HYPERVISOR_network_op(void *network_op)
   9.171 +{
   9.172 +    int ret;
   9.173 +    __asm__ __volatile__ (
   9.174 +        TRAP_INSTR
   9.175 +        : "=a" (ret) : "0" (__HYPERVISOR_network_op),
   9.176 +        "b" (network_op) );
   9.177 +
   9.178 +    return ret;
   9.179 +}
   9.180 +
   9.181 +static inline int HYPERVISOR_block_io_op(void)
   9.182 +{
   9.183 +    int ret;
   9.184 +    __asm__ __volatile__ (
   9.185 +        TRAP_INSTR
   9.186 +        : "=a" (ret) : "0" (__HYPERVISOR_block_io_op) ); 
   9.187 +
   9.188 +    return ret;
   9.189 +}
   9.190 +
   9.191 +static inline int HYPERVISOR_set_debugreg(int reg, unsigned long value)
   9.192 +{
   9.193 +    int ret;
   9.194 +    __asm__ __volatile__ (
   9.195 +        TRAP_INSTR
   9.196 +        : "=a" (ret) : "0" (__HYPERVISOR_set_debugreg),
   9.197 +        "b" (reg), "c" (value) );
   9.198 +
   9.199 +    return ret;
   9.200 +}
   9.201 +
   9.202 +static inline unsigned long HYPERVISOR_get_debugreg(int reg)
   9.203 +{
   9.204 +    unsigned long ret;
   9.205 +    __asm__ __volatile__ (
   9.206 +        TRAP_INSTR
   9.207 +        : "=a" (ret) : "0" (__HYPERVISOR_get_debugreg),
   9.208 +        "b" (reg) );
   9.209 +
   9.210 +    return ret;
   9.211 +}
   9.212 +
   9.213 +static inline int HYPERVISOR_update_descriptor(
   9.214 +    unsigned long pa, unsigned long word1, unsigned long word2)
   9.215 +{
   9.216 +    int ret;
   9.217 +    __asm__ __volatile__ (
   9.218 +        TRAP_INSTR
   9.219 +        : "=a" (ret) : "0" (__HYPERVISOR_update_descriptor), 
   9.220 +        "b" (pa), "c" (word1), "d" (word2) );
   9.221 +
   9.222 +    return ret;
   9.223 +}
   9.224 +
   9.225 +static inline int HYPERVISOR_set_fast_trap(int idx)
   9.226 +{
   9.227 +    int ret;
   9.228 +    __asm__ __volatile__ (
   9.229 +        TRAP_INSTR
   9.230 +        : "=a" (ret) : "0" (__HYPERVISOR_set_fast_trap), 
   9.231 +        "b" (idx) );
   9.232 +
   9.233 +    return ret;
   9.234 +}
   9.235 +
   9.236 +static inline int HYPERVISOR_dom_mem_op(void *dom_mem_op)
   9.237 +{
   9.238 +    int ret;
   9.239 +    __asm__ __volatile__ (
   9.240 +        TRAP_INSTR
   9.241 +        : "=a" (ret) : "0" (__HYPERVISOR_dom_mem_op),
   9.242 +        "b" (dom_mem_op) : "memory" );
   9.243 +
   9.244 +    return ret;
   9.245 +}
   9.246 +
   9.247 +static inline int HYPERVISOR_multicall(void *call_list, int nr_calls)
   9.248 +{
   9.249 +    int ret;
   9.250 +    __asm__ __volatile__ (
   9.251 +        TRAP_INSTR
   9.252 +        : "=a" (ret) : "0" (__HYPERVISOR_multicall),
   9.253 +        "b" (call_list), "c" (nr_calls) : "memory" );
   9.254 +
   9.255 +    return ret;
   9.256 +}
   9.257 +
   9.258 +static inline long HYPERVISOR_kbd_op(unsigned char op, unsigned char val)
   9.259 +{
   9.260 +    int ret;
   9.261 +    __asm__ __volatile__ (
   9.262 +        TRAP_INSTR
   9.263 +        : "=a" (ret) : "0" (__HYPERVISOR_kbd_op),
   9.264 +        "b" (op), "c" (val) );
   9.265 +
   9.266 +    return ret;
   9.267 +}
   9.268 +
    9.269 +#endif /* _HYPERVISOR_H_ */
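
Each stub above loads the hypercall number into %eax and its arguments into %ebx, %ecx, %edx and %esi before executing TRAP_INSTR. A minimal sketch of a guest using one of them, assuming an illustrative console_puts() wrapper and strlen() from mini-os/lib:

    /* Sketch: route guest console output through the hypervisor. */
    static void console_puts(const char *s)
    {
        (void)HYPERVISOR_console_write(s, strlen(s));
    }
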
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/mini-os/h/lib.h	Mon Oct 06 11:26:01 2003 +0000
    10.3 @@ -0,0 +1,129 @@
    10.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
    10.5 + ****************************************************************************
    10.6 + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
    10.7 + ****************************************************************************
    10.8 + *
    10.9 + *        File: lib.h
   10.10 + *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
   10.11 + *     Changes: 
   10.12 + *              
   10.13 + *        Date: Aug 2003
   10.14 + * 
   10.15 + * Environment: Xen Minimal OS
    10.16 + * Description: Random useful library functions, contains some FreeBSD stuff
   10.17 + *
   10.18 + ****************************************************************************
   10.19 + * $Id: h-insert.h,v 1.4 2002/11/08 16:03:55 rn Exp $
   10.20 + ****************************************************************************
   10.21 + *
   10.22 + *-
   10.23 + * Copyright (c) 1991, 1993
   10.24 + *      The Regents of the University of California.  All rights reserved.
   10.25 + *
   10.26 + * Redistribution and use in source and binary forms, with or without
   10.27 + * modification, are permitted provided that the following conditions
   10.28 + * are met:
   10.29 + * 1. Redistributions of source code must retain the above copyright
   10.30 + *    notice, this list of conditions and the following disclaimer.
   10.31 + * 2. Redistributions in binary form must reproduce the above copyright
   10.32 + *    notice, this list of conditions and the following disclaimer in the
   10.33 + *    documentation and/or other materials provided with the distribution.
   10.34 + * 3. All advertising materials mentioning features or use of this software
   10.35 + *    must display the following acknowledgement:
   10.36 + *      This product includes software developed by the University of
   10.37 + *      California, Berkeley and its contributors.
   10.38 + * 4. Neither the name of the University nor the names of its contributors
   10.39 + *    may be used to endorse or promote products derived from this software
   10.40 + *    without specific prior written permission.
   10.41 + *
   10.42 + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   10.43 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   10.44 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   10.45 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   10.46 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   10.47 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   10.48 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   10.49 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   10.50 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   10.51 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   10.52 + * SUCH DAMAGE.
   10.53 + *
   10.54 + *      @(#)stdarg.h    8.1 (Berkeley) 6/10/93
   10.55 + * $FreeBSD: src/sys/i386/include/stdarg.h,v 1.10 1999/08/28 00:44:26 peter Exp $
   10.56 + */
   10.57 +
   10.58 +#ifndef _LIB_H_
   10.59 +#define _LIB_H_
   10.60 +
   10.61 +
   10.62 +/* variadic function support */
   10.63 +typedef char *va_list;
   10.64 +#define __va_size(type) \
   10.65 +        (((sizeof(type) + sizeof(int) - 1) / sizeof(int)) * sizeof(int))
   10.66 +#ifdef __GNUC__
   10.67 +#define va_start(ap, last) \
   10.68 +        ((ap) = (va_list)__builtin_next_arg(last))
   10.69 +#else
   10.70 +#define va_start(ap, last) \
   10.71 +        ((ap) = (va_list)&(last) + __va_size(last))
   10.72 +#endif
   10.73 +#define va_arg(ap, type) \
   10.74 +        (*(type *)((ap) += __va_size(type), (ap) - __va_size(type)))
   10.75 +#define va_end(ap)
   10.76 +
   10.77 +
   10.78 +/* printing */
   10.79 +#define printk  printf
   10.80 +#define kprintf printf
   10.81 +int printf(const char *fmt, ...);
   10.82 +int vprintf(const char *fmt, va_list ap);
   10.83 +int sprintf(char *buf, const char *cfmt, ...);
   10.84 +int vsprintf(char *buf, const char *cfmt, va_list ap);
   10.85 +
   10.86 +/* string and memory manipulation */
   10.87 +int    memcmp(const void *cs, const void *ct, size_t count);
   10.88 +void  *memcpy(void *dest, const void *src, size_t count);
   10.89 +int    strncmp(const char *cs, const char *ct, size_t count);
   10.90 +int    strcmp(const char *cs, const char *ct);
   10.91 +char  *strcpy(char *dest, const char *src);
   10.92 +char  *strncpy(char *dest, const char *src, size_t count);
   10.93 +void  *memset(void *s,int c, size_t count);
   10.94 +size_t strnlen(const char *s, size_t count);
   10.95 +size_t strlen(const char *s);
   10.96 +char  *strchr(const char *s, int c);
   10.97 +char  *strstr(const char *s1, const char *s2);
   10.98 +
   10.99 +
  10.100 +/* dlmalloc functions */
  10.101 +struct mallinfo {
  10.102 +  int arena;    /* non-mmapped space allocated from system */
  10.103 +  int ordblks;  /* number of free chunks */
  10.104 +  int smblks;   /* number of fastbin blocks */
  10.105 +  int hblks;    /* number of mmapped regions */
  10.106 +  int hblkhd;   /* space in mmapped regions */
  10.107 +  int usmblks;  /* maximum total allocated space */
  10.108 +  int fsmblks;  /* space available in freed fastbin blocks */
  10.109 +  int uordblks; /* total allocated space */
  10.110 +  int fordblks; /* total free space */
  10.111 +  int keepcost; /* top-most, releasable (via malloc_trim) space */
  10.112 +};
  10.113 +
  10.114 +void *malloc(size_t n);
  10.115 +void *calloc(size_t n_elements, size_t element_size);
  10.116 +void  free(void* p);
  10.117 +void *realloc(void* p, size_t n);
  10.118 +void *memalign(size_t alignment, size_t n);
  10.119 +void *valloc(size_t n);
  10.120 +struct mallinfo mallinfo();
  10.121 +int  mallopt(int parameter_number, int parameter_value);
  10.122 +
  10.123 +void **independent_calloc(size_t n_elements, size_t size, void* chunks[]);
  10.124 +void **independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
  10.125 +void *pvalloc(size_t n);
  10.126 +void cfree(void* p);
  10.127 +int malloc_trim(size_t pad);
  10.128 +size_t malloc_usable_size(void* p);
  10.129 +void malloc_stats();
  10.130 +
  10.131 +
  10.132 +#endif /* _LIB_H_ */
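
lib.h combines a minimal stdarg implementation, the printf-family declarations and the dlmalloc entry points. A minimal usage sketch, assuming an illustrative report() helper:

    /* Sketch: format a message into a heap buffer using the minimal libc. */
    static void report(int nframes)
    {
        char *buf = (char *)malloc(64);
        if ( buf == NULL )
            return;
        sprintf(buf, "allocated %d page frames\n", nframes);
        printk("%s", buf);
        free(buf);
    }
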
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/mini-os/h/list.h	Mon Oct 06 11:26:01 2003 +0000
    11.3 @@ -0,0 +1,164 @@
    11.4 +#ifndef _LINUX_LIST_H
    11.5 +#define _LINUX_LIST_H
    11.6 +
    11.7 +#define ASSERT(x) ((void)0)
    11.8 +
    11.9 +/*
   11.10 + * Simple doubly linked list implementation.
   11.11 + *
   11.12 + * Some of the internal functions ("__xxx") are useful when
   11.13 + * manipulating whole lists rather than single entries, as
   11.14 + * sometimes we already know the next/prev entries and we can
   11.15 + * generate better code by using them directly rather than
   11.16 + * using the generic single-entry routines.
   11.17 + */
   11.18 +
   11.19 +struct list_head {
   11.20 +	struct list_head *next, *prev;
   11.21 +};
   11.22 +
   11.23 +#define LIST_HEAD_INIT(name) { &(name), &(name) }
   11.24 +
   11.25 +#define LIST_HEAD(name) \
   11.26 +	struct list_head name = LIST_HEAD_INIT(name)
   11.27 +
   11.28 +#define INIT_LIST_HEAD(ptr) do { \
   11.29 +	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
   11.30 +} while (0)
   11.31 +
   11.32 +/*
   11.33 + * Insert a new entry between two known consecutive entries. 
   11.34 + *
   11.35 + * This is only for internal list manipulation where we know
   11.36 + * the prev/next entries already!
   11.37 + */
   11.38 +static __inline__ void __list_add(struct list_head * new,
   11.39 +	struct list_head * prev,
   11.40 +	struct list_head * next)
   11.41 +{
   11.42 +	next->prev = new;
   11.43 +	new->next = next;
   11.44 +	new->prev = prev;
   11.45 +	prev->next = new;
   11.46 +}
   11.47 +
   11.48 +/**
   11.49 + * list_add - add a new entry
   11.50 + * @new: new entry to be added
   11.51 + * @head: list head to add it after
   11.52 + *
   11.53 + * Insert a new entry after the specified head.
   11.54 + * This is good for implementing stacks.
   11.55 + */
   11.56 +static __inline__ void list_add(struct list_head *new, struct list_head *head)
   11.57 +{
   11.58 +	__list_add(new, head, head->next);
   11.59 +}
   11.60 +
   11.61 +/**
   11.62 + * list_add_tail - add a new entry
   11.63 + * @new: new entry to be added
   11.64 + * @head: list head to add it before
   11.65 + *
   11.66 + * Insert a new entry before the specified head.
   11.67 + * This is useful for implementing queues.
   11.68 + */
   11.69 +static __inline__ void list_add_tail(struct list_head *new, struct list_head *head)
   11.70 +{
   11.71 +	__list_add(new, head->prev, head);
   11.72 +}
   11.73 +
   11.74 +/*
   11.75 + * Delete a list entry by making the prev/next entries
   11.76 + * point to each other.
   11.77 + *
   11.78 + * This is only for internal list manipulation where we know
   11.79 + * the prev/next entries already!
   11.80 + */
   11.81 +static __inline__ void __list_del(struct list_head * prev,
   11.82 +				  struct list_head * next)
   11.83 +{
   11.84 +	next->prev = prev;
   11.85 +	prev->next = next;
   11.86 +}
   11.87 +
   11.88 +/**
   11.89 + * list_del - deletes entry from list.
   11.90 + * @entry: the element to delete from the list.
   11.91 + * Note: list_empty on entry does not return true after this, the entry is in an undefined state.
   11.92 + */
   11.93 +static __inline__ void list_del(struct list_head *entry)
   11.94 +{
   11.95 +	ASSERT(entry->next->prev == entry);
   11.96 +	ASSERT(entry->prev->next == entry);
   11.97 +	__list_del(entry->prev, entry->next);
   11.98 +}
   11.99 +
  11.100 +/**
  11.101 + * list_del_init - deletes entry from list and reinitialize it.
  11.102 + * @entry: the element to delete from the list.
  11.103 + */
  11.104 +static __inline__ void list_del_init(struct list_head *entry)
  11.105 +{
  11.106 +	__list_del(entry->prev, entry->next);
  11.107 +	INIT_LIST_HEAD(entry); 
  11.108 +}
  11.109 +
  11.110 +/**
  11.111 + * list_empty - tests whether a list is empty
  11.112 + * @head: the list to test.
  11.113 + */
  11.114 +static __inline__ int list_empty(struct list_head *head)
  11.115 +{
  11.116 +	return head->next == head;
  11.117 +}
  11.118 +
  11.119 +/**
  11.120 + * list_splice - join two lists
  11.121 + * @list: the new list to add.
  11.122 + * @head: the place to add it in the first list.
  11.123 + */
  11.124 +static __inline__ void list_splice(struct list_head *list, struct list_head *head)
  11.125 +{
  11.126 +	struct list_head *first = list->next;
  11.127 +
  11.128 +	if (first != list) {
  11.129 +		struct list_head *last = list->prev;
  11.130 +		struct list_head *at = head->next;
  11.131 +
  11.132 +		first->prev = head;
  11.133 +		head->next = first;
  11.134 +
  11.135 +		last->next = at;
  11.136 +		at->prev = last;
  11.137 +	}
  11.138 +}
  11.139 +
  11.140 +/**
  11.141 + * list_entry - get the struct for this entry
  11.142 + * @ptr:	the &struct list_head pointer.
  11.143 + * @type:	the type of the struct this is embedded in.
  11.144 + * @member:	the name of the list_struct within the struct.
  11.145 + */
  11.146 +#define list_entry(ptr, type, member) \
  11.147 +	((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
  11.148 +
  11.149 +/**
  11.150 + * list_for_each	-	iterate over a list
  11.151 + * @pos:	the &struct list_head to use as a loop counter.
  11.152 + * @head:	the head for your list.
  11.153 + */
  11.154 +#define list_for_each(pos, head) \
  11.155 +	for (pos = (head)->next; pos != (head); pos = pos->next)
  11.156 +        	
  11.157 +/**
  11.158 + * list_for_each_safe	-	iterate over a list safe against removal of list entry
  11.159 + * @pos:	the &struct list_head to use as a loop counter.
  11.160 + * @n:		another &struct list_head to use as temporary storage
  11.161 + * @head:	the head for your list.
  11.162 + */
  11.163 +#define list_for_each_safe(pos, n, head) \
  11.164 +	for (pos = (head)->next, n = pos->next; pos != (head); \
  11.165 +		pos = n, n = pos->next)
  11.166 +
  11.167 +#endif
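
The primitives above are the usual Linux intrusive doubly linked list. A minimal usage sketch, assuming illustrative 'struct frame' and 'free_frames' names:

    /* Sketch: embed a list_head in a structure and walk the resulting list. */
    struct frame {
        unsigned long addr;
        struct list_head link;
    };

    static LIST_HEAD(free_frames);

    static void track_frame(struct frame *f)
    {
        list_add_tail(&f->link, &free_frames);
    }

    static void dump_frames(void)
    {
        struct list_head *pos;
        list_for_each (pos, &free_frames) {
            struct frame *f = list_entry(pos, struct frame, link);
            printk("frame at 0x%08lx\n", f->addr);
        }
    }
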
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/mini-os/h/mm.h	Mon Oct 06 11:26:01 2003 +0000
    12.3 @@ -0,0 +1,107 @@
    12.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
    12.5 + ****************************************************************************
    12.6 + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
    12.7 + ****************************************************************************
    12.8 + *
    12.9 + *        File: mm.h
   12.10 + *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
   12.11 + *     Changes: 
   12.12 + *              
   12.13 + *        Date: Aug 2003
   12.14 + * 
   12.15 + * Environment: 
   12.16 + * Description: 
   12.17 + *
   12.18 + ****************************************************************************
   12.19 + * $Id: h-insert.h,v 1.4 2002/11/08 16:03:55 rn Exp $
   12.20 + ****************************************************************************
   12.21 + */
   12.22 +
   12.23 +#ifndef _MM_H_
   12.24 +#define _MM_H_
   12.25 +
   12.26 +/* PAGE_SHIFT determines the page size */
   12.27 +#define PAGE_SHIFT      12
   12.28 +#define PAGE_SIZE       (1UL << PAGE_SHIFT)
   12.29 +#define PAGE_MASK       (~(PAGE_SIZE-1))
   12.30 +
   12.31 +
   12.32 +#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
   12.33 +#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
   12.34 +#define PFN_PHYS(x)	((x) << PAGE_SHIFT)
   12.35 +
   12.36 +
   12.37 +/* to align the pointer to the (next) page boundary */
   12.38 +#define PAGE_ALIGN(addr)        (((addr)+PAGE_SIZE-1)&PAGE_MASK)
   12.39 +
   12.40 +
   12.41 +extern unsigned long *phys_to_machine_mapping;
   12.42 +#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
   12.43 +#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
   12.44 +static inline unsigned long phys_to_machine(unsigned long phys)
   12.45 +{
   12.46 +    unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
   12.47 +    machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
   12.48 +    return machine;
   12.49 +}
   12.50 +static inline unsigned long machine_to_phys(unsigned long machine)
   12.51 +{
   12.52 +    unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
   12.53 +    phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
   12.54 +    return phys;
   12.55 +}
   12.56 +
   12.57 +/* VIRT <-> MACHINE conversion */
   12.58 +#define virt_to_machine(_a) (phys_to_machine(__pa(_a)))
   12.59 +#define machine_to_virt(_m) (__va(machine_to_phys(_m)))
   12.60 +
   12.61 +/*
    12.62 + * This handles the memory map. We could make this a config
   12.63 + * option, but too many people screw it up, and too few need
   12.64 + * it.
   12.65 + *
   12.66 + * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
   12.67 + * a virtual address space of one gigabyte, which limits the
   12.68 + * amount of physical memory you can use to about 950MB. 
   12.69 + *
   12.70 + * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
   12.71 + * and CONFIG_HIGHMEM64G options in the kernel configuration.
   12.72 + */
   12.73 +
   12.74 +#define __PAGE_OFFSET           (0xC0000000)
   12.75 +
   12.76 +#define PAGE_OFFSET             ((unsigned long)__PAGE_OFFSET)
   12.77 +#define __pa(x)                 ((unsigned long)(x)-PAGE_OFFSET)
   12.78 +#define __va(x)                 ((void *)((unsigned long)(x)+PAGE_OFFSET))
   12.79 +#define virt_to_page(kaddr)     (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
   12.80 +#define VALID_PAGE(page)        ((page - mem_map) < max_mapnr)
   12.81 +
   12.82 +#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_EXEC | \
   12.83 +                                 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
   12.84 +
   12.85 +
   12.86 +/* prototypes */
   12.87 +void init_mm();
   12.88 +void release_bytes_to_allocator(unsigned long min, unsigned long max);
   12.89 +unsigned long __get_free_pages(int order);
   12.90 +void __free_pages(unsigned long p, int order);
   12.91 +#define get_free_pages(_o) (__get_free_pages(_o))
   12.92 +#define get_free_page() (__get_free_pages(0))
   12.93 +#define free_pages(_p,_o) (__free_pages(_p,_o))
   12.94 +#define free_page(_p) (__free_pages(_p,0))
   12.95 +
   12.96 +static __inline__ int get_order(unsigned long size)
   12.97 +{
   12.98 +    int order;
   12.99 +    
  12.100 +    size = (size-1) >> (PAGE_SHIFT-1);
  12.101 +    order = -1;
  12.102 +    do {
  12.103 +        size >>= 1;
  12.104 +        order++;
  12.105 +    } while (size);
  12.106 +    return order;
  12.107 +}
  12.108 +
  12.109 +
  12.110 +#endif /* _MM_H_ */
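
get_order() above rounds an allocation size up to a power-of-two number of pages: with PAGE_SHIFT = 12, a 12 KB (three-page) request yields order 2, i.e. four pages. A minimal sketch, assuming an illustrative alloc_buffer() wrapper:

    /* Sketch: size a request for the page allocator declared above. */
    static unsigned long alloc_buffer(unsigned long bytes)
    {
        int order = get_order(bytes);     /* e.g. 12 KB -> order 2 (4 pages) */
        return get_free_pages(order);     /* virtual address of the pages    */
    }
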
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/mini-os/h/os.h	Mon Oct 06 11:26:01 2003 +0000
    13.3 @@ -0,0 +1,270 @@
    13.4 +/******************************************************************************
    13.5 + * os.h
    13.6 + * 
     13.7 + * random collection of macros and definitions
    13.8 + */
    13.9 +
   13.10 +#ifndef _OS_H_
   13.11 +#define _OS_H_
   13.12 +
   13.13 +
   13.14 +#define NULL 0
   13.15 +
   13.16 +/*
   13.17 + * These are the segment descriptors provided for us by the hypervisor.
   13.18 + * For now, these are hardwired -- guest OSes cannot update the GDT
   13.19 + * or LDT.
   13.20 + * 
   13.21 + * It shouldn't be hard to support descriptor-table frobbing -- let me 
   13.22 + * know if the BSD or XP ports require flexibility here.
   13.23 + */
   13.24 +
   13.25 +
   13.26 +/*
   13.27 + * these are also defined in hypervisor-if.h but can't be pulled in as
   13.28 + * they are used in start of day assembly. Need to clean up the .h files
   13.29 + * a bit more...
   13.30 + */
   13.31 +
   13.32 +#ifndef FLAT_RING1_CS
   13.33 +#define FLAT_RING1_CS		0x0819
   13.34 +#define FLAT_RING1_DS		0x0821
   13.35 +#define FLAT_RING3_CS		0x082b
   13.36 +#define FLAT_RING3_DS		0x0833
   13.37 +#endif
   13.38 +
   13.39 +#define __KERNEL_CS        FLAT_RING1_CS
   13.40 +#define __KERNEL_DS        FLAT_RING1_DS
   13.41 +
   13.42 +/* Everything below this point is not included by assembler (.S) files. */
   13.43 +#ifndef __ASSEMBLY__
   13.44 +
   13.45 +#include <types.h>
   13.46 +#include <hypervisor-ifs/hypervisor-if.h>
   13.47 +
   13.48 +
   13.49 +/* this struct defines the way the registers are stored on the 
   13.50 +   stack during an exception or interrupt. */
   13.51 +struct pt_regs {
   13.52 +	long ebx;
   13.53 +	long ecx;
   13.54 +	long edx;
   13.55 +	long esi;
   13.56 +	long edi;
   13.57 +	long ebp;
   13.58 +	long eax;
   13.59 +	int  xds;
   13.60 +	int  xes;
   13.61 +	long orig_eax;
   13.62 +	long eip;
   13.63 +	int  xcs;
   13.64 +	long eflags;
   13.65 +	long esp;
   13.66 +	int  xss;
   13.67 +};
   13.68 +
   13.69 +
   13.70 +/*
   13.71 + * STI/CLI equivalents. These basically set and clear the virtual
    13.72 + * event_enable flag in the shared_info structure. Note that when
   13.73 + * the enable bit is set, there may be pending events to be handled.
   13.74 + * We may therefore call into do_hypervisor_callback() directly.
   13.75 + */
   13.76 +#define unlikely(x)  __builtin_expect((x),0)
   13.77 +#define __save_flags(x)                                                       \
   13.78 +do {                                                                          \
   13.79 +    (x) = test_bit(EVENTS_MASTER_ENABLE_BIT,                                  \
   13.80 +                   &HYPERVISOR_shared_info->events_mask);                     \
   13.81 +    barrier();                                                                \
   13.82 +} while (0)
   13.83 +
   13.84 +#define __restore_flags(x)                                                    \
   13.85 +do {                                                                          \
   13.86 +    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
   13.87 +    if (x) set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);          \
   13.88 +    barrier();                                                                \
   13.89 +    if ( unlikely(_shared->events) && (x) ) do_hypervisor_callback(NULL);     \
   13.90 +} while (0)
   13.91 +
   13.92 +#define __cli()                                                               \
   13.93 +do {                                                                          \
   13.94 +    clear_bit(EVENTS_MASTER_ENABLE_BIT, &HYPERVISOR_shared_info->events_mask);\
   13.95 +    barrier();                                                                \
   13.96 +} while (0)
   13.97 +
   13.98 +#define __sti()                                                               \
   13.99 +do {                                                                          \
  13.100 +    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
  13.101 +    set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);                 \
  13.102 +    barrier();                                                                \
  13.103 +    if ( unlikely(_shared->events) ) do_hypervisor_callback(NULL);            \
  13.104 +} while (0)
  13.105 +#define cli() __cli()
  13.106 +#define sti() __sti()
  13.107 +#define save_flags(x) __save_flags(x)
  13.108 +#define restore_flags(x) __restore_flags(x)
  13.109 +#define save_and_cli(x) __save_and_cli(x)
  13.110 +#define save_and_sti(x) __save_and_sti(x)
  13.111 +
  13.112 +
  13.113 +
  13.114 +/* This is a barrier for the compiler only, NOT the processor! */
  13.115 +#define barrier() __asm__ __volatile__("": : :"memory")
  13.116 +
  13.117 +#define LOCK_PREFIX ""
  13.118 +#define LOCK ""
  13.119 +#define ADDR (*(volatile long *) addr)
  13.120 +/*
  13.121 + * Make sure gcc doesn't try to be clever and move things around
  13.122 + * on us. We need to use _exactly_ the address the user gave us,
  13.123 + * not some alias that contains the same information.
  13.124 + */
  13.125 +typedef struct { volatile int counter; } atomic_t;
  13.126 +
  13.127 +
  13.128 +/*
  13.129 + * This XCHG macro is straight from Linux. It is gross.
  13.130 + */
  13.131 +#define xchg(ptr,v) \
  13.132 +        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
  13.133 +struct __xchg_dummy { unsigned long a[100]; };
  13.134 +#define __xg(x) ((struct __xchg_dummy *)(x))
  13.135 +static inline unsigned long __xchg(unsigned long x, volatile void * ptr,
  13.136 +                                   int size)
  13.137 +{
  13.138 +    switch (size) {
  13.139 +    case 1:
  13.140 +        __asm__ __volatile__("xchgb %b0,%1"
  13.141 +                             :"=q" (x)
  13.142 +                             :"m" (*__xg(ptr)), "0" (x)
  13.143 +                             :"memory");
  13.144 +        break;
  13.145 +    case 2:
  13.146 +        __asm__ __volatile__("xchgw %w0,%1"
  13.147 +                             :"=r" (x)
  13.148 +                             :"m" (*__xg(ptr)), "0" (x)
  13.149 +                             :"memory");
  13.150 +        break;
  13.151 +    case 4:
  13.152 +        __asm__ __volatile__("xchgl %0,%1"
  13.153 +                             :"=r" (x)
  13.154 +                             :"m" (*__xg(ptr)), "0" (x)
  13.155 +                             :"memory");
  13.156 +        break;
  13.157 +    }
  13.158 +    return x;
  13.159 +}
  13.160 +
  13.161 +/**
  13.162 + * test_and_clear_bit - Clear a bit and return its old value
  13.163 + * @nr: Bit to set
  13.164 + * @addr: Address to count from
  13.165 + *
  13.166 + * This operation is atomic and cannot be reordered.  
  13.167 + * It also implies a memory barrier.
  13.168 + */
  13.169 +static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
  13.170 +{
  13.171 +        int oldbit;
  13.172 +
  13.173 +        __asm__ __volatile__( LOCK_PREFIX
  13.174 +                "btrl %2,%1\n\tsbbl %0,%0"
  13.175 +                :"=r" (oldbit),"=m" (ADDR)
  13.176 +                :"Ir" (nr) : "memory");
  13.177 +        return oldbit;
  13.178 +}
  13.179 +
  13.180 +static __inline__ int constant_test_bit(int nr, const volatile void * addr)
  13.181 +{
  13.182 +    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
  13.183 +}
  13.184 +
  13.185 +static __inline__ int variable_test_bit(int nr, volatile void * addr)
  13.186 +{
  13.187 +    int oldbit;
  13.188 +    
  13.189 +    __asm__ __volatile__(
  13.190 +        "btl %2,%1\n\tsbbl %0,%0"
  13.191 +        :"=r" (oldbit)
  13.192 +        :"m" (ADDR),"Ir" (nr));
  13.193 +    return oldbit;
  13.194 +}
  13.195 +
  13.196 +#define test_bit(nr,addr) \
  13.197 +(__builtin_constant_p(nr) ? \
  13.198 + constant_test_bit((nr),(addr)) : \
  13.199 + variable_test_bit((nr),(addr)))
  13.200 +
  13.201 +
  13.202 +/**
  13.203 + * set_bit - Atomically set a bit in memory
  13.204 + * @nr: the bit to set
  13.205 + * @addr: the address to start counting from
  13.206 + *
  13.207 + * This function is atomic and may not be reordered.  See __set_bit()
  13.208 + * if you do not require the atomic guarantees.
  13.209 + * Note that @nr may be almost arbitrarily large; this function is not
  13.210 + * restricted to acting on a single-word quantity.
  13.211 + */
  13.212 +static __inline__ void set_bit(int nr, volatile void * addr)
  13.213 +{
  13.214 +        __asm__ __volatile__( LOCK_PREFIX
  13.215 +                "btsl %1,%0"
  13.216 +                :"=m" (ADDR)
  13.217 +                :"Ir" (nr));
  13.218 +}
  13.219 +
  13.220 +/**
  13.221 + * clear_bit - Clears a bit in memory
  13.222 + * @nr: Bit to clear
  13.223 + * @addr: Address to start counting from
  13.224 + *
  13.225 + * clear_bit() is atomic and may not be reordered.  However, it does
  13.226 + * not contain a memory barrier, so if it is used for locking purposes,
  13.227 + * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  13.228 + * in order to ensure changes are visible on other processors.
  13.229 + */
  13.230 +static __inline__ void clear_bit(int nr, volatile void * addr)
  13.231 +{
  13.232 +        __asm__ __volatile__( LOCK_PREFIX
  13.233 +                "btrl %1,%0"
  13.234 +                :"=m" (ADDR)
  13.235 +                :"Ir" (nr));
  13.236 +}
  13.237 +
  13.238 +/**
  13.239 + * atomic_inc - increment atomic variable
  13.240 + * @v: pointer of type atomic_t
  13.241 + * 
  13.242 + * Atomically increments @v by 1.  Note that the guaranteed
  13.243 + * useful range of an atomic_t is only 24 bits.
  13.244 + */ 
  13.245 +static __inline__ void atomic_inc(atomic_t *v)
  13.246 +{
  13.247 +        __asm__ __volatile__(
  13.248 +                LOCK "incl %0"
  13.249 +                :"=m" (v->counter)
  13.250 +                :"m" (v->counter));
  13.251 +}
  13.252 +
  13.253 +
  13.254 +/* useful hypervisor macros */
  13.255 +
  13.256 +struct desc_struct {
  13.257 +        unsigned long a,b;
  13.258 +};
  13.259 +extern struct desc_struct default_ldt[];
  13.260 +
  13.261 +#define asmlinkage        __attribute__((regparm(0)))
  13.262 +
  13.263 +/*
  13.264 + * some random linux macros
  13.265 + */
  13.266 +
  13.267 +#define rdtscll(val) \
  13.268 +     __asm__ __volatile__("rdtsc" : "=A" (val))
  13.269 +
  13.270 +
  13.271 +#endif /* !__ASSEMBLY__ */
  13.272 +
  13.273 +#endif /* _OS_H_ */
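
[Editor's note: the macros above replace the x86 cli/sti instructions with operations on the shared-info event mask. A minimal, hedged usage sketch follows; it assumes os.h is included and the shared page is mapped, and the names pending_work/queue_work are illustrative, not part of this changeset.]

    static unsigned long pending_work;      /* state also touched by an event handler */

    void queue_work(unsigned long item)
    {
        unsigned long flags;

        save_flags(flags);        /* snapshot EVENTS_MASTER_ENABLE_BIT           */
        cli();                    /* mask virtual event delivery                 */
        pending_work |= item;     /* critical section                            */
        restore_flags(flags);     /* unmask; may run do_hypervisor_callback()    */
    }
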
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/mini-os/h/time.h	Mon Oct 06 11:26:01 2003 +0000
    14.3 @@ -0,0 +1,59 @@
    14.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
    14.5 + ****************************************************************************
    14.6 + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
    14.7 + ****************************************************************************
    14.8 + *
    14.9 + *        File: time.h
   14.10 + *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
   14.11 + *     Changes: 
   14.12 + *              
   14.13 + *        Date: Jul 2003
   14.14 + * 
   14.15 + * Environment: Xen Minimal OS
   14.16 + * Description: Time and timer functions
   14.17 + *
   14.18 + ****************************************************************************
   14.19 + * $Id: h-insert.h,v 1.4 2002/11/08 16:03:55 rn Exp $
   14.20 + ****************************************************************************
   14.21 + */
   14.22 +
   14.23 +#ifndef _TIME_H_
   14.24 +#define _TIME_H_
   14.25 +
   14.26 +#include <list.h>
   14.27 +
   14.28 +/*
   14.29 + * System Time
   14.30 + * 64 bit value containing the nanoseconds elapsed since boot time.
   14.31 + * This value is adjusted by frequency drift.
   14.32 + * NOW() returns the current time.
   14.33 + * The other macros are for convenience to approximate short intervals
   14.34 + * of real time into system time 
   14.35 + */
   14.36 +typedef s64 s_time_t;
   14.37 +#define NOW()                   ((s_time_t)get_s_time())
   14.38 +#define SECONDS(_s)             (((s_time_t)(_s))  * 1000000000UL )
   14.39 +#define TENTHS(_ts)             (((s_time_t)(_ts)) * 100000000UL )
   14.40 +#define HUNDREDTHS(_hs)         (((s_time_t)(_hs)) * 10000000UL )
   14.41 +#define MILLISECS(_ms)          (((s_time_t)(_ms)) * 1000000UL )
   14.42 +#define MICROSECS(_us)          (((s_time_t)(_us)) * 1000UL )
   14.43 +#define Time_Max                ((s_time_t) 0x7fffffffffffffffLL)
   14.44 +#define FOREVER                 Time_Max
   14.45 +
   14.46 +
   14.47 +/* wall clock time  */
   14.48 +typedef long time_t;
   14.49 +typedef long suseconds_t;
   14.50 +struct timeval {
   14.51 +	time_t		tv_sec;		/* seconds */
   14.52 +	suseconds_t	tv_usec;	/* microseconds */
   14.53 +};
   14.54 +
   14.55 +
   14.56 +/* prototypes */
   14.57 +void     init_time(void);
   14.58 +s_time_t get_s_time(void);
   14.59 +s_time_t get_v_time(void);
   14.60 +void     gettimeofday(struct timeval *tv);
   14.61 +
   14.62 +#endif /* _TIME_H_ */
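
[Editor's note: a hedged usage sketch of the system-time macros above; it assumes this header plus hypervisor.h for HYPERVISOR_yield(), the same hypercall wrapper kernel.c relies on.]

    void short_delay(void)
    {
        s_time_t deadline = NOW() + MILLISECS(10);   /* roughly 10ms from now */

        while (NOW() < deadline)
            HYPERVISOR_yield();     /* yield the VCPU rather than spin hot */
    }
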
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/mini-os/h/types.h	Mon Oct 06 11:26:01 2003 +0000
    15.3 @@ -0,0 +1,41 @@
    15.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
    15.5 + ****************************************************************************
    15.6 + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
    15.7 + ****************************************************************************
    15.8 + *
    15.9 + *        File: types.h
   15.10 + *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
   15.11 + *     Changes: 
   15.12 + *              
   15.13 + *        Date: May 2003
   15.14 + * 
   15.15 + * Environment: Xeno Minimal OS
   15.16 + * Description: a random collection of type definitions
   15.17 + *
   15.18 + ****************************************************************************
   15.19 + * $Id: h-insert.h,v 1.4 2002/11/08 16:03:55 rn Exp $
   15.20 + ****************************************************************************
   15.21 + */
   15.22 +
   15.23 +#ifndef _TYPES_H_
   15.24 +#define _TYPES_H_
   15.25 +
   15.26 +typedef signed char         s8;
   15.27 +typedef unsigned char       u8;
   15.28 +typedef signed short        s16;
   15.29 +typedef unsigned short      u16;
   15.30 +typedef signed int          s32;
   15.31 +typedef unsigned int        u32;
   15.32 +typedef signed long long    s64;
   15.33 +typedef unsigned long long  u64;
   15.34 +
   15.35 +typedef unsigned int        size_t;
   15.36 +
   15.37 +/* FreeBSD compat types */
   15.38 +typedef unsigned char       u_char;
   15.39 +typedef unsigned int        u_int;
   15.40 +typedef unsigned long       u_long;
   15.41 +typedef long long           quad_t;
   15.42 +typedef unsigned long long  u_quad_t;
   15.43 +typedef unsigned int        uintptr_t;
   15.44 +#endif /* _TYPES_H_ */
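
[Editor's note: a small hedged sketch of the fixed-width types in use, paired with the rdtscll() macro from os.h above; read_tsc_example is an illustrative name only.]

    u64 read_tsc_example(void)
    {
        u64 tsc;
        rdtscll(tsc);   /* "rdtsc" writes EDX:EAX; the "=A" constraint captures both halves */
        return tsc;
    }
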
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/mini-os/head.S	Mon Oct 06 11:26:01 2003 +0000
    16.3 @@ -0,0 +1,46 @@
    16.4 +#include <os.h>
    16.5 +
    16.6 +/* Offsets in start_info structure */
    16.7 +#define SHARED_INFO  4
    16.8 +#define MOD_START   12
    16.9 +#define MOD_LEN     16
   16.10 +
   16.11 +#define ENTRY(X) .globl X ; X :
   16.12 +
   16.13 +.globl _start                
   16.14 +_start:
   16.15 +        cld
   16.16 +        
   16.17 +        lss stack_start,%esp
   16.18 +        
   16.19 +        /* Copy any module somewhere safe before it's clobbered by BSS. */
   16.20 +        mov  MOD_LEN(%esi),%ecx
   16.21 +        shr  $2,%ecx
   16.22 +        jz   2f        /* bail from copy loop if no module */
   16.23 +        
   16.24 +        mov  $_end,%edi
   16.25 +        add  MOD_LEN(%esi),%edi
   16.26 +        mov  MOD_START(%esi),%eax
   16.27 +        add  MOD_LEN(%esi),%eax
   16.28 +1:      sub  $4,%eax
   16.29 +        sub  $4,%edi
   16.30 +        mov  (%eax),%ebx
   16.31 +        mov  %ebx,(%edi)
   16.32 +        loop 1b
   16.33 +        mov  %edi,MOD_START(%esi)
   16.34 +
   16.35 +        /* Clear BSS first so that there are no surprises... */
   16.36 +2:      xorl %eax,%eax
   16.37 +	    movl $__bss_start,%edi
   16.38 +	    movl $_end,%ecx
   16.39 +	    subl %edi,%ecx
   16.40 +	    rep stosb
   16.41 +
   16.42 +        push %esi 
   16.43 +        call start_kernel
   16.44 +
   16.45 +
   16.46 +stack_start:
   16.47 +	.long stack+8192, __KERNEL_DS
   16.48 +
   16.49 +
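
[Editor's note: the copy loop above moves any boot module to just past _end, word by word from the tail, so the BSS clear that follows cannot overwrite it. A hedged C paraphrase of the same logic; function and parameter names are illustrative, while the real code works directly on the MOD_START/MOD_LEN offsets in start_info.]

    extern char _end[];                     /* end-of-image symbol from the linker script */

    static void relocate_module(unsigned long *mod_start, unsigned long mod_len)
    {
        unsigned long  words = mod_len / 4;                  /* shr $2,%ecx          */
        unsigned long *src, *dst;

        if (words == 0)
            return;                                          /* jz 2f: no module     */

        src = (unsigned long *)(*mod_start + mod_len);
        dst = (unsigned long *)(_end + mod_len);
        while (words--)
            *--dst = *--src;                                 /* copy backwards       */
        *mod_start = (unsigned long)dst;                     /* module now sits at _end */
    }
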
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/mini-os/hypervisor.c	Mon Oct 06 11:26:01 2003 +0000
    17.3 @@ -0,0 +1,83 @@
    17.4 +/******************************************************************************
    17.5 + * hypervisor.c
    17.6 + * 
    17.7 + * Communication to/from hypervisor.
    17.8 + *
    17.9 + * Copied from XenoLinux and adjusted by Rolf.Neugebauer@intel.com
   17.10 + * 
   17.11 + * Copyright (c) 2002, K A Fraser
   17.12 + */
   17.13 +
   17.14 +#include <os.h>
   17.15 +#include <hypervisor.h>
   17.16 +
   17.17 +static unsigned long event_mask = 0;
   17.18 +static unsigned long ev_err_count;
   17.19 +
   17.20 +void do_hypervisor_callback(struct pt_regs *regs)
   17.21 +{
   17.22 +    unsigned long events, flags;
   17.23 +    shared_info_t *shared = HYPERVISOR_shared_info;
   17.24 +
   17.25 +    do {
   17.26 +        /* Specialised local_irq_save(). */
   17.27 +        flags = test_and_clear_bit(EVENTS_MASTER_ENABLE_BIT, 
   17.28 +                                   &shared->events_mask);
   17.29 +        barrier();
   17.30 +
   17.31 +        events  = xchg(&shared->events, 0);
   17.32 +        events &= event_mask;
   17.33 +
   17.34 +        /* 'events' now contains some pending events to handle. */
   17.35 +        __asm__ __volatile__ (
   17.36 +            "   push %1                            ;"
   17.37 +            "   sub  $4,%%esp                      ;"
   17.38 +            "   jmp  2f                            ;"
   17.39 +            "1: btrl %%eax,%0                      ;" /* clear bit     */
   17.40 +            "   mov  %%eax,(%%esp)                 ;"
   17.41 +            "   call do_event                      ;" /* do_event(event) */
   17.42 +            "2: bsfl %0,%%eax                      ;" /* %eax == bit # */
   17.43 +            "   jnz  1b                            ;"
   17.44 +            "   add  $8,%%esp                      ;"
   17.45 +            /* we use %ebx because it is callee-saved */
   17.46 +            : : "b" (events), "r" (regs)
   17.47 +            /* clobbered by callback function calls */
   17.48 +            : "eax", "ecx", "edx", "memory" ); 
   17.49 +
   17.50 +        /* Specialised local_irq_restore(). */
   17.51 +        if ( flags ) set_bit(EVENTS_MASTER_ENABLE_BIT, &shared->events_mask);
   17.52 +        barrier();
   17.53 +    }
   17.54 +    while ( shared->events );
   17.55 +}
   17.56 +
   17.57 +
   17.58 +
   17.59 +/*
   17.60 + * Define interface to generic handling in irq.c
   17.61 + */
   17.62 +
   17.63 +void enable_hypervisor_event(unsigned int ev)
   17.64 +{
   17.65 +    set_bit(ev, &event_mask);
   17.66 +    set_bit(ev, &HYPERVISOR_shared_info->events_mask);
   17.67 +    if ( test_bit(EVENTS_MASTER_ENABLE_BIT, 
   17.68 +                  &HYPERVISOR_shared_info->events_mask) )
   17.69 +        do_hypervisor_callback(NULL);
   17.70 +}
   17.71 +
   17.72 +void disable_hypervisor_event(unsigned int ev)
   17.73 +{
   17.74 +    clear_bit(ev, &event_mask);
   17.75 +    clear_bit(ev, &HYPERVISOR_shared_info->events_mask);
   17.76 +}
   17.77 +
   17.78 +void ack_hypervisor_event(unsigned int ev)
   17.79 +{
   17.80 +    if ( !(event_mask & (1<<ev)) )
   17.81 +    {
   17.82 +        //printk("Unexpected hypervisor event %d\n", ev);
   17.83 +        atomic_inc((atomic_t *)&ev_err_count);
   17.84 +    }
   17.85 +    set_bit(ev, &HYPERVISOR_shared_info->events_mask);
   17.86 +}
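
[Editor's note: the inline assembly in do_hypervisor_callback() scans the pending-event word with bsfl, clears each bit with btrl, and calls do_event() for it; it is written in assembly mainly so the event word can sit in the callee-saved %ebx across the calls. A hedged C paraphrase of that dispatch loop, assuming do_event(ev, regs), the handler signature suggested by kernel.c.]

    extern void do_event(int ev, struct pt_regs *regs);   /* events.c (assumed signature) */

    static void dispatch_events(unsigned long events, struct pt_regs *regs)
    {
        while (events != 0) {
            int ev = 0;
            while (!(events & (1UL << ev)))
                ev++;                        /* lowest set bit, as bsfl finds it */
            events &= ~(1UL << ev);          /* clear it, as btrl does           */
            do_event(ev, regs);              /* invoke the registered action     */
        }
    }
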
    18.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.2 +++ b/mini-os/kernel.c	Mon Oct 06 11:26:01 2003 +0000
    18.3 @@ -0,0 +1,115 @@
    18.4 +/******************************************************************************
    18.5 + * kernel.c
    18.6 + * 
    18.7 + * Assorted crap goes here, including the initial C entry point, jumped at
    18.8 + * from head.S.
    18.9 + */
   18.10 +
   18.11 +#include <os.h>
   18.12 +#include <hypervisor.h>
   18.13 +#include <mm.h>
   18.14 +#include <events.h>
   18.15 +#include <time.h>
   18.16 +#include <types.h>
   18.17 +#include <lib.h>
   18.18 +
   18.19 +/*
   18.20 + * Shared page for communicating with the hypervisor.
   18.21 + * Events flags go here, for example.
   18.22 + */
   18.23 +shared_info_t *HYPERVISOR_shared_info;
   18.24 +
   18.25 +/*
   18.26 + * This structure contains start-of-day info, such as pagetable base pointer,
   18.27 + * address of the shared_info structure, and things like that.
   18.28 + */
   18.29 +union start_info_union start_info_union;
   18.30 +
   18.31 +/*
   18.32 + * Just allocate the kernel stack here. SS:ESP is set up to point here
   18.33 + * in head.S.
   18.34 + */
   18.35 +char stack[8192];
   18.36 +
   18.37 +
   18.38 +/* Assembler interface fns in entry.S. */
   18.39 +void hypervisor_callback(void);
   18.40 +void failsafe_callback(void);
   18.41 +
   18.42 +/* default exit event handler */
   18.43 +static void exit_handler(int ev, struct pt_regs *regs);
   18.44 +
   18.45 +/*
   18.46 + * INITIAL C ENTRY POINT.
   18.47 + */
   18.48 +void start_kernel(start_info_t *si)
   18.49 +{
   18.50 +    int i;
   18.51 +
   18.52 +    /* Copy the start_info struct to a globally-accessible area. */
   18.53 +    memcpy(&start_info, si, sizeof(*si));
   18.54 +
   18.55 +    /* Grab the shared_info pointer and put it in a safe place. */
   18.56 +    HYPERVISOR_shared_info = start_info.shared_info;
   18.57 +
   18.58 +    /* Set up event and failsafe callback addresses. */
   18.59 +    HYPERVISOR_set_callbacks(
   18.60 +        __KERNEL_CS, (unsigned long)hypervisor_callback,
   18.61 +        __KERNEL_CS, (unsigned long)failsafe_callback);
   18.62 +
   18.63 +
   18.64 +    /* ENABLE EVENT DELIVERY. This is disabled at start of day. */
   18.65 +    __sti();
   18.66 +    
   18.67 +    /* print out some useful information  */
   18.68 +    printk("Xeno Minimal OS!\n");
   18.69 +    printk("start_info:   %p\n",  si);
   18.70 +    printk("  nr_pages:   %lu",   si->nr_pages);
   18.71 +    printk("  shared_inf: %p\n",  si->shared_info);
   18.72 +    printk("  pt_base:    %p",    (void *)si->pt_base); 
   18.73 +    printk("  mod_start:  0x%lx\n", si->mod_start);
   18.74 +    printk("  mod_len:    %lu\n", si->mod_len); 
   18.75 +    printk("  net_rings: ");
   18.76 +    for (i = 0; i < MAX_DOMAIN_VIFS; i++) {
   18.77 +        printk(" %lx", si->net_rings[i]);
   18.78 +    }; printk("\n");
   18.79 +    printk("  blk_ring:   0x%lx\n", si->blk_ring);
   18.80 +    printk("  dom_id:     %d\n",  si->dom_id);
   18.81 +    printk("  flags:      0x%lx\n", si->flags);
   18.82 +    printk("  cmd_line:   %s\n",  si->cmd_line ? (const char *)si->cmd_line : "NULL");
   18.83 +
   18.84 +    /* init memory management */
   18.85 +    init_mm();
   18.86 +
   18.87 +    /* set up events */
   18.88 +    init_events();
   18.89 +
   18.90 +    /* install some handlers */
   18.91 +    add_ev_action(EV_DIE, &exit_handler);
   18.92 +    enable_ev_action(EV_DIE);
   18.93 +    enable_hypervisor_event(EV_DIE);
   18.94 +
   18.95 +    /* init time and timers */
   18.96 +    init_time();
   18.97 +
   18.98 +    /* do nothing */
   18.99 +    for ( ; ; ) HYPERVISOR_yield();
  18.100 +}
  18.101 +
  18.102 +
  18.103 +/*
  18.104 + * do_exit: This is called whenever an IRET fails in entry.S.
  18.105 + * This will generally be because an application has got itself into
  18.106 + * a really bad state (probably a bad CS or SS). It must be killed.
  18.107 + * Of course, minimal OS doesn't have applications :-)
  18.108 + */
  18.109 +
  18.110 +void do_exit(void)
  18.111 +{
  18.112 +    printk("do_exit called!\n");
  18.113 +    for ( ;; ) ;
  18.114 +}
  18.115 +static void exit_handler(int ev, struct pt_regs *regs) {
  18.116 +    do_exit();
  18.117 +}
  18.118 +
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/mini-os/lib/malloc.c	Mon Oct 06 11:26:01 2003 +0000
    19.3 @@ -0,0 +1,5700 @@
    19.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
    19.5 + ****************************************************************************
    19.6 + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
    19.7 + ****************************************************************************
    19.8 + *
    19.9 + *        File: malloc.c
   19.10 + *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
   19.11 + *     Changes: 
   19.12 + *              
   19.13 + *        Date: Aug 2003
   19.14 + * 
   19.15 + * Environment: Xen Minimal OS
   19.16 + * Description: Library functions, malloc et al.
   19.17 + *
   19.18 + ****************************************************************************
   19.19 + * $Id: c-insert.c,v 1.7 2002/11/08 16:04:34 rn Exp $
   19.20 + ****************************************************************************
   19.21 + */
   19.22 +
   19.23 +#include <os.h>
   19.24 +#include <mm.h>
   19.25 +#include <types.h>
   19.26 +#include <lib.h>
   19.27 +
   19.28 +/* standard compile option */
   19.29 +#define HAVE_MEMCOPY                1
   19.30 +#define USE_MEMCPY                  1
   19.31 +#undef  HAVE_MMAP
   19.32 +#undef  MMAP_CLEARS
   19.33 +#undef  HAVE_MREMAP
   19.34 +#define malloc_getpagesize          PAGE_SIZE
   19.35 +#undef  HAVE_USR_INCLUDE_MALLOC_H   
   19.36 +#define LACKS_UNISTD_H              1
   19.37 +#define LACKS_SYS_PARAM_H           1
   19.38 +#define LACKS_SYS_MMAN_H            1
   19.39 +#define LACKS_FCNTL_H               1
   19.40 +
   19.41 +
   19.42 +/* page allocator interface */
   19.43 +#define MORECORE             more_core
   19.44 +#define MORECORE_CONTIGUOUS  0
   19.45 +#define MORECORE_FAILURE     0
   19.46 +#define MORECORE_CANNOT_TRIM 1
   19.47 +
   19.48 +static void *more_core(size_t n)
   19.49 +{
   19.50 +    static void *last;
   19.51 +    unsigned long order, num_pages;
   19.52 +    void *ret;
   19.53 +
   19.54 +    if (n == 0)
   19.55 +        return last;
   19.56 +    
   19.57 +    /* get pages */
   19.58 +    order = get_order(n);
   19.59 +    ret = (void *)get_free_pages(order);
   19.60 +
   19.61 +    /* work out pointer to end of chunk */
   19.62 +    if (ret) {
   19.63 +        num_pages = 1 << order;
   19.64 +        last = ret + (num_pages * PAGE_SIZE);
   19.65 +    }
   19.66 +    
   19.67 +    //printk("malloc(%lu) -> o=%lu r=%p, l=%p", n, order, ret, last);
   19.68 +
   19.69 +    return ret;      
   19.70 +}
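
[Editor's note: more_core() is plugged in as dlmalloc's MORECORE hook. A call with n == 0 must report the current top of the heap (here, the end of the last chunk handed out), and a positive n must return at least n contiguous bytes or MORECORE_FAILURE (0 above). A hedged sketch of that contract; heap_probe_example is illustrative only, and printk comes from the <lib.h> already included in this file.]

    static void heap_probe_example(void)
    {
        void *top   = more_core(0);           /* report current top, allocate nothing */
        void *chunk = more_core(16 * 1024);   /* grow the heap by at least 16KB       */

        if (chunk != (void *)MORECORE_FAILURE)
            printk("heap top was %p, new pages at %p\n", top, chunk);
    }
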
   19.71 +
   19.72 +/* other options commented out below */
   19.73 +#define __STD_C     1
   19.74 +#define Void_t      void
   19.75 +#define assert(x) ((void)0)
   19.76 +
   19.77 +#define CHUNK_SIZE_T unsigned long
   19.78 +#define PTR_UINT unsigned long
   19.79 +#define INTERNAL_SIZE_T size_t
   19.80 +#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
   19.81 +#define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
   19.82 +#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
   19.83 +#define TRIM_FASTBINS  0
   19.84 +
   19.85 +#define M_MXFAST            1    
   19.86 +#define DEFAULT_MXFAST     64
   19.87 +#define M_TRIM_THRESHOLD       -1
   19.88 +#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
   19.89 +#define M_TOP_PAD              -2
   19.90 +#define DEFAULT_TOP_PAD        (0)
   19.91 +#define M_MMAP_THRESHOLD      -3
   19.92 +#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
   19.93 +#define M_MMAP_MAX             -4
   19.94 +#define DEFAULT_MMAP_MAX       (0)
   19.95 +#define MALLOC_FAILURE_ACTION   printf("malloc failure\n")
   19.96 +
   19.97 +#define cALLOc      public_cALLOc
   19.98 +#define fREe        public_fREe
   19.99 +#define cFREe       public_cFREe
  19.100 +#define mALLOc      public_mALLOc
  19.101 +#define mEMALIGn    public_mEMALIGn
  19.102 +#define rEALLOc     public_rEALLOc
  19.103 +#define vALLOc      public_vALLOc
  19.104 +#define pVALLOc     public_pVALLOc
  19.105 +#define mALLINFo    public_mALLINFo
  19.106 +#define mALLOPt     public_mALLOPt
  19.107 +#define mTRIm       public_mTRIm
  19.108 +#define mSTATs      public_mSTATs
  19.109 +#define mUSABLe     public_mUSABLe
  19.110 +#define iCALLOc     public_iCALLOc
  19.111 +#define iCOMALLOc   public_iCOMALLOc
  19.112 +
  19.113 +#define public_cALLOc    calloc
  19.114 +#define public_fREe      free
  19.115 +#define public_cFREe     cfree
  19.116 +#define public_mALLOc    malloc
  19.117 +#define public_mEMALIGn  memalign
  19.118 +#define public_rEALLOc   realloc
  19.119 +#define public_vALLOc    valloc
  19.120 +#define public_pVALLOc   pvalloc
  19.121 +#define public_mALLINFo  mallinfo
  19.122 +#define public_mALLOPt   mallopt
  19.123 +#define public_mTRIm     malloc_trim
  19.124 +#define public_mSTATs    malloc_stats
  19.125 +#define public_mUSABLe   malloc_usable_size
  19.126 +#define public_iCALLOc   independent_calloc
  19.127 +#define public_iCOMALLOc independent_comalloc
  19.128 +
  19.129 +
  19.130 +/*
  19.131 +  This is a version (aka dlmalloc) of malloc/free/realloc written by
  19.132 +  Doug Lea and released to the public domain.  Use, modify, and
  19.133 +  redistribute this code without permission or acknowledgement in any
  19.134 +  way you wish.  Send questions, comments, complaints, performance
  19.135 +  data, etc to dl@cs.oswego.edu
  19.136 +
  19.137 +* VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
  19.138 +
  19.139 +   Note: There may be an updated version of this malloc obtainable at
  19.140 +           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
  19.141 +         Check before installing!
  19.142 +
  19.143 +* Quickstart
  19.144 +
  19.145 +  This library is all in one file to simplify the most common usage:
  19.146 +  ftp it, compile it (-O), and link it into another program. All
  19.147 +  of the compile-time options default to reasonable values for use on
  19.148 +  most unix platforms. Compile -DWIN32 for reasonable defaults on windows.
  19.149 +  You might later want to step through various compile-time and dynamic
  19.150 +  tuning options.
  19.151 +
  19.152 +  For convenience, an include file for code using this malloc is at:
  19.153 +     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.7.1.h
  19.154 +  You don't really need this .h file unless you call functions not
  19.155 +  defined in your system include files.  The .h file contains only the
  19.156 +  excerpts from this file needed for using this malloc on ANSI C/C++
  19.157 +  systems, so long as you haven't changed compile-time options about
  19.158 +  naming and tuning parameters.  If you do, then you can create your
  19.159 +  own malloc.h that does include all settings by cutting at the point
  19.160 +  indicated below.
  19.161 +
  19.162 +* Why use this malloc?
  19.163 +
  19.164 +  This is not the fastest, most space-conserving, most portable, or
  19.165 +  most tunable malloc ever written. However it is among the fastest
  19.166 +  while also being among the most space-conserving, portable and tunable.
  19.167 +  Consistent balance across these factors results in a good general-purpose
  19.168 +  allocator for malloc-intensive programs.
  19.169 +
  19.170 +  The main properties of the algorithms are:
  19.171 +  * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
  19.172 +    with ties normally decided via FIFO (i.e. least recently used).
  19.173 +  * For small (<= 64 bytes by default) requests, it is a caching
  19.174 +    allocator, that maintains pools of quickly recycled chunks.
  19.175 +  * In between, and for combinations of large and small requests, it does
  19.176 +    the best it can trying to meet both goals at once.
  19.177 +  * For very large requests (>= 128KB by default), it relies on system
  19.178 +    memory mapping facilities, if supported.
  19.179 +
  19.180 +  For a longer but slightly out of date high-level description, see
  19.181 +     http://gee.cs.oswego.edu/dl/html/malloc.html
  19.182 +
  19.183 +  You may already by default be using a C library containing a malloc
  19.184 +  that is  based on some version of this malloc (for example in
  19.185 +  linux). You might still want to use the one in this file in order to
  19.186 +  customize settings or to avoid overheads associated with library
  19.187 +  versions.
  19.188 +
  19.189 +* Contents, described in more detail in "description of public routines" below.
  19.190 +
  19.191 +  Standard (ANSI/SVID/...)  functions:
  19.192 +    malloc(size_t n);
  19.193 +    calloc(size_t n_elements, size_t element_size);
  19.194 +    free(Void_t* p);
  19.195 +    realloc(Void_t* p, size_t n);
  19.196 +    memalign(size_t alignment, size_t n);
  19.197 +    valloc(size_t n);
  19.198 +    mallinfo()
  19.199 +    mallopt(int parameter_number, int parameter_value)
  19.200 +
  19.201 +  Additional functions:
  19.202 +    independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
  19.203 +    independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
  19.204 +    pvalloc(size_t n);
  19.205 +    cfree(Void_t* p);
  19.206 +    malloc_trim(size_t pad);
  19.207 +    malloc_usable_size(Void_t* p);
  19.208 +    malloc_stats();
  19.209 +
  19.210 +* Vital statistics:
  19.211 +
  19.212 +  Supported pointer representation:       4 or 8 bytes
  19.213 +  Supported size_t  representation:       4 or 8 bytes 
  19.214 +       Note that size_t is allowed to be 4 bytes even if pointers are 8.
  19.215 +       You can adjust this by defining INTERNAL_SIZE_T
  19.216 +
  19.217 +  Alignment:                              2 * sizeof(size_t) (default)
  19.218 +       (i.e., 8 byte alignment with 4byte size_t). This suffices for
  19.219 +       nearly all current machines and C compilers. However, you can
  19.220 +       define MALLOC_ALIGNMENT to be wider than this if necessary.
  19.221 +
  19.222 +  Minimum overhead per allocated chunk:   4 or 8 bytes
  19.223 +       Each malloced chunk has a hidden word of overhead holding size
  19.224 +       and status information.
  19.225 +
  19.226 +  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
   19.227 +                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)
  19.228 +
  19.229 +       When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
  19.230 +       ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
  19.231 +       needed; 4 (8) for a trailing size field and 8 (16) bytes for
  19.232 +       free list pointers. Thus, the minimum allocatable size is
  19.233 +       16/24/32 bytes.
  19.234 +
  19.235 +       Even a request for zero bytes (i.e., malloc(0)) returns a
  19.236 +       pointer to something of the minimum allocatable size.
  19.237 +
  19.238 +       The maximum overhead wastage (i.e., number of extra bytes
  19.239 +       allocated than were requested in malloc) is less than or equal
  19.240 +       to the minimum size, except for requests >= mmap_threshold that
  19.241 +       are serviced via mmap(), where the worst case wastage is 2 *
  19.242 +       sizeof(size_t) bytes plus the remainder from a system page (the
  19.243 +       minimal mmap unit); typically 4096 or 8192 bytes.
  19.244 +
  19.245 +  Maximum allocated size:  4-byte size_t: 2^32 minus about two pages 
  19.246 +                           8-byte size_t: 2^64 minus about two pages
  19.247 +
  19.248 +       It is assumed that (possibly signed) size_t values suffice to
  19.249 +       represent chunk sizes. `Possibly signed' is due to the fact
  19.250 +       that `size_t' may be defined on a system as either a signed or
  19.251 +       an unsigned type. The ISO C standard says that it must be
  19.252 +       unsigned, but a few systems are known not to adhere to this.
  19.253 +       Additionally, even when size_t is unsigned, sbrk (which is by
  19.254 +       default used to obtain memory from system) accepts signed
  19.255 +       arguments, and may not be able to handle size_t-wide arguments
  19.256 +       with negative sign bit.  Generally, values that would
  19.257 +       appear as negative after accounting for overhead and alignment
  19.258 +       are supported only via mmap(), which does not have this
  19.259 +       limitation.
  19.260 +
  19.261 +       Requests for sizes outside the allowed range will perform an optional
  19.262 +       failure action and then return null. (Requests may also
   19.263 +       fail because a system is out of memory.)
  19.264 +
  19.265 +  Thread-safety: NOT thread-safe unless USE_MALLOC_LOCK defined
  19.266 +
  19.267 +       When USE_MALLOC_LOCK is defined, wrappers are created to
  19.268 +       surround every public call with either a pthread mutex or
  19.269 +       a win32 spinlock (depending on WIN32). This is not
  19.270 +       especially fast, and can be a major bottleneck.
  19.271 +       It is designed only to provide minimal protection
  19.272 +       in concurrent environments, and to provide a basis for
  19.273 +       extensions.  If you are using malloc in a concurrent program,
  19.274 +       you would be far better off obtaining ptmalloc, which is
  19.275 +       derived from a version of this malloc, and is well-tuned for
  19.276 +       concurrent programs. (See http://www.malloc.de) Note that
   19.277 +       even when USE_MALLOC_LOCK is defined, you can guarantee
  19.278 +       full thread-safety only if no threads acquire memory through 
  19.279 +       direct calls to MORECORE or other system-level allocators.
  19.280 +
  19.281 +  Compliance: I believe it is compliant with the 1997 Single Unix Specification
  19.282 +       (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably 
  19.283 +       others as well.
  19.284 +
  19.285 +* Synopsis of compile-time options:
  19.286 +
  19.287 +    People have reported using previous versions of this malloc on all
  19.288 +    versions of Unix, sometimes by tweaking some of the defines
  19.289 +    below. It has been tested most extensively on Solaris and
  19.290 +    Linux. It is also reported to work on WIN32 platforms.
  19.291 +    People also report using it in stand-alone embedded systems.
  19.292 +
  19.293 +    The implementation is in straight, hand-tuned ANSI C.  It is not
  19.294 +    at all modular. (Sorry!)  It uses a lot of macros.  To be at all
  19.295 +    usable, this code should be compiled using an optimizing compiler
  19.296 +    (for example gcc -O3) that can simplify expressions and control
  19.297 +    paths. (FAQ: some macros import variables as arguments rather than
  19.298 +    declare locals because people reported that some debuggers
  19.299 +    otherwise get confused.)
  19.300 +
  19.301 +    OPTION                     DEFAULT VALUE
  19.302 +
  19.303 +    Compilation Environment options:
  19.304 +
  19.305 +    __STD_C                    derived from C compiler defines
  19.306 +    WIN32                      NOT defined
  19.307 +    HAVE_MEMCPY                defined
  19.308 +    USE_MEMCPY                 1 if HAVE_MEMCPY is defined
  19.309 +    HAVE_MMAP                  defined as 1 
  19.310 +    MMAP_CLEARS                1
  19.311 +    HAVE_MREMAP                0 unless linux defined
  19.312 +    malloc_getpagesize         derived from system #includes, or 4096 if not
  19.313 +    HAVE_USR_INCLUDE_MALLOC_H  NOT defined
  19.314 +    LACKS_UNISTD_H             NOT defined unless WIN32
  19.315 +    LACKS_SYS_PARAM_H          NOT defined unless WIN32
  19.316 +    LACKS_SYS_MMAN_H           NOT defined unless WIN32
  19.317 +    LACKS_FCNTL_H              NOT defined
  19.318 +
  19.319 +    Changing default word sizes:
  19.320 +
  19.321 +    INTERNAL_SIZE_T            size_t
  19.322 +    MALLOC_ALIGNMENT           2 * sizeof(INTERNAL_SIZE_T)
  19.323 +    PTR_UINT                   unsigned long
  19.324 +    CHUNK_SIZE_T               unsigned long
  19.325 +
  19.326 +    Configuration and functionality options:
  19.327 +
  19.328 +    USE_DL_PREFIX              NOT defined
  19.329 +    USE_PUBLIC_MALLOC_WRAPPERS NOT defined
  19.330 +    USE_MALLOC_LOCK            NOT defined
  19.331 +    DEBUG                      NOT defined
  19.332 +    REALLOC_ZERO_BYTES_FREES   NOT defined
  19.333 +    MALLOC_FAILURE_ACTION      errno = ENOMEM, if __STD_C defined, else no-op
  19.334 +    TRIM_FASTBINS              0
  19.335 +    FIRST_SORTED_BIN_SIZE      512
  19.336 +
  19.337 +    Options for customizing MORECORE:
  19.338 +
  19.339 +    MORECORE                   sbrk
  19.340 +    MORECORE_CONTIGUOUS        1 
  19.341 +    MORECORE_CANNOT_TRIM       NOT defined
  19.342 +    MMAP_AS_MORECORE_SIZE      (1024 * 1024) 
  19.343 +
  19.344 +    Tuning options that are also dynamically changeable via mallopt:
  19.345 +
  19.346 +    DEFAULT_MXFAST             64
  19.347 +    DEFAULT_TRIM_THRESHOLD     256 * 1024
  19.348 +    DEFAULT_TOP_PAD            0
  19.349 +    DEFAULT_MMAP_THRESHOLD     256 * 1024
  19.350 +    DEFAULT_MMAP_MAX           65536
  19.351 +
  19.352 +    There are several other #defined constants and macros that you
  19.353 +    probably don't want to touch unless you are extending or adapting malloc.
  19.354 +*/
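
[Editor's note: of the additional entry points listed in the comment above, independent_calloc() is the least familiar. A hedged usage sketch follows; the element count and size are arbitrary, and passing 0 for the chunks[] argument asks the allocator to malloc the pointer array itself, which must then be freed as well.]

    void icalloc_example(void)
    {
        void **elems = independent_calloc(8, 32, 0);  /* 8 cleared 32-byte chunks   */
        int i;

        if (elems == 0)
            return;                                   /* MALLOC_FAILURE_ACTION ran  */

        for (i = 0; i < 8; i++)
            free(elems[i]);                           /* each one is a normal chunk */
        free(elems);                                  /* the array itself is malloced */
    }
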
  19.355 +
  19.356 +/* RN: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
  19.357 +#if 0
  19.358 +
  19.359 +/*
  19.360 +  WIN32 sets up defaults for MS environment and compilers.
  19.361 +  Otherwise defaults are for unix.
  19.362 +*/
  19.363 +
  19.364 +/* #define WIN32 */
  19.365 +
  19.366 +#ifdef WIN32
  19.367 +
  19.368 +#define WIN32_LEAN_AND_MEAN
  19.369 +#include <windows.h>
  19.370 +
  19.371 +/* Win32 doesn't supply or need the following headers */
  19.372 +#define LACKS_UNISTD_H
  19.373 +#define LACKS_SYS_PARAM_H
  19.374 +#define LACKS_SYS_MMAN_H
  19.375 +
  19.376 +/* Use the supplied emulation of sbrk */
  19.377 +#define MORECORE sbrk
  19.378 +#define MORECORE_CONTIGUOUS 1
  19.379 +#define MORECORE_FAILURE    ((void*)(-1))
  19.380 +
  19.381 +/* Use the supplied emulation of mmap and munmap */
  19.382 +#define HAVE_MMAP 1
  19.383 +#define MUNMAP_FAILURE  (-1)
  19.384 +#define MMAP_CLEARS 1
  19.385 +
  19.386 +/* These values don't really matter in windows mmap emulation */
  19.387 +#define MAP_PRIVATE 1
  19.388 +#define MAP_ANONYMOUS 2
  19.389 +#define PROT_READ 1
  19.390 +#define PROT_WRITE 2
  19.391 +
  19.392 +/* Emulation functions defined at the end of this file */
  19.393 +
  19.394 +/* If USE_MALLOC_LOCK, use supplied critical-section-based lock functions */
  19.395 +#ifdef USE_MALLOC_LOCK
  19.396 +static int slwait(int *sl);
  19.397 +static int slrelease(int *sl);
  19.398 +#endif
  19.399 +
  19.400 +static long getpagesize(void);
  19.401 +static long getregionsize(void);
  19.402 +static void *sbrk(long size);
  19.403 +static void *mmap(void *ptr, long size, long prot, long type, long handle, long arg);
  19.404 +static long munmap(void *ptr, long size);
  19.405 +
  19.406 +static void vminfo (unsigned long*free, unsigned long*reserved, unsigned long*committed);
  19.407 +static int cpuinfo (int whole, unsigned long*kernel, unsigned long*user);
  19.408 +
  19.409 +#endif
  19.410 +
  19.411 +/*
  19.412 +  __STD_C should be nonzero if using ANSI-standard C compiler, a C++
  19.413 +  compiler, or a C compiler sufficiently close to ANSI to get away
  19.414 +  with it.
  19.415 +*/
  19.416 +
  19.417 +#ifndef __STD_C
  19.418 +#if defined(__STDC__) || defined(_cplusplus)
  19.419 +#define __STD_C     1
  19.420 +#else
  19.421 +#define __STD_C     0
  19.422 +#endif 
  19.423 +#endif /*__STD_C*/
  19.424 +
  19.425 +
  19.426 +/*
  19.427 +  Void_t* is the pointer type that malloc should say it returns
  19.428 +*/
  19.429 +
  19.430 +#ifndef Void_t
  19.431 +#if (__STD_C || defined(WIN32))
  19.432 +#define Void_t      void
  19.433 +#else
  19.434 +#define Void_t      char
  19.435 +#endif
  19.436 +#endif /*Void_t*/
  19.437 +
  19.438 +#if __STD_C
  19.439 +#include <stddef.h>   /* for size_t */
  19.440 +#else
  19.441 +#include <sys/types.h>
  19.442 +#endif
  19.443 +
  19.444 +#ifdef __cplusplus
  19.445 +extern "C" {
  19.446 +#endif
  19.447 +
  19.448 +/* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
  19.449 +
  19.450 +/* #define  LACKS_UNISTD_H */
  19.451 +
  19.452 +#ifndef LACKS_UNISTD_H
  19.453 +#include <unistd.h>
  19.454 +#endif
  19.455 +
  19.456 +/* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
  19.457 +
  19.458 +/* #define  LACKS_SYS_PARAM_H */
  19.459 +
  19.460 +
  19.461 +#include <stdio.h>    /* needed for malloc_stats */
  19.462 +#include <errno.h>    /* needed for optional MALLOC_FAILURE_ACTION */
  19.463 +
  19.464 +
  19.465 +/*
  19.466 +  Debugging:
  19.467 +
  19.468 +  Because freed chunks may be overwritten with bookkeeping fields, this
  19.469 +  malloc will often die when freed memory is overwritten by user
  19.470 +  programs.  This can be very effective (albeit in an annoying way)
  19.471 +  in helping track down dangling pointers.
  19.472 +
  19.473 +  If you compile with -DDEBUG, a number of assertion checks are
  19.474 +  enabled that will catch more memory errors. You probably won't be
  19.475 +  able to make much sense of the actual assertion errors, but they
  19.476 +  should help you locate incorrectly overwritten memory.  The
  19.477 +  checking is fairly extensive, and will slow down execution
  19.478 +  noticeably. Calling malloc_stats or mallinfo with DEBUG set will
  19.479 +  attempt to check every non-mmapped allocated and free chunk in the
   19.480 +  course of computing the summaries. (By nature, mmapped regions
  19.481 +  cannot be checked very much automatically.)
  19.482 +
  19.483 +  Setting DEBUG may also be helpful if you are trying to modify
  19.484 +  this code. The assertions in the check routines spell out in more
  19.485 +  detail the assumptions and invariants underlying the algorithms.
  19.486 +
  19.487 +  Setting DEBUG does NOT provide an automated mechanism for checking
  19.488 +  that all accesses to malloced memory stay within their
  19.489 +  bounds. However, there are several add-ons and adaptations of this
  19.490 +  or other mallocs available that do this.
  19.491 +*/
  19.492 +
  19.493 +#if DEBUG
  19.494 +#include <assert.h>
  19.495 +#else
  19.496 +#define assert(x) ((void)0)
  19.497 +#endif
  19.498 +
  19.499 +/*
  19.500 +  The unsigned integer type used for comparing any two chunk sizes.
  19.501 +  This should be at least as wide as size_t, but should not be signed.
  19.502 +*/
  19.503 +
  19.504 +#ifndef CHUNK_SIZE_T
  19.505 +#define CHUNK_SIZE_T unsigned long
  19.506 +#endif
  19.507 +
  19.508 +/* 
   19.509 +  The unsigned integer type used to hold addresses when they are
  19.510 +  manipulated as integers. Except that it is not defined on all
  19.511 +  systems, intptr_t would suffice.
  19.512 +*/
  19.513 +#ifndef PTR_UINT
  19.514 +#define PTR_UINT unsigned long
  19.515 +#endif
  19.516 +
  19.517 +
  19.518 +/*
  19.519 +  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  19.520 +  of chunk sizes.
  19.521 +
  19.522 +  The default version is the same as size_t.
  19.523 +
  19.524 +  While not strictly necessary, it is best to define this as an
  19.525 +  unsigned type, even if size_t is a signed type. This may avoid some
  19.526 +  artificial size limitations on some systems.
  19.527 +
  19.528 +  On a 64-bit machine, you may be able to reduce malloc overhead by
  19.529 +  defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
  19.530 +  expense of not being able to handle more than 2^32 of malloced
  19.531 +  space. If this limitation is acceptable, you are encouraged to set
  19.532 +  this unless you are on a platform requiring 16byte alignments. In
  19.533 +  this case the alignment requirements turn out to negate any
  19.534 +  potential advantages of decreasing size_t word size.
  19.535 +
  19.536 +  Implementors: Beware of the possible combinations of:
  19.537 +     - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
  19.538 +       and might be the same width as int or as long
  19.539 +     - size_t might have different width and signedness as INTERNAL_SIZE_T
  19.540 +     - int and long might be 32 or 64 bits, and might be the same width
  19.541 +  To deal with this, most comparisons and difference computations
  19.542 +  among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being
  19.543 +  aware of the fact that casting an unsigned int to a wider long does
  19.544 +  not sign-extend. (This also makes checking for negative numbers
  19.545 +  awkward.) Some of these casts result in harmless compiler warnings
  19.546 +  on some systems.
  19.547 +*/
  19.548 +
  19.549 +#ifndef INTERNAL_SIZE_T
  19.550 +#define INTERNAL_SIZE_T size_t
  19.551 +#endif
  19.552 +
  19.553 +/* The corresponding word size */
  19.554 +#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
  19.555 +
  19.556 +
  19.557 +
  19.558 +/*
  19.559 +  MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
  19.560 +  It must be a power of two at least 2 * SIZE_SZ, even on machines
  19.561 +  for which smaller alignments would suffice. It may be defined as
  19.562 +  larger than this though. Note however that code and data structures
  19.563 +  are optimized for the case of 8-byte alignment.
  19.564 +*/
  19.565 +
  19.566 +
  19.567 +#ifndef MALLOC_ALIGNMENT
  19.568 +#define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
  19.569 +#endif
  19.570 +
  19.571 +/* The corresponding bit mask value */
  19.572 +#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
  19.573 +
  19.574 +
  19.575 +
  19.576 +/*
  19.577 +  REALLOC_ZERO_BYTES_FREES should be set if a call to
  19.578 +  realloc with zero bytes should be the same as a call to free.
  19.579 +  Some people think it should. Otherwise, since this malloc
  19.580 +  returns a unique pointer for malloc(0), so does realloc(p, 0).
  19.581 +*/
  19.582 +
  19.583 +/*   #define REALLOC_ZERO_BYTES_FREES */
  19.584 +
  19.585 +/*
  19.586 +  TRIM_FASTBINS controls whether free() of a very small chunk can
  19.587 +  immediately lead to trimming. Setting to true (1) can reduce memory
  19.588 +  footprint, but will almost always slow down programs that use a lot
  19.589 +  of small chunks.
  19.590 +
  19.591 +  Define this only if you are willing to give up some speed to more
  19.592 +  aggressively reduce system-level memory footprint when releasing
  19.593 +  memory in programs that use many small chunks.  You can get
  19.594 +  essentially the same effect by setting MXFAST to 0, but this can
  19.595 +  lead to even greater slowdowns in programs using many small chunks.
  19.596 +  TRIM_FASTBINS is an in-between compile-time option, that disables
  19.597 +  only those chunks bordering topmost memory from being placed in
  19.598 +  fastbins.
  19.599 +*/
  19.600 +
  19.601 +#ifndef TRIM_FASTBINS
  19.602 +#define TRIM_FASTBINS  0
  19.603 +#endif
  19.604 +
  19.605 +
  19.606 +/*
  19.607 +  USE_DL_PREFIX will prefix all public routines with the string 'dl'.
  19.608 +  This is necessary when you only want to use this malloc in one part 
  19.609 +  of a program, using your regular system malloc elsewhere.
  19.610 +*/
  19.611 +
  19.612 +/* #define USE_DL_PREFIX */
  19.613 +
  19.614 +
  19.615 +/*
  19.616 +  USE_MALLOC_LOCK causes wrapper functions to surround each
  19.617 +  callable routine with pthread mutex lock/unlock.
  19.618 +
  19.619 +  USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
  19.620 +*/
  19.621 +
  19.622 +
  19.623 +/* #define USE_MALLOC_LOCK */
  19.624 +
  19.625 +
  19.626 +/*
  19.627 +  If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
  19.628 +  actually a wrapper function that first calls MALLOC_PREACTION, then
  19.629 +  calls the internal routine, and follows it with
  19.630 +  MALLOC_POSTACTION. This is needed for locking, but you can also use
  19.631 +  this, without USE_MALLOC_LOCK, for purposes of interception,
  19.632 +  instrumentation, etc. It is a sad fact that using wrappers often
  19.633 +  noticeably degrades performance of malloc-intensive programs.
  19.634 +*/
  19.635 +
  19.636 +#ifdef USE_MALLOC_LOCK
  19.637 +#define USE_PUBLIC_MALLOC_WRAPPERS
  19.638 +#else
  19.639 +/* #define USE_PUBLIC_MALLOC_WRAPPERS */
  19.640 +#endif
  19.641 +
  19.642 +
  19.643 +/* 
  19.644 +   Two-phase name translation.
  19.645 +   All of the actual routines are given mangled names.
  19.646 +   When wrappers are used, they become the public callable versions.
  19.647 +   When DL_PREFIX is used, the callable names are prefixed.
  19.648 +*/
  19.649 +
  19.650 +#ifndef USE_PUBLIC_MALLOC_WRAPPERS
  19.651 +#define cALLOc      public_cALLOc
  19.652 +#define fREe        public_fREe
  19.653 +#define cFREe       public_cFREe
  19.654 +#define mALLOc      public_mALLOc
  19.655 +#define mEMALIGn    public_mEMALIGn
  19.656 +#define rEALLOc     public_rEALLOc
  19.657 +#define vALLOc      public_vALLOc
  19.658 +#define pVALLOc     public_pVALLOc
  19.659 +#define mALLINFo    public_mALLINFo
  19.660 +#define mALLOPt     public_mALLOPt
  19.661 +#define mTRIm       public_mTRIm
  19.662 +#define mSTATs      public_mSTATs
  19.663 +#define mUSABLe     public_mUSABLe
  19.664 +#define iCALLOc     public_iCALLOc
  19.665 +#define iCOMALLOc   public_iCOMALLOc
  19.666 +#endif
  19.667 +
  19.668 +#ifdef USE_DL_PREFIX
  19.669 +#define public_cALLOc    dlcalloc
  19.670 +#define public_fREe      dlfree
  19.671 +#define public_cFREe     dlcfree
  19.672 +#define public_mALLOc    dlmalloc
  19.673 +#define public_mEMALIGn  dlmemalign
  19.674 +#define public_rEALLOc   dlrealloc
  19.675 +#define public_vALLOc    dlvalloc
  19.676 +#define public_pVALLOc   dlpvalloc
  19.677 +#define public_mALLINFo  dlmallinfo
  19.678 +#define public_mALLOPt   dlmallopt
  19.679 +#define public_mTRIm     dlmalloc_trim
  19.680 +#define public_mSTATs    dlmalloc_stats
  19.681 +#define public_mUSABLe   dlmalloc_usable_size
  19.682 +#define public_iCALLOc   dlindependent_calloc
  19.683 +#define public_iCOMALLOc dlindependent_comalloc
  19.684 +#else /* USE_DL_PREFIX */
  19.685 +#define public_cALLOc    calloc
  19.686 +#define public_fREe      free
  19.687 +#define public_cFREe     cfree
  19.688 +#define public_mALLOc    malloc
  19.689 +#define public_mEMALIGn  memalign
  19.690 +#define public_rEALLOc   realloc
  19.691 +#define public_vALLOc    valloc
  19.692 +#define public_pVALLOc   pvalloc
  19.693 +#define public_mALLINFo  mallinfo
  19.694 +#define public_mALLOPt   mallopt
  19.695 +#define public_mTRIm     malloc_trim
  19.696 +#define public_mSTATs    malloc_stats
  19.697 +#define public_mUSABLe   malloc_usable_size
  19.698 +#define public_iCALLOc   independent_calloc
  19.699 +#define public_iCOMALLOc independent_comalloc
  19.700 +#endif /* USE_DL_PREFIX */
  19.701 +
  19.702 +
  19.703 +/*
  19.704 +  HAVE_MEMCPY should be defined if you are not otherwise using
  19.705 +  ANSI STD C, but still have memcpy and memset in your C library
  19.706 +  and want to use them in calloc and realloc. Otherwise simple
  19.707 +  macro versions are defined below.
  19.708 +
  19.709 +  USE_MEMCPY should be defined as 1 if you actually want to
  19.710 +  have memset and memcpy called. People report that the macro
  19.711 +  versions are faster than libc versions on some systems.
  19.712 +  
  19.713 +  Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
  19.714 +  (of <= 36 bytes) are manually unrolled in realloc and calloc.
  19.715 +*/
  19.716 +
  19.717 +#define HAVE_MEMCPY
  19.718 +
  19.719 +#ifndef USE_MEMCPY
  19.720 +#ifdef HAVE_MEMCPY
  19.721 +#define USE_MEMCPY 1
  19.722 +#else
  19.723 +#define USE_MEMCPY 0
  19.724 +#endif
  19.725 +#endif
  19.726 +
  19.727 +
  19.728 +#if (__STD_C || defined(HAVE_MEMCPY))
  19.729 +
  19.730 +#ifdef WIN32
  19.731 +/* On Win32 memset and memcpy are already declared in windows.h */
  19.732 +#else
  19.733 +#if __STD_C
  19.734 +void* memset(void*, int, size_t);
  19.735 +void* memcpy(void*, const void*, size_t);
  19.736 +#else
  19.737 +Void_t* memset();
  19.738 +Void_t* memcpy();
  19.739 +#endif
  19.740 +#endif
  19.741 +#endif
  19.742 +
  19.743 +/*
  19.744 +  MALLOC_FAILURE_ACTION is the action to take before "return 0" when
  19.745 +  malloc fails to be able to return memory, either because memory is
  19.746 +  exhausted or because of illegal arguments.
  19.747 +  
  19.748 +  By default, sets errno if running on STD_C platform, else does nothing.  
  19.749 +*/
  19.750 +
  19.751 +#ifndef MALLOC_FAILURE_ACTION
  19.752 +#if __STD_C
  19.753 +#define MALLOC_FAILURE_ACTION \
  19.754 +   errno = ENOMEM;
  19.755 +
  19.756 +#else
  19.757 +#define MALLOC_FAILURE_ACTION
  19.758 +#endif
  19.759 +#endif
  19.760 +
  19.761 +/*
  19.762 +  MORECORE-related declarations. By default, rely on sbrk
  19.763 +*/
  19.764 +
  19.765 +
  19.766 +#ifdef LACKS_UNISTD_H
  19.767 +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
  19.768 +#if __STD_C
  19.769 +extern Void_t*     sbrk(ptrdiff_t);
  19.770 +#else
  19.771 +extern Void_t*     sbrk();
  19.772 +#endif
  19.773 +#endif
  19.774 +#endif
  19.775 +
  19.776 +/*
  19.777 +  MORECORE is the name of the routine to call to obtain more memory
  19.778 +  from the system.  See below for general guidance on writing
  19.779 +  alternative MORECORE functions, as well as a version for WIN32 and a
  19.780 +  sample version for pre-OSX macos.
  19.781 +*/
  19.782 +
  19.783 +#ifndef MORECORE
  19.784 +#define MORECORE sbrk
  19.785 +#endif
  19.786 +
  19.787 +/*
  19.788 +  MORECORE_FAILURE is the value returned upon failure of MORECORE
  19.789 +  as well as mmap. Since it cannot be an otherwise valid memory address,
  19.790 +  and must reflect values of standard sys calls, you probably ought not
  19.791 +  try to redefine it.
  19.792 +*/
  19.793 +
  19.794 +#ifndef MORECORE_FAILURE
  19.795 +#define MORECORE_FAILURE (-1)
  19.796 +#endif
  19.797 +
  19.798 +/*
  19.799 +  If MORECORE_CONTIGUOUS is true, take advantage of fact that
  19.800 +  consecutive calls to MORECORE with positive arguments always return
  19.801 +  contiguous increasing addresses.  This is true of unix sbrk.  Even
  19.802 +  if not defined, when regions happen to be contiguous, malloc will
  19.803 +  permit allocations spanning regions obtained from different
  19.804 +  calls. But defining this when applicable enables some stronger
  19.805 +  consistency checks and space efficiencies. 
  19.806 +*/
  19.807 +
  19.808 +#ifndef MORECORE_CONTIGUOUS
  19.809 +#define MORECORE_CONTIGUOUS 1
  19.810 +#endif
  19.811 +
  19.812 +/*
  19.813 +  Define MORECORE_CANNOT_TRIM if your version of MORECORE
  19.814 +  cannot release space back to the system when given negative
  19.815 +  arguments. This is generally necessary only if you are using
  19.816 +  a hand-crafted MORECORE function that cannot handle negative arguments.
  19.817 +*/
  19.818 +
  19.819 +/* #define MORECORE_CANNOT_TRIM */
  19.820 +
  19.821 +
  19.822 +/*
  19.823 +  Define HAVE_MMAP as true to optionally make malloc() use mmap() to
  19.824 +  allocate very large blocks.  These will be returned to the
  19.825 +  operating system immediately after a free(). Also, if mmap
  19.826 +  is available, it is used as a backup strategy in cases where
  19.827 +  MORECORE fails to provide space from system.
  19.828 +
  19.829 +  This malloc is best tuned to work with mmap for large requests.
  19.830 +  If you do not have mmap, operations involving very large chunks (1MB
  19.831 +  or so) may be slower than you'd like.
  19.832 +*/
  19.833 +
  19.834 +#ifndef HAVE_MMAP
  19.835 +#define HAVE_MMAP 1
  19.836 +#endif
  19.837 +
  19.838 +#if HAVE_MMAP
  19.839 +/* 
  19.840 +   Standard unix mmap using /dev/zero clears memory so calloc doesn't
  19.841 +   need to.
  19.842 +*/
  19.843 +
  19.844 +#ifndef MMAP_CLEARS
  19.845 +#define MMAP_CLEARS 1
  19.846 +#endif
  19.847 +
  19.848 +#else /* no mmap */
  19.849 +#ifndef MMAP_CLEARS
  19.850 +#define MMAP_CLEARS 0
  19.851 +#endif
  19.852 +#endif
  19.853 +
  19.854 +
  19.855 +/* 
  19.856 +   MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
  19.857 +   sbrk fails, and mmap is used as a backup (which is done only if
  19.858 +   HAVE_MMAP).  The value must be a multiple of page size.  This
  19.859 +   backup strategy generally applies only when systems have "holes" in
  19.860 +   address space, so sbrk cannot perform contiguous expansion, but
  19.861 +   there is still space available on system.  On systems for which
  19.862 +   this is known to be useful (i.e. most linux kernels), this occurs
  19.863 +   only when programs allocate huge amounts of memory.  Between this,
  19.864 +   and the fact that mmap regions tend to be limited, the size should
  19.865 +   be large, to avoid too many mmap calls and thus avoid running out
  19.866 +   of kernel resources.
  19.867 +*/
  19.868 +
  19.869 +#ifndef MMAP_AS_MORECORE_SIZE
  19.870 +#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
  19.871 +#endif
  19.872 +
  19.873 +/*
  19.874 +  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  19.875 +  large blocks.  This is currently only possible on Linux with
  19.876 +  kernel versions newer than 1.3.77.
  19.877 +*/
  19.878 +
  19.879 +#ifndef HAVE_MREMAP
  19.880 +#ifdef linux
  19.881 +#define HAVE_MREMAP 1
  19.882 +#else
  19.883 +#define HAVE_MREMAP 0
  19.884 +#endif
  19.885 +
  19.886 +#endif /* HAVE_MREMAP */
  19.887 +
  19.888 +
  19.889 +/*
  19.890 +  The system page size. To the extent possible, this malloc manages
  19.891 +  memory from the system in page-size units.  Note that this value is
  19.892 +  cached during initialization into a field of malloc_state. So even
  19.893 +  if malloc_getpagesize is a function, it is only called once.
  19.894 +
  19.895 +  The following mechanics for getpagesize were adapted from bsd/gnu
  19.896 +  getpagesize.h. If none of the system-probes here apply, a value of
  19.897 +  4096 is used, which should be OK: if none of the probes apply, the
  19.898 +  exact page size probably doesn't matter for performance anyway.
  19.899 +*/
  19.900 +
  19.901 +
  19.902 +#ifndef malloc_getpagesize
  19.903 +
  19.904 +#ifndef LACKS_UNISTD_H
  19.905 +#  include <unistd.h>
  19.906 +#endif
  19.907 +
  19.908 +#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
  19.909 +#    ifndef _SC_PAGE_SIZE
  19.910 +#      define _SC_PAGE_SIZE _SC_PAGESIZE
  19.911 +#    endif
  19.912 +#  endif
  19.913 +
  19.914 +#  ifdef _SC_PAGE_SIZE
  19.915 +#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
  19.916 +#  else
  19.917 +#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
  19.918 +       extern size_t getpagesize();
  19.919 +#      define malloc_getpagesize getpagesize()
  19.920 +#    else
  19.921 +#      ifdef WIN32 /* use supplied emulation of getpagesize */
  19.922 +#        define malloc_getpagesize getpagesize() 
  19.923 +#      else
  19.924 +#        ifndef LACKS_SYS_PARAM_H
  19.925 +#          include <sys/param.h>
  19.926 +#        endif
  19.927 +#        ifdef EXEC_PAGESIZE
  19.928 +#          define malloc_getpagesize EXEC_PAGESIZE
  19.929 +#        else
  19.930 +#          ifdef NBPG
  19.931 +#            ifndef CLSIZE
  19.932 +#              define malloc_getpagesize NBPG
  19.933 +#            else
  19.934 +#              define malloc_getpagesize (NBPG * CLSIZE)
  19.935 +#            endif
  19.936 +#          else
  19.937 +#            ifdef NBPC
  19.938 +#              define malloc_getpagesize NBPC
  19.939 +#            else
  19.940 +#              ifdef PAGESIZE
  19.941 +#                define malloc_getpagesize PAGESIZE
  19.942 +#              else /* just guess */
  19.943 +#                define malloc_getpagesize (4096) 
  19.944 +#              endif
  19.945 +#            endif
  19.946 +#          endif
  19.947 +#        endif
  19.948 +#      endif
  19.949 +#    endif
  19.950 +#  endif
  19.951 +#endif
  19.952 +
  19.953 +/*
  19.954 +  This version of malloc supports the standard SVID/XPG mallinfo
  19.955 +  routine that returns a struct containing usage properties and
  19.956 +  statistics. It should work on any SVID/XPG compliant system that has
  19.957 +  a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
  19.958 +  install such a thing yourself, cut out the preliminary declarations
  19.959 +  as described above and below and save them in a malloc.h file. But
  19.960 +  there's no compelling reason to bother to do this.)
  19.961 +
  19.962 +  The main declaration needed is the mallinfo struct that is returned
  19.963 +  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  19.964 +  bunch of fields that are not even meaningful in this version of
  19.965 +  malloc.  These fields are instead filled by mallinfo() with
  19.966 +  other numbers that might be of interest.
  19.967 +
  19.968 +  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  19.969 +  /usr/include/malloc.h file that includes a declaration of struct
  19.970 +  mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
  19.971 +  version is declared below.  These must be precisely the same for
  19.972 +  mallinfo() to work.  The original SVID version of this struct,
  19.973 +  defined on most systems with mallinfo, declares all fields as
  19.974 +  ints. But some others define as unsigned long. If your system
  19.975 +  defines the fields using a type of different width than listed here,
  19.976 +  you must #include your system version and #define
  19.977 +  HAVE_USR_INCLUDE_MALLOC_H.
  19.978 +*/
  19.979 +
  19.980 +/* #define HAVE_USR_INCLUDE_MALLOC_H */
  19.981 +
  19.982 +#ifdef HAVE_USR_INCLUDE_MALLOC_H
  19.983 +#include "/usr/include/malloc.h"
  19.984 +#else
  19.985 +
  19.986 +/* SVID2/XPG mallinfo structure */
  19.987 +
  19.988 +struct mallinfo {
  19.989 +  int arena;    /* non-mmapped space allocated from system */
  19.990 +  int ordblks;  /* number of free chunks */
  19.991 +  int smblks;   /* number of fastbin blocks */
  19.992 +  int hblks;    /* number of mmapped regions */
  19.993 +  int hblkhd;   /* space in mmapped regions */
  19.994 +  int usmblks;  /* maximum total allocated space */
  19.995 +  int fsmblks;  /* space available in freed fastbin blocks */
  19.996 +  int uordblks; /* total allocated space */
  19.997 +  int fordblks; /* total free space */
  19.998 +  int keepcost; /* top-most, releasable (via malloc_trim) space */
  19.999 +};
 19.1000 +
 19.1001 +/*
 19.1002 +  SVID/XPG defines four standard parameter numbers for mallopt,
 19.1003 +  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
 19.1004 +  in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
 19.1005 +  so setting them has no effect. But this malloc also supports other
 19.1006 +  options in mallopt described below.
 19.1007 +*/
 19.1008 +#endif
 19.1009 +
 19.1010 +
 19.1011 +/* ---------- description of public routines ------------ */
 19.1012 +
 19.1013 +/*
 19.1014 +  malloc(size_t n)
 19.1015 +  Returns a pointer to a newly allocated chunk of at least n bytes, or null
 19.1016 +  if no space is available. Additionally, on failure, errno is
 19.1017 +  set to ENOMEM on ANSI C systems.
 19.1018 +
 19.1019 +  If n is zero, malloc returns a minimum-sized chunk. (The minimum
 19.1020 +  size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
 19.1021 +  systems.)  On most systems, size_t is an unsigned type, so calls
 19.1022 +  with negative arguments are interpreted as requests for huge amounts
 19.1023 +  of space, which will often fail. The maximum supported value of n
 19.1024 +  differs across systems, but is in all cases less than the maximum
 19.1025 +  representable value of a size_t.
 19.1026 +*/
 19.1027 +#if __STD_C
 19.1028 +Void_t*  public_mALLOc(size_t);
 19.1029 +#else
 19.1030 +Void_t*  public_mALLOc();
 19.1031 +#endif
 19.1032 +
 19.1033 +/*
 19.1034 +  free(Void_t* p)
 19.1035 +  Releases the chunk of memory pointed to by p, that had been previously
 19.1036 +  allocated using malloc or a related routine such as realloc.
 19.1037 +  It has no effect if p is null. It can have arbitrary (i.e., bad!)
 19.1038 +  effects if p has already been freed.
 19.1039 +
 19.1040 +  Unless disabled (using mallopt), freeing very large spaces will,
 19.1041 +  when possible, automatically trigger operations that give
 19.1042 +  back unused memory to the system, thus reducing program footprint.
 19.1043 +*/
 19.1044 +#if __STD_C
 19.1045 +void     public_fREe(Void_t*);
 19.1046 +#else
 19.1047 +void     public_fREe();
 19.1048 +#endif
 19.1049 +
 19.1050 +/*
 19.1051 +  calloc(size_t n_elements, size_t element_size);
 19.1052 +  Returns a pointer to n_elements * element_size bytes, with all locations
 19.1053 +  set to zero.
 19.1054 +*/
 19.1055 +#if __STD_C
 19.1056 +Void_t*  public_cALLOc(size_t, size_t);
 19.1057 +#else
 19.1058 +Void_t*  public_cALLOc();
 19.1059 +#endif
 19.1060 +
 19.1061 +/*
 19.1062 +  realloc(Void_t* p, size_t n)
 19.1063 +  Returns a pointer to a chunk of size n that contains the same data
 19.1064 +  as does chunk p up to the minimum of (n, p's size) bytes, or null
 19.1065 +  if no space is available. 
 19.1066 +
 19.1067 +  The returned pointer may or may not be the same as p. The algorithm
 19.1068 +  prefers extending p when possible, otherwise it employs the
 19.1069 +  equivalent of a malloc-copy-free sequence.
 19.1070 +
 19.1071 +  If p is null, realloc is equivalent to malloc.  
 19.1072 +
 19.1073 +  If space is not available, realloc returns null, errno is set (if on
 19.1074 +  ANSI) and p is NOT freed.
 19.1075 +
 19.1076 +  if n is for fewer bytes than already held by p, the newly unused
 19.1077 +  space is lopped off and freed if possible.  Unless the #define
 19.1078 +  REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
 19.1079 +  zero (re)allocates a minimum-sized chunk.
 19.1080 +
 19.1081 +  Large chunks that were internally obtained via mmap will always
 19.1082 +  be reallocated using malloc-copy-free sequences unless
 19.1083 +  the system supports MREMAP (currently only linux).
 19.1084 +
 19.1085 +  The old unix realloc convention of allowing the last-free'd chunk
 19.1086 +  to be used as an argument to realloc is not supported.
 19.1087 +*/
 19.1088 +#if __STD_C
 19.1089 +Void_t*  public_rEALLOc(Void_t*, size_t);
 19.1090 +#else
 19.1091 +Void_t*  public_rEALLOc();
 19.1092 +#endif
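/*
  Because a failed realloc leaves p intact, callers normally keep the
  old pointer until the new one is known to be good.  A usage sketch
  (not part of this malloc):

  void* tmp = realloc(p, new_size);
  if (tmp == 0) {
    // out of memory: p is still valid and must eventually be freed
  } else {
    p = tmp;
  }
*/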
 19.1093 +
 19.1094 +/*
 19.1095 +  memalign(size_t alignment, size_t n);
 19.1096 +  Returns a pointer to a newly allocated chunk of n bytes, aligned
 19.1097 +  in accord with the alignment argument.
 19.1098 +
 19.1099 +  The alignment argument should be a power of two. If the argument is
 19.1100 +  not a power of two, the nearest greater power is used.
 19.1101 +  8-byte alignment is guaranteed by normal malloc calls, so don't
 19.1102 +  bother calling memalign with an argument of 8 or less.
 19.1103 +
 19.1104 +  Overreliance on memalign is a sure way to fragment space.
 19.1105 +*/
 19.1106 +#if __STD_C
 19.1107 +Void_t*  public_mEMALIGn(size_t, size_t);
 19.1108 +#else
 19.1109 +Void_t*  public_mEMALIGn();
 19.1110 +#endif
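/*
  For instance, a page-aligned buffer for device I/O could be obtained
  as below (a usage sketch; the 4096 page size is an assumption):

  void* buf = memalign(4096, 16 * 1024);
  if (buf != 0) {
    // buf is 4096-byte aligned; release it with free() as usual
    free(buf);
  }
*/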
 19.1111 +
 19.1112 +/*
 19.1113 +  valloc(size_t n);
 19.1114 +  Equivalent to memalign(pagesize, n), where pagesize is the page
 19.1115 +  size of the system. If the pagesize is unknown, 4096 is used.
 19.1116 +*/
 19.1117 +#if __STD_C
 19.1118 +Void_t*  public_vALLOc(size_t);
 19.1119 +#else
 19.1120 +Void_t*  public_vALLOc();
 19.1121 +#endif
 19.1122 +
 19.1123 +
 19.1124 +
 19.1125 +/*
 19.1126 +  mallopt(int parameter_number, int parameter_value)
 19.1127 +  Sets tunable parameters. The format is to provide a
 19.1128 +  (parameter-number, parameter-value) pair.  mallopt then sets the
 19.1129 +  corresponding parameter to the argument value if it can (i.e., so
 19.1130 +  long as the value is meaningful), and returns 1 if successful else
 19.1131 +  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
 19.1132 +  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
 19.1133 +  in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
 19.1134 +  so setting them has no effect. But this malloc also supports four
 19.1135 +  other options in mallopt. See below for details.  Briefly, supported
 19.1136 +  parameters are as follows (listed defaults are for "typical"
 19.1137 +  configurations).
 19.1138 +
 19.1139 +  Symbol            param #   default    allowed param values
 19.1140 +  M_MXFAST          1         64         0-80  (0 disables fastbins)
 19.1141 +  M_TRIM_THRESHOLD -1         256*1024   any   (-1U disables trimming)
 19.1142 +  M_TOP_PAD        -2         0          any  
 19.1143 +  M_MMAP_THRESHOLD -3         256*1024   any   (or 0 if no MMAP support)
 19.1144 +  M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
 19.1145 +*/
 19.1146 +#if __STD_C
 19.1147 +int      public_mALLOPt(int, int);
 19.1148 +#else
 19.1149 +int      public_mALLOPt();
 19.1150 +#endif
 19.1151 +
 19.1152 +
 19.1153 +/*
 19.1154 +  mallinfo()
 19.1155 +  Returns (by copy) a struct containing various summary statistics:
 19.1156 +
 19.1157 +  arena:     current total non-mmapped bytes allocated from system 
 19.1158 +  ordblks:   the number of free chunks 
 19.1159 +  smblks:    the number of fastbin blocks (i.e., small chunks that
 19.1160 +               have been freed but not yet reused or consolidated)
 19.1161 +  hblks:     current number of mmapped regions 
 19.1162 +  hblkhd:    total bytes held in mmapped regions 
 19.1163 +  usmblks:   the maximum total allocated space. This will be greater
 19.1164 +                than current total if trimming has occurred.
 19.1165 +  fsmblks:   total bytes held in fastbin blocks 
 19.1166 +  uordblks:  current total allocated space (normal or mmapped)
 19.1167 +  fordblks:  total free space 
 19.1168 +  keepcost:  the maximum number of bytes that could ideally be released
 19.1169 +               back to system via malloc_trim. ("ideally" means that
 19.1170 +               it ignores page restrictions etc.)
 19.1171 +
 19.1172 +  Because these fields are ints, but internal bookkeeping may
 19.1173 +  be kept as longs, the reported values may wrap around zero and 
 19.1174 +  thus be inaccurate.
 19.1175 +*/
 19.1176 +#if __STD_C
 19.1177 +struct mallinfo public_mALLINFo(void);
 19.1178 +#else
 19.1179 +struct mallinfo public_mALLINFo();
 19.1180 +#endif
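/*
  A hedged usage sketch, printing a few of the fields described above:

  struct mallinfo mi = mallinfo();
  fprintf(stderr, "arena=%d  in use=%d  free=%d  mmapped=%d\n",
          mi.arena, mi.uordblks, mi.fordblks, mi.hblkhd);
*/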
 19.1181 +
 19.1182 +/*
 19.1183 +  independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
 19.1184 +
 19.1185 +  independent_calloc is similar to calloc, but instead of returning a
 19.1186 +  single cleared space, it returns an array of pointers to n_elements
 19.1187 +  independent elements that can hold contents of size elem_size, each
 19.1188 +  of which starts out cleared, and can be independently freed,
 19.1189 +  realloc'ed etc. The elements are guaranteed to be adjacently
 19.1190 +  allocated (this is not guaranteed to occur with multiple callocs or
 19.1191 +  mallocs), which may also improve cache locality in some
 19.1192 +  applications.
 19.1193 +
 19.1194 +  The "chunks" argument is optional (i.e., may be null, which is
 19.1195 +  probably the most typical usage). If it is null, the returned array
 19.1196 +  is itself dynamically allocated and should also be freed when it is
 19.1197 +  no longer needed. Otherwise, the chunks array must be of at least
 19.1198 +  n_elements in length. It is filled in with the pointers to the
 19.1199 +  chunks.
 19.1200 +
 19.1201 +  In either case, independent_calloc returns this pointer array, or
 19.1202 +  null if the allocation failed.  If n_elements is zero and "chunks"
 19.1203 +  is null, it returns a chunk representing an array with zero elements
 19.1204 +  (which should be freed if not wanted).
 19.1205 +
 19.1206 +  Each element must be individually freed when it is no longer
 19.1207 +  needed. If you'd like to instead be able to free all at once, you
 19.1208 +  should instead use regular calloc and assign pointers into this
 19.1209 +  space to represent elements.  (In this case though, you cannot
 19.1210 +  independently free elements.)
 19.1211 +  
 19.1212 +  independent_calloc simplifies and speeds up implementations of many
 19.1213 +  kinds of pools.  It may also be useful when constructing large data
 19.1214 +  structures that initially have a fixed number of fixed-sized nodes,
 19.1215 +  but the number is not known at compile time, and some of the nodes
 19.1216 +  may later need to be freed. For example:
 19.1217 +
 19.1218 +  struct Node { int item; struct Node* next; };
 19.1219 +  
 19.1220 +  struct Node* build_list() {
 19.1221 +    struct Node** pool; int i;
 19.1222 +    int n = read_number_of_nodes_needed();
 19.1223 +    if (n <= 0) return 0;
 19.1224 +    pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
 19.1225 +    if (pool == 0) die(); 
 19.1226 +    // organize into a linked list... 
 19.1227 +    struct Node* first = pool[0];
 19.1228 +    for (i = 0; i < n-1; ++i) 
 19.1229 +      pool[i]->next = pool[i+1];
 19.1230 +    free(pool);     // Can now free the array (or not, if it is needed later)
 19.1231 +    return first;
 19.1232 +  }
 19.1233 +*/
 19.1234 +#if __STD_C
 19.1235 +Void_t** public_iCALLOc(size_t, size_t, Void_t**);
 19.1236 +#else
 19.1237 +Void_t** public_iCALLOc();
 19.1238 +#endif
 19.1239 +
 19.1240 +/*
 19.1241 +  independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
 19.1242 +
 19.1243 +  independent_comalloc allocates, all at once, a set of n_elements
 19.1244 +  chunks with sizes indicated in the "sizes" array.    It returns
 19.1245 +  an array of pointers to these elements, each of which can be
 19.1246 +  independently freed, realloc'ed etc. The elements are guaranteed to
 19.1247 +  be adjacently allocated (this is not guaranteed to occur with
 19.1248 +  multiple callocs or mallocs), which may also improve cache locality
 19.1249 +  in some applications.
 19.1250 +
 19.1251 +  The "chunks" argument is optional (i.e., may be null). If it is null
 19.1252 +  the returned array is itself dynamically allocated and should also
 19.1253 +  be freed when it is no longer needed. Otherwise, the chunks array
 19.1254 +  must be of at least n_elements in length. It is filled in with the
 19.1255 +  pointers to the chunks.
 19.1256 +
 19.1257 +  In either case, independent_comalloc returns this pointer array, or
 19.1258 +  null if the allocation failed.  If n_elements is zero and chunks is
 19.1259 +  null, it returns a chunk representing an array with zero elements
 19.1260 +  (which should be freed if not wanted).
 19.1261 +  
 19.1262 +  Each element must be individually freed when it is no longer
 19.1263 +  needed. If you'd like to instead be able to free all at once, you
 19.1264 +  should instead use a single regular malloc, and assign pointers at
 19.1265 +  particular offsets in the aggregate space. (In this case though, you 
 19.1266 +  cannot independently free elements.)
 19.1267 +
 19.1268 +  independent_comalloc differs from independent_calloc in that each
 19.1269 +  element may have a different size, and also that it does not
 19.1270 +  automatically clear elements.
 19.1271 +
 19.1272 +  independent_comalloc can be used to speed up allocation in cases
 19.1273 +  where several structs or objects must always be allocated at the
 19.1274 +  same time.  For example:
 19.1275 +
 19.1276 +  struct Head { ... }
 19.1277 +  struct Foot { ... }
 19.1278 +
 19.1279 +  void send_message(char* msg) {
 19.1280 +    int msglen = strlen(msg);
 19.1281 +    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
 19.1282 +    void* chunks[3];
 19.1283 +    if (independent_comalloc(3, sizes, chunks) == 0)
 19.1284 +      die();
 19.1285 +    struct Head* head = (struct Head*)(chunks[0]);
 19.1286 +    char*        body = (char*)(chunks[1]);
 19.1287 +    struct Foot* foot = (struct Foot*)(chunks[2]);
 19.1288 +    // ...
 19.1289 +  }
 19.1290 +
 19.1291 +  In general though, independent_comalloc is worth using only for
 19.1292 +  larger values of n_elements. For small values, you probably won't
 19.1293 +  detect enough difference from series of malloc calls to bother.
 19.1294 +
 19.1295 +  Overuse of independent_comalloc can increase overall memory usage,
 19.1296 +  since it cannot reuse existing noncontiguous small chunks that
 19.1297 +  might be available for some of the elements.
 19.1298 +*/
 19.1299 +#if __STD_C
 19.1300 +Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
 19.1301 +#else
 19.1302 +Void_t** public_iCOMALLOc();
 19.1303 +#endif
 19.1304 +
 19.1305 +
 19.1306 +/*
 19.1307 +  pvalloc(size_t n);
 19.1308 +  Equivalent to valloc(minimum-page-that-holds(n)), that is,
 19.1309 +  round up n to nearest pagesize.
 19.1310 + */
 19.1311 +#if __STD_C
 19.1312 +Void_t*  public_pVALLOc(size_t);
 19.1313 +#else
 19.1314 +Void_t*  public_pVALLOc();
 19.1315 +#endif
 19.1316 +
 19.1317 +/*
 19.1318 +  cfree(Void_t* p);
 19.1319 +  Equivalent to free(p).
 19.1320 +
 19.1321 +  cfree is needed/defined on some systems that pair it with calloc,
 19.1322 +  for odd historical reasons (such as: cfree is used in example 
 19.1323 +  code in the first edition of K&R).
 19.1324 +*/
 19.1325 +#if __STD_C
 19.1326 +void     public_cFREe(Void_t*);
 19.1327 +#else
 19.1328 +void     public_cFREe();
 19.1329 +#endif
 19.1330 +
 19.1331 +/*
 19.1332 +  malloc_trim(size_t pad);
 19.1333 +
 19.1334 +  If possible, gives memory back to the system (via negative
 19.1335 +  arguments to sbrk) if there is unused memory at the `high' end of
 19.1336 +  the malloc pool. You can call this after freeing large blocks of
 19.1337 +  memory to potentially reduce the system-level memory requirements
 19.1338 +  of a program. However, it cannot guarantee to reduce memory. Under
 19.1339 +  some allocation patterns, some large free blocks of memory will be
 19.1340 +  locked between two used chunks, so they cannot be given back to
 19.1341 +  the system.
 19.1342 +  
 19.1343 +  The `pad' argument to malloc_trim represents the amount of free
 19.1344 +  trailing space to leave untrimmed. If this argument is zero,
 19.1345 +  only the minimum amount of memory to maintain internal data
 19.1346 +  structures will be left (one page or less). Non-zero arguments
 19.1347 +  can be supplied to maintain enough trailing space to service
 19.1348 +  future expected allocations without having to re-obtain memory
 19.1349 +  from the system.
 19.1350 +  
 19.1351 +  Malloc_trim returns 1 if it actually released any memory, else 0.
 19.1352 +  On systems that do not support "negative sbrks", it will always
 19.1353 +  return 0.
 19.1354 +*/
 19.1355 +#if __STD_C
 19.1356 +int      public_mTRIm(size_t);
 19.1357 +#else
 19.1358 +int      public_mTRIm();
 19.1359 +#endif
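/*
  For example, a long-lived program that has just freed a large batch
  of buffers might try to give memory back to the system (a sketch;
  buffers, nbuffers and the 64*1024 pad are illustrative assumptions):

  for (i = 0; i < nbuffers; ++i)
    free(buffers[i]);
  if (malloc_trim(64 * 1024) == 0) {
    // nothing could be released (e.g. free space is locked mid-heap)
  }
*/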
 19.1360 +
 19.1361 +/*
 19.1362 +  malloc_usable_size(Void_t* p);
 19.1363 +
 19.1364 +  Returns the number of bytes you can actually use in
 19.1365 +  an allocated chunk, which may be more than you requested (although
 19.1366 +  often not) due to alignment and minimum size constraints.
 19.1367 +  You can use this many bytes without worrying about
 19.1368 +  overwriting other allocated objects. This is not a particularly great
 19.1369 +  programming practice. malloc_usable_size can be more useful in
 19.1370 +  debugging and assertions, for example:
 19.1371 +
 19.1372 +  p = malloc(n);
 19.1373 +  assert(malloc_usable_size(p) >= 256);
 19.1374 +
 19.1375 +*/
 19.1376 +#if __STD_C
 19.1377 +size_t   public_mUSABLe(Void_t*);
 19.1378 +#else
 19.1379 +size_t   public_mUSABLe();
 19.1380 +#endif
 19.1381 +
 19.1382 +/*
 19.1383 +  malloc_stats();
 19.1384 +  Prints on stderr the amount of space obtained from the system (both
 19.1385 +  via sbrk and mmap), the maximum amount (which may be more than
 19.1386 +  current if malloc_trim and/or munmap got called), and the current
 19.1387 +  number of bytes allocated via malloc (or realloc, etc) but not yet
 19.1388 +  freed. Note that this is the number of bytes allocated, not the
 19.1389 +  number requested. It will be larger than the number requested
 19.1390 +  because of alignment and bookkeeping overhead. Because it includes
 19.1391 +  alignment wastage as being in use, this figure may be greater than
 19.1392 +  zero even when no user-level chunks are allocated.
 19.1393 +
 19.1394 +  The reported current and maximum system memory can be inaccurate if
 19.1395 +  a program makes other calls to system memory allocation functions
 19.1396 +  (normally sbrk) outside of malloc.
 19.1397 +
 19.1398 +  malloc_stats prints only the most commonly interesting statistics.
 19.1399 +  More information can be obtained by calling mallinfo.
 19.1400 +
 19.1401 +*/
 19.1402 +#if __STD_C
 19.1403 +void     public_mSTATs();
 19.1404 +#else
 19.1405 +void     public_mSTATs();
 19.1406 +#endif
 19.1407 +
 19.1408 +/* mallopt tuning options */
 19.1409 +
 19.1410 +/*
 19.1411 +  M_MXFAST is the maximum request size used for "fastbins", special bins
 19.1412 +  that hold returned chunks without consolidating their spaces. This
 19.1413 +  enables future requests for chunks of the same size to be handled
 19.1414 +  very quickly, but can increase fragmentation, and thus increase the
 19.1415 +  overall memory footprint of a program.
 19.1416 +
 19.1417 +  This malloc manages fastbins very conservatively yet still
 19.1418 +  efficiently, so fragmentation is rarely a problem for values less
 19.1419 +  than or equal to the default.  The maximum supported value of MXFAST
 19.1420 +  is 80. You wouldn't want it any higher than this anyway.  Fastbins
 19.1421 +  are designed especially for use with many small structs, objects or
 19.1422 +  strings -- the default handles structs/objects/arrays with sizes up
 19.1423 +  to 16 4byte fields, or small strings representing words, tokens,
 19.1424 +  etc. Using fastbins for larger objects normally worsens
 19.1425 +  fragmentation without improving speed.
 19.1426 +
 19.1427 +  M_MXFAST is set in REQUEST size units. It is internally used in
 19.1428 +  chunksize units, which adds padding and alignment.  You can reduce
 19.1429 +  M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
 19.1430 +  algorithm to be a closer approximation of fifo-best-fit in all cases,
 19.1431 +  not just for larger requests, but will generally cause it to be
 19.1432 +  slower.
 19.1433 +*/
 19.1434 +
 19.1435 +
 19.1436 +/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
 19.1437 +#ifndef M_MXFAST
 19.1438 +#define M_MXFAST            1    
 19.1439 +#endif
 19.1440 +
 19.1441 +#ifndef DEFAULT_MXFAST
 19.1442 +#define DEFAULT_MXFAST     64
 19.1443 +#endif
 19.1444 +
 19.1445 +
 19.1446 +/*
 19.1447 +  M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
 19.1448 +  to keep before releasing via malloc_trim in free().
 19.1449 +
 19.1450 +  Automatic trimming is mainly useful in long-lived programs.
 19.1451 +  Because trimming via sbrk can be slow on some systems, and can
 19.1452 +  sometimes be wasteful (in cases where programs immediately
 19.1453 +  afterward allocate more large chunks) the value should be high
 19.1454 +  enough so that your overall system performance would improve by
 19.1455 +  releasing this much memory.
 19.1456 +
 19.1457 +  The trim threshold and the mmap control parameters (see below)
 19.1458 +  can be traded off with one another. Trimming and mmapping are
 19.1459 +  two different ways of releasing unused memory back to the
 19.1460 +  system. Between these two, it is often possible to keep
 19.1461 +  system-level demands of a long-lived program down to a bare
 19.1462 +  minimum. For example, in one test suite of sessions measuring
 19.1463 +  the XF86 X server on Linux, using a trim threshold of 128K and a
 19.1464 +  mmap threshold of 192K led to near-minimal long term resource
 19.1465 +  consumption.
 19.1466 +
 19.1467 +  If you are using this malloc in a long-lived program, it should
 19.1468 +  pay to experiment with these values.  As a rough guide, you
 19.1469 +  might set it to a value close to the average size of a process
 19.1470 +  (program) running on your system.  Releasing this much memory
 19.1471 +  would allow such a process to run in memory.  Generally, it's
 19.1472 +  worth it to tune for trimming rather than memory mapping when a
 19.1473 +  program undergoes phases where several large chunks are
 19.1474 +  allocated and released in ways that can reuse each other's
 19.1475 +  storage, perhaps mixed with phases where there are no such
 19.1476 +  chunks at all.  And in well-behaved long-lived programs,
 19.1477 +  controlling release of large blocks via trimming versus mapping
 19.1478 +  is usually faster.
 19.1479 +
 19.1480 +  However, in most programs, these parameters serve mainly as
 19.1481 +  protection against the system-level effects of carrying around
 19.1482 +  massive amounts of unneeded memory. Since frequent calls to
 19.1483 +  sbrk, mmap, and munmap otherwise degrade performance, the default
 19.1484 +  parameters are set to relatively high values that serve only as
 19.1485 +  safeguards.
 19.1486 +
 19.1487 +  The trim value must be greater than page size to have any useful
 19.1488 +  effect.  To disable trimming completely, you can set it to
 19.1489 +  (unsigned long)(-1).
 19.1490 +
 19.1491 +  Trim settings interact with fastbin (MXFAST) settings: Unless
 19.1492 +  TRIM_FASTBINS is defined, automatic trimming never takes place upon
 19.1493 +  freeing a chunk with size less than or equal to MXFAST. Trimming is
 19.1494 +  instead delayed until subsequent freeing of larger chunks. However,
 19.1495 +  you can still force an attempted trim by calling malloc_trim.
 19.1496 +
 19.1497 +  Also, trimming is not generally possible in cases where
 19.1498 +  the main arena is obtained via mmap.
 19.1499 +
 19.1500 +  Note that the trick some people use of mallocing a huge space and
 19.1501 +  then freeing it at program startup, in an attempt to reserve system
 19.1502 +  memory, doesn't have the intended effect under automatic trimming,
 19.1503 +  since that memory will immediately be returned to the system.
 19.1504 +*/
 19.1505 +
 19.1506 +#define M_TRIM_THRESHOLD       -1
 19.1507 +
 19.1508 +#ifndef DEFAULT_TRIM_THRESHOLD
 19.1509 +#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
 19.1510 +#endif
 19.1511 +
 19.1512 +/*
 19.1513 +  M_TOP_PAD is the amount of extra `padding' space to allocate or
 19.1514 +  retain whenever sbrk is called. It is used in two ways internally:
 19.1515 +
 19.1516 +  * When sbrk is called to extend the top of the arena to satisfy
 19.1517 +  a new malloc request, this much padding is added to the sbrk
 19.1518 +  request.
 19.1519 +
 19.1520 +  * When malloc_trim is called automatically from free(),
 19.1521 +  it is used as the `pad' argument.
 19.1522 +
 19.1523 +  In both cases, the actual amount of padding is rounded
 19.1524 +  so that the end of the arena is always a system page boundary.
 19.1525 +
 19.1526 +  The main reason for using padding is to avoid calling sbrk so
 19.1527 +  often. Having even a small pad greatly reduces the likelihood
 19.1528 +  that nearly every malloc request during program start-up (or
 19.1529 +  after trimming) will invoke sbrk, which needlessly wastes
 19.1530 +  time.
 19.1531 +
 19.1532 +  Automatic rounding-up to page-size units is normally sufficient
 19.1533 +  to avoid measurable overhead, so the default is 0.  However, in
 19.1534 +  systems where sbrk is relatively slow, it can pay to increase
 19.1535 +  this value, at the expense of carrying around more memory than
 19.1536 +  the program needs.
 19.1537 +*/
 19.1538 +
 19.1539 +#define M_TOP_PAD              -2
 19.1540 +
 19.1541 +#ifndef DEFAULT_TOP_PAD
 19.1542 +#define DEFAULT_TOP_PAD        (0)
 19.1543 +#endif
 19.1544 +
 19.1545 +/*
 19.1546 +  M_MMAP_THRESHOLD is the request size threshold for using mmap()
 19.1547 +  to service a request. Requests of at least this size that cannot
 19.1548 +  be allocated using already-existing space will be serviced via mmap.
 19.1549 +  (If enough normal freed space already exists it is used instead.)
 19.1550 +
 19.1551 +  Using mmap segregates relatively large chunks of memory so that
 19.1552 +  they can be individually obtained and released from the host
 19.1553 +  system. A request serviced through mmap is never reused by any
 19.1554 +  other request (at least not directly; the system may just so
 19.1555 +  happen to remap successive requests to the same locations).
 19.1556 +
 19.1557 +  Segregating space in this way has the benefits that:
 19.1558 +
 19.1559 +   1. Mmapped space can ALWAYS be individually released back 
 19.1560 +      to the system, which helps keep the system level memory 
 19.1561 +      demands of a long-lived program low. 
 19.1562 +   2. Mapped memory can never become `locked' between
 19.1563 +      other chunks, as can happen with normally allocated chunks, which
 19.1564 +      means that even trimming via malloc_trim would not release them.
 19.1565 +   3. On some systems with "holes" in address spaces, mmap can obtain
 19.1566 +      memory that sbrk cannot.
 19.1567 +
 19.1568 +  However, it has the disadvantages that:
 19.1569 +
 19.1570 +   1. The space cannot be reclaimed, consolidated, and then
 19.1571 +      used to service later requests, as happens with normal chunks.
 19.1572 +   2. It can lead to more wastage because of mmap page alignment
 19.1573 +      requirements.
 19.1574 +   3. It causes malloc performance to be more dependent on host
 19.1575 +      system memory management support routines which may vary in
 19.1576 +      implementation quality and may impose arbitrary
 19.1577 +      limitations. Generally, servicing a request via normal
 19.1578 +      malloc steps is faster than going through a system's mmap.
 19.1579 +
 19.1580 +  The advantages of mmap nearly always outweigh disadvantages for
 19.1581 +  "large" chunks, but the value of "large" varies across systems.  The
 19.1582 +  default is an empirically derived value that works well in most
 19.1583 +  systems.
 19.1584 +*/
 19.1585 +
 19.1586 +#define M_MMAP_THRESHOLD      -3
 19.1587 +
 19.1588 +#ifndef DEFAULT_MMAP_THRESHOLD
 19.1589 +#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
 19.1590 +#endif
 19.1591 +
 19.1592 +/*
 19.1593 +  M_MMAP_MAX is the maximum number of requests to simultaneously
 19.1594 +  service using mmap. This parameter exists because
 19.1595 +  some systems have a limited number of internal tables for
 19.1596 +  use by mmap, and using more than a few of them may degrade
 19.1597 +  performance.
 19.1598 +
 19.1599 +  The default is set to a value that serves only as a safeguard.
 19.1600 +  Setting to 0 disables use of mmap for servicing large requests.  If
 19.1601 +  HAVE_MMAP is not set, the default value is 0, and attempts to set it
 19.1602 +  to non-zero values in mallopt will fail.
 19.1603 +*/
 19.1604 +
 19.1605 +#define M_MMAP_MAX             -4
 19.1606 +
 19.1607 +#ifndef DEFAULT_MMAP_MAX
 19.1608 +#if HAVE_MMAP
 19.1609 +#define DEFAULT_MMAP_MAX       (65536)
 19.1610 +#else
 19.1611 +#define DEFAULT_MMAP_MAX       (0)
 19.1612 +#endif
 19.1613 +#endif
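/*
  A hedged tuning sketch for a long-lived program, using the mallopt
  symbols above (the 128K/192K thresholds echo the X server example
  earlier and are illustrative, not recommendations):

  mallopt(M_TRIM_THRESHOLD, 128 * 1024);  // trim when >128K unused at top
  mallopt(M_MMAP_THRESHOLD, 192 * 1024);  // mmap requests of 192K and up
  mallopt(M_TOP_PAD,        0);           // no extra sbrk padding
  mallopt(M_MMAP_MAX,       1024);        // cap simultaneous mmapped chunks
*/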
 19.1614 +
 19.1615 +#ifdef __cplusplus
 19.1616 +}  /* end of extern "C" */
 19.1617 +#endif
 19.1618 +
 19.1619 +
 19.1620 +/* RN XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
 19.1621 +#endif 
 19.1622 +
 19.1623 +/* 
 19.1624 +  ========================================================================
 19.1625 +  To make a fully customizable malloc.h header file, cut everything
 19.1626 +  above this line, put into file malloc.h, edit to suit, and #include it 
 19.1627 +  on the next line, as well as in programs that use this malloc.
 19.1628 +  ========================================================================
 19.1629 +*/
 19.1630 +
 19.1631 +/* #include "malloc.h" */
 19.1632 +
 19.1633 +/* --------------------- public wrappers ---------------------- */
 19.1634 +
 19.1635 +#ifdef USE_PUBLIC_MALLOC_WRAPPERS
 19.1636 +
 19.1637 +/* Declare all routines as internal */
 19.1638 +#if __STD_C
 19.1639 +static Void_t*  mALLOc(size_t);
 19.1640 +static void     fREe(Void_t*);
 19.1641 +static Void_t*  rEALLOc(Void_t*, size_t);
 19.1642 +static Void_t*  mEMALIGn(size_t, size_t);
 19.1643 +static Void_t*  vALLOc(size_t);
 19.1644 +static Void_t*  pVALLOc(size_t);
 19.1645 +static Void_t*  cALLOc(size_t, size_t);
 19.1646 +static Void_t** iCALLOc(size_t, size_t, Void_t**);
 19.1647 +static Void_t** iCOMALLOc(size_t, size_t*, Void_t**);
 19.1648 +static void     cFREe(Void_t*);
 19.1649 +static int      mTRIm(size_t);
 19.1650 +static size_t   mUSABLe(Void_t*);
 19.1651 +static void     mSTATs();
 19.1652 +static int      mALLOPt(int, int);
 19.1653 +static struct mallinfo mALLINFo(void);
 19.1654 +#else
 19.1655 +static Void_t*  mALLOc();
 19.1656 +static void     fREe();
 19.1657 +static Void_t*  rEALLOc();
 19.1658 +static Void_t*  mEMALIGn();
 19.1659 +static Void_t*  vALLOc();
 19.1660 +static Void_t*  pVALLOc();
 19.1661 +static Void_t*  cALLOc();
 19.1662 +static Void_t** iCALLOc();
 19.1663 +static Void_t** iCOMALLOc();
 19.1664 +static void     cFREe();
 19.1665 +static int      mTRIm();
 19.1666 +static size_t   mUSABLe();
 19.1667 +static void     mSTATs();
 19.1668 +static int      mALLOPt();
 19.1669 +static struct mallinfo mALLINFo();
 19.1670 +#endif
 19.1671 +
 19.1672 +/*
 19.1673 +  MALLOC_PREACTION and MALLOC_POSTACTION should be
 19.1674 +  defined to return 0 on success, and nonzero on failure.
 19.1675 +  The return value of MALLOC_POSTACTION is currently ignored
 19.1676 +  in wrapper functions since there is no reasonable default
 19.1677 +  action to take on failure.
 19.1678 +*/
 19.1679 +
 19.1680 +
 19.1681 +#ifdef USE_MALLOC_LOCK
 19.1682 +
 19.1683 +#ifdef WIN32
 19.1684 +
 19.1685 +static int mALLOC_MUTEx;
 19.1686 +#define MALLOC_PREACTION   slwait(&mALLOC_MUTEx)
 19.1687 +#define MALLOC_POSTACTION  slrelease(&mALLOC_MUTEx)
 19.1688 +
 19.1689 +#else
 19.1690 +
 19.1691 +#include <pthread.h>
 19.1692 +
 19.1693 +static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
 19.1694 +
 19.1695 +#define MALLOC_PREACTION   pthread_mutex_lock(&mALLOC_MUTEx)
 19.1696 +#define MALLOC_POSTACTION  pthread_mutex_unlock(&mALLOC_MUTEx)
 19.1697 +
 19.1698 +#endif /* WIN32 */
 19.1699 +
 19.1700 +#else
 19.1701 +
 19.1702 +/* Substitute anything you like for these */
 19.1703 +
 19.1704 +#define MALLOC_PREACTION   (0)
 19.1705 +#define MALLOC_POSTACTION  (0)
 19.1706 +
 19.1707 +#endif
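/*
  Any serialization scheme can be substituted, as long as PREACTION
  yields 0 on success.  A hedged sketch using an assumed spinlock API
  (spinlock_t, spin_lock, spin_unlock and malloc_lock are not supplied
  by this file):

  static spinlock_t malloc_lock;
  #define MALLOC_PREACTION   (spin_lock(&malloc_lock), 0)
  #define MALLOC_POSTACTION  (spin_unlock(&malloc_lock), 0)
*/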
 19.1708 +
 19.1709 +Void_t* public_mALLOc(size_t bytes) {
 19.1710 +  Void_t* m;
 19.1711 +  if (MALLOC_PREACTION != 0) {
 19.1712 +    return 0;
 19.1713 +  }
 19.1714 +  m = mALLOc(bytes);
 19.1715 +  if (MALLOC_POSTACTION != 0) {
 19.1716 +  }
 19.1717 +  return m;
 19.1718 +}
 19.1719 +
 19.1720 +void public_fREe(Void_t* m) {
 19.1721 +  if (MALLOC_PREACTION != 0) {
 19.1722 +    return;
 19.1723 +  }
 19.1724 +  fREe(m);
 19.1725 +  if (MALLOC_POSTACTION != 0) {
 19.1726 +  }
 19.1727 +}
 19.1728 +
 19.1729 +Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
 19.1730 +  if (MALLOC_PREACTION != 0) {
 19.1731 +    return 0;
 19.1732 +  }
 19.1733 +  m = rEALLOc(m, bytes);
 19.1734 +  if (MALLOC_POSTACTION != 0) {
 19.1735 +  }
 19.1736 +  return m;
 19.1737 +}
 19.1738 +
 19.1739 +Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
 19.1740 +  Void_t* m;
 19.1741 +  if (MALLOC_PREACTION != 0) {
 19.1742 +    return 0;
 19.1743 +  }
 19.1744 +  m = mEMALIGn(alignment, bytes);
 19.1745 +  if (MALLOC_POSTACTION != 0) {
 19.1746 +  }
 19.1747 +  return m;
 19.1748 +}
 19.1749 +
 19.1750 +Void_t* public_vALLOc(size_t bytes) {
 19.1751 +  Void_t* m;
 19.1752 +  if (MALLOC_PREACTION != 0) {
 19.1753 +    return 0;
 19.1754 +  }
 19.1755 +  m = vALLOc(bytes);
 19.1756 +  if (MALLOC_POSTACTION != 0) {
 19.1757 +  }
 19.1758 +  return m;
 19.1759 +}
 19.1760 +
 19.1761 +Void_t* public_pVALLOc(size_t bytes) {
 19.1762 +  Void_t* m;
 19.1763 +  if (MALLOC_PREACTION != 0) {
 19.1764 +    return 0;
 19.1765 +  }
 19.1766 +  m = pVALLOc(bytes);
 19.1767 +  if (MALLOC_POSTACTION != 0) {
 19.1768 +  }
 19.1769 +  return m;
 19.1770 +}
 19.1771 +
 19.1772 +Void_t* public_cALLOc(size_t n, size_t elem_size) {
 19.1773 +  Void_t* m;
 19.1774 +  if (MALLOC_PREACTION != 0) {
 19.1775 +    return 0;
 19.1776 +  }
 19.1777 +  m = cALLOc(n, elem_size);
 19.1778 +  if (MALLOC_POSTACTION != 0) {
 19.1779 +  }
 19.1780 +  return m;
 19.1781 +}
 19.1782 +
 19.1783 +
 19.1784 +Void_t** public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks) {
 19.1785 +  Void_t** m;
 19.1786 +  if (MALLOC_PREACTION != 0) {
 19.1787 +    return 0;
 19.1788 +  }
 19.1789 +  m = iCALLOc(n, elem_size, chunks);
 19.1790 +  if (MALLOC_POSTACTION != 0) {
 19.1791 +  }
 19.1792 +  return m;
 19.1793 +}
 19.1794 +
 19.1795 +Void_t** public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks) {
 19.1796 +  Void_t** m;
 19.1797 +  if (MALLOC_PREACTION != 0) {
 19.1798 +    return 0;
 19.1799 +  }
 19.1800 +  m = iCOMALLOc(n, sizes, chunks);
 19.1801 +  if (MALLOC_POSTACTION != 0) {
 19.1802 +  }
 19.1803 +  return m;
 19.1804 +}
 19.1805 +
 19.1806 +void public_cFREe(Void_t* m) {
 19.1807 +  if (MALLOC_PREACTION != 0) {
 19.1808 +    return;
 19.1809 +  }
 19.1810 +  cFREe(m);
 19.1811 +  if (MALLOC_POSTACTION != 0) {
 19.1812 +  }
 19.1813 +}
 19.1814 +
 19.1815 +int public_mTRIm(size_t s) {
 19.1816 +  int result;
 19.1817 +  if (MALLOC_PREACTION != 0) {
 19.1818 +    return 0;
 19.1819 +  }
 19.1820 +  result = mTRIm(s);
 19.1821 +  if (MALLOC_POSTACTION != 0) {
 19.1822 +  }
 19.1823 +  return result;
 19.1824 +}
 19.1825 +
 19.1826 +size_t public_mUSABLe(Void_t* m) {
 19.1827 +  size_t result;
 19.1828 +  if (MALLOC_PREACTION != 0) {
 19.1829 +    return 0;
 19.1830 +  }
 19.1831 +  result = mUSABLe(m);
 19.1832 +  if (MALLOC_POSTACTION != 0) {
 19.1833 +  }
 19.1834 +  return result;
 19.1835 +}
 19.1836 +
 19.1837 +void public_mSTATs() {
 19.1838 +  if (MALLOC_PREACTION != 0) {
 19.1839 +    return;
 19.1840 +  }
 19.1841 +  mSTATs();
 19.1842 +  if (MALLOC_POSTACTION != 0) {
 19.1843 +  }
 19.1844 +}
 19.1845 +
 19.1846 +struct mallinfo public_mALLINFo() {
 19.1847 +  struct mallinfo m;
 19.1848 +  if (MALLOC_PREACTION != 0) {
 19.1849 +    struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
 19.1850 +    return nm;
 19.1851 +  }
 19.1852 +  m = mALLINFo();
 19.1853 +  if (MALLOC_POSTACTION != 0) {
 19.1854 +  }
 19.1855 +  return m;
 19.1856 +}
 19.1857 +
 19.1858 +int public_mALLOPt(int p, int v) {
 19.1859 +  int result;
 19.1860 +  if (MALLOC_PREACTION != 0) {
 19.1861 +    return 0;
 19.1862 +  }
 19.1863 +  result = mALLOPt(p, v);
 19.1864 +  if (MALLOC_POSTACTION != 0) {
 19.1865 +  }
 19.1866 +  return result;
 19.1867 +}
 19.1868 +
 19.1869 +#endif
 19.1870 +
 19.1871 +
 19.1872 +
 19.1873 +/* ------------- Optional versions of memcopy ---------------- */
 19.1874 +
 19.1875 +
 19.1876 +#if USE_MEMCPY
 19.1877 +
 19.1878 +/* 
 19.1879 +  Note: memcpy is ONLY invoked with non-overlapping regions,
 19.1880 +  so the (usually slower) memmove is not needed.
 19.1881 +*/
 19.1882 +
 19.1883 +#define MALLOC_COPY(dest, src, nbytes)  memcpy(dest, src, nbytes)
 19.1884 +#define MALLOC_ZERO(dest, nbytes)       memset(dest, 0,   nbytes)
 19.1885 +
 19.1886 +#else /* !USE_MEMCPY */
 19.1887 +
 19.1888 +/* Use Duff's device for good zeroing/copying performance. */
 19.1889 +
 19.1890 +#define MALLOC_ZERO(charp, nbytes)                                            \
 19.1891 +do {                                                                          \
 19.1892 +  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
 19.1893 +  CHUNK_SIZE_T  mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                     \
 19.1894 +  long mcn;                                                                   \
 19.1895 +  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
 19.1896 +  switch (mctmp) {                                                            \
 19.1897 +    case 0: for(;;) { *mzp++ = 0;                                             \
 19.1898 +    case 7:           *mzp++ = 0;                                             \
 19.1899 +    case 6:           *mzp++ = 0;                                             \
 19.1900 +    case 5:           *mzp++ = 0;                                             \
 19.1901 +    case 4:           *mzp++ = 0;                                             \
 19.1902 +    case 3:           *mzp++ = 0;                                             \
 19.1903 +    case 2:           *mzp++ = 0;                                             \
 19.1904 +    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
 19.1905 +  }                                                                           \
 19.1906 +} while(0)
 19.1907 +
 19.1908 +#define MALLOC_COPY(dest,src,nbytes)                                          \
 19.1909 +do {                                                                          \
 19.1910 +  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
 19.1911 +  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
 19.1912 +  CHUNK_SIZE_T  mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                     \
 19.1913 +  long mcn;                                                                   \
 19.1914 +  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
 19.1915 +  switch (mctmp) {                                                            \
 19.1916 +    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
 19.1917 +    case 7:           *mcdst++ = *mcsrc++;                                    \
 19.1918 +    case 6:           *mcdst++ = *mcsrc++;                                    \
 19.1919 +    case 5:           *mcdst++ = *mcsrc++;                                    \
 19.1920 +    case 4:           *mcdst++ = *mcsrc++;                                    \
 19.1921 +    case 3:           *mcdst++ = *mcsrc++;                                    \
 19.1922 +    case 2:           *mcdst++ = *mcsrc++;                                    \
 19.1923 +    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
 19.1924 +  }                                                                           \
 19.1925 +} while(0)
 19.1926 +
 19.1927 +#endif
 19.1928 +
 19.1929 +/* ------------------ MMAP support ------------------  */
 19.1930 +
 19.1931 +
 19.1932 +#if HAVE_MMAP
 19.1933 +
 19.1934 +#ifndef LACKS_FCNTL_H
 19.1935 +#include <fcntl.h>
 19.1936 +#endif
 19.1937 +
 19.1938 +#ifndef LACKS_SYS_MMAN_H
 19.1939 +#include <sys/mman.h>
 19.1940 +#endif
 19.1941 +
 19.1942 +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
 19.1943 +#define MAP_ANONYMOUS MAP_ANON
 19.1944 +#endif
 19.1945 +
 19.1946 +/* 
 19.1947 +   Nearly all versions of mmap support MAP_ANONYMOUS, 
 19.1948 +   so the following is unlikely to be needed, but is
 19.1949 +   supplied just in case.
 19.1950 +*/
 19.1951 +
 19.1952 +#ifndef MAP_ANONYMOUS
 19.1953 +
 19.1954 +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
 19.1955 +
 19.1956 +#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
 19.1957 + (dev_zero_fd = open("/dev/zero", O_RDWR), \
 19.1958 +  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
 19.1959 +   mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
 19.1960 +
 19.1961 +#else
 19.1962 +
 19.1963 +#define MMAP(addr, size, prot, flags) \
 19.1964 + (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
 19.1965 +
 19.1966 +#endif
 19.1967 +
 19.1968 +
 19.1969 +#endif /* HAVE_MMAP */
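/*
  A hedged illustration of roughly how the allocator invokes this
  macro; size is assumed to already be a multiple of the page size:

  char* mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
  if (mm != (char*)(MORECORE_FAILURE)) {
    // an anonymous, zero-filled mapping of `size' bytes was obtained
  }
*/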
 19.1970 +
 19.1971 +
 19.1972 +/*
 19.1973 +  -----------------------  Chunk representations -----------------------
 19.1974 +*/
 19.1975 +
 19.1976 +
 19.1977 +/*
 19.1978 +  This struct declaration is misleading (but accurate and necessary).
 19.1979 +  It declares a "view" into memory allowing access to necessary
 19.1980 +  fields at known offsets from a given base. See explanation below.
 19.1981 +*/
 19.1982 +
 19.1983 +struct malloc_chunk {
 19.1984 +
 19.1985 +  INTERNAL_SIZE_T      prev_size;  /* Size of previous chunk (if free).  */
 19.1986 +  INTERNAL_SIZE_T      size;       /* Size in bytes, including overhead. */
 19.1987 +
 19.1988 +  struct malloc_chunk* fd;         /* double links -- used only if free. */
 19.1989 +  struct malloc_chunk* bk;
 19.1990 +};
 19.1991 +
 19.1992 +
 19.1993 +typedef struct malloc_chunk* mchunkptr;
 19.1994 +
 19.1995 +/*
 19.1996 +   malloc_chunk details:
 19.1997 +
 19.1998 +    (The following includes lightly edited explanations by Colin Plumb.)
 19.1999 +
 19.2000 +    Chunks of memory are maintained using a `boundary tag' method as
 19.2001 +    described in e.g., Knuth or Standish.  (See the paper by Paul
 19.2002 +    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
 19.2003 +    survey of such techniques.)  Sizes of free chunks are stored both
 19.2004 +    in the front of each chunk and at the end.  This makes
 19.2005 +    consolidating fragmented chunks into bigger chunks very fast.  The
 19.2006 +    size fields also hold bits representing whether chunks are free or
 19.2007 +    in use.
 19.2008 +
 19.2009 +    An allocated chunk looks like this:
 19.2010 +
 19.2011 +
 19.2012 +    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 19.2013 +            |             Size of previous chunk, if allocated            | |
 19.2014 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 19.2015 +            |             Size of chunk, in bytes                         |P|
 19.2016 +      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 19.2017 +            |             User data starts here...                          .
 19.2018 +            .                                                               .
 19.2019 +            .             (malloc_usable_space() bytes)                     .
 19.2020 +            .                                                               |
 19.2021 +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 19.2022 +            |             Size of chunk                                     |
 19.2023 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 19.2024 +
 19.2025 +
 19.2026 +    Where "chunk" is the front of the chunk for the purpose of most of
 19.2027 +    the malloc code, but "mem" is the pointer that is returned to the
 19.2028 +    user.  "Nextchunk" is the beginning of the next contiguous chunk.
 19.2029 +
 19.2030 +    Chunks always begin on even word boundaries, so the mem portion
 19.2031 +    (which is returned to the user) is also on an even word boundary, and
 19.2032 +    thus at least double-word aligned.
 19.2033 +
 19.2034 +    Free chunks are stored in circular doubly-linked lists, and look like this:
 19.2035 +
 19.2036 +    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 19.2037 +            |             Size of previous chunk                            |
 19.2038 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 19.2039 +    `head:' |             Size of chunk, in bytes                         |P|
 19.2040 +      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 19.2041 +            |             Forward pointer to next chunk in list             |
 19.2042 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 19.2043 +            |             Back pointer to previous chunk in list            |
 19.2044 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 19.2045 +            |             Unused space (may be 0 bytes long)                .
 19.2046 +            .                                                               .
 19.2047 +            .                                                               |
 19.2048 +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 19.2049 +    `foot:' |             Size of chunk, in bytes                           |
 19.2050 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 19.2051 +
 19.2052 +    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
 19.2053 +    chunk size (which is always a multiple of two words), is an in-use
 19.2054 +    bit for the *previous* chunk.  If that bit is *clear*, then the
 19.2055 +    word before the current chunk size contains the previous chunk
 19.2056 +    size, and can be used to find the front of the previous chunk.
 19.2057 +    The very first chunk allocated always has this bit set,
 19.2058 +    preventing access to non-existent (or non-owned) memory. If
 19.2059 +    prev_inuse is set for any given chunk, then you CANNOT determine
 19.2060 +    the size of the previous chunk, and might even get a memory
 19.2061 +    addressing fault when trying to do so.
 19.2062 +
 19.2063 +    Note that the `foot' of the current chunk is actually represented
 19.2064 +    as the prev_size of the NEXT chunk. This makes it easier to
 19.2065 +    deal with alignments etc but can be very confusing when trying
 19.2066 +    to extend or adapt this code.
 19.2067 +
 19.2068 +    The two exceptions to all this are
 19.2069 +
 19.2070 +     1. The special chunk `top' doesn't bother using the
 19.2071 +        trailing size field since there is no next contiguous chunk
 19.2072 +        that would have to index off it. After initialization, `top'
 19.2073 +        is forced to always exist.  If it would become less than
 19.2074 +        MINSIZE bytes long, it is replenished.
 19.2075 +
 19.2076 +     2. Chunks allocated via mmap, which have the second-lowest-order
 19.2077 +        bit (IS_MMAPPED) set in their size fields.  Because they are
 19.2078 +        allocated one-by-one, each must contain its own trailing size field.
 19.2079 +
 19.2080 +*/
 19.2081 +
 19.2082 +/*
 19.2083 +  ---------- Size and alignment checks and conversions ----------
 19.2084 +*/
 19.2085 +
 19.2086 +/* conversion from malloc headers to user pointers, and back */
 19.2087 +
 19.2088 +#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
 19.2089 +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
 19.2090 +
 19.2091 +/* The smallest possible chunk */
 19.2092 +#define MIN_CHUNK_SIZE        (sizeof(struct malloc_chunk))
 19.2093 +
 19.2094 +/* The smallest size we can malloc is an aligned minimal chunk */
 19.2095 +
 19.2096 +#define MINSIZE  \
 19.2097 +  (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
 19.2098 +
 19.2099 +/* Check if m has acceptable alignment */
 19.2100 +
 19.2101 +#define aligned_OK(m)  (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0)
 19.2102 +
 19.2103 +
 19.2104 +/* 
 19.2105 +   Check if a request is so large that it would wrap around zero when
 19.2106 +   padded and aligned. To simplify some other code, the bound is made
 19.2107 +   low enough so that adding MINSIZE will also not wrap around zero.
 19.2108 +*/
 19.2109 +
 19.2110 +#define REQUEST_OUT_OF_RANGE(req)                                 \
 19.2111 +  ((CHUNK_SIZE_T)(req) >=                                        \
 19.2112 +   (CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-2 * MINSIZE))    
 19.2113 +
 19.2114 +/* pad request bytes into a usable size -- internal version */
 19.2115 +
 19.2116 +#define request2size(req)                                         \
 19.2117 +  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
 19.2118 +   MINSIZE :                                                      \
 19.2119 +   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
 19.2120 +
 19.2121 +/*  Same, except also perform argument check */
 19.2122 +
 19.2123 +#define checked_request2size(req, sz)                             \
 19.2124 +  if (REQUEST_OUT_OF_RANGE(req)) {                                \
 19.2125 +    MALLOC_FAILURE_ACTION;                                        \
 19.2126 +    return 0;                                                     \
 19.2127 +  }                                                               \
 19.2128 +  (sz) = request2size(req);                                              
 19.2129 +
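/*
  Worked examples, assuming SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 (so
  MALLOC_ALIGN_MASK == 7) and therefore MINSIZE == 16:

    request2size(1)  ->  1 + 4 + 7 = 12 < 16, so MINSIZE  -> 16
    request2size(12) -> (12 + 4 + 7) & ~7                 -> 16
    request2size(20) -> (20 + 4 + 7) & ~7                 -> 24
    request2size(32) -> (32 + 4 + 7) & ~7                 -> 40

  That is, each usable chunk size is the request plus one size word,
  rounded up to the alignment, and never below MINSIZE.
*/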
 19.2130 +/*
 19.2131 +  --------------- Physical chunk operations ---------------
 19.2132 +*/
 19.2133 +
 19.2134 +
 19.2135 +/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
 19.2136 +#define PREV_INUSE 0x1
 19.2137 +
 19.2138 +/* extract inuse bit of previous chunk */
 19.2139 +#define prev_inuse(p)       ((p)->size & PREV_INUSE)
 19.2140 +
 19.2141 +
 19.2142 +/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
 19.2143 +#define IS_MMAPPED 0x2
 19.2144 +
 19.2145 +/* check for mmap()'ed chunk */
 19.2146 +#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
 19.2147 +
 19.2148 +/* 
 19.2149 +  Bits to mask off when extracting size 
 19.2150 +
 19.2151 +  Note: IS_MMAPPED is intentionally not masked off from size field in
 19.2152 +  macros for which mmapped chunks should never be seen. This should
 19.2153 +  cause helpful core dumps to occur if it is tried by accident by
 19.2154 +  people extending or adapting this malloc.
 19.2155 +*/
 19.2156 +#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
 19.2157 +
 19.2158 +/* Get size, ignoring use bits */
 19.2159 +#define chunksize(p)         ((p)->size & ~(SIZE_BITS))
 19.2160 +
 19.2161 +
 19.2162 +/* Ptr to next physical malloc_chunk. */
 19.2163 +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
 19.2164 +
 19.2165 +/* Ptr to previous physical malloc_chunk */
 19.2166 +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
 19.2167 +
 19.2168 +/* Treat space at ptr + offset as a chunk */
 19.2169 +#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
 19.2170 +
 19.2171 +/* extract p's inuse bit */
 19.2172 +#define inuse(p)\
 19.2173 +((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
 19.2174 +
 19.2175 +/* set/clear chunk as being inuse without otherwise disturbing */
 19.2176 +#define set_inuse(p)\
 19.2177 +((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
 19.2178 +
 19.2179 +#define clear_inuse(p)\
 19.2180 +((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
 19.2181 +
 19.2182 +
 19.2183 +/* check/set/clear inuse bits in known places */
 19.2184 +#define inuse_bit_at_offset(p, s)\
 19.2185 + (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
 19.2186 +
 19.2187 +#define set_inuse_bit_at_offset(p, s)\
 19.2188 + (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
 19.2189 +
 19.2190 +#define clear_inuse_bit_at_offset(p, s)\
 19.2191 + (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
 19.2192 +
 19.2193 +
 19.2194 +/* Set size at head, without disturbing its use bit */
 19.2195 +#define set_head_size(p, s)  ((p)->size = (((p)->size & PREV_INUSE) | (s)))
 19.2196 +
 19.2197 +/* Set size/use field */
 19.2198 +#define set_head(p, s)       ((p)->size = (s))
 19.2199 +
 19.2200 +/* Set size at footer (only when chunk is not in use) */
 19.2201 +#define set_foot(p, s)       (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
 19.2202 +
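/*
  A small sketch of how the header/footer macros above cooperate, operating
  on a caller-supplied scratch buffer (at least 48 bytes, suitably aligned).
  The sizes used are arbitrary; the MALLOC_EXAMPLES guard is never defined,
  so this is illustration only.
*/
#ifdef MALLOC_EXAMPLES
static void example_chunk_ops(char* buf)
{
  mchunkptr p = (mchunkptr) buf;
  mchunkptr q;

  set_head(p, 32 | PREV_INUSE);  /* p is 32 bytes; its predecessor is in use      */
  q = chunk_at_offset(p, 32);    /* the chunk that physically follows p           */
  set_head(q, 32);               /* q's PREV_INUSE clear, so p counts as free     */
  set_foot(p, 32);               /* free chunks record their size as q->prev_size */

  assert(chunksize(p) == 32);    /* flag bits masked off                          */
  assert(next_chunk(p) == q);
  assert(prev_chunk(q) == p);    /* found via the footer written above            */
  assert(!inuse(p));             /* p's inuse bit lives in q's size field         */

  set_inuse(p);                  /* flips PREV_INUSE in q                         */
  assert(inuse(p) && prev_inuse(q));
}
#endif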
 19.2203 +
 19.2204 +/*
 19.2205 +  -------------------- Internal data structures --------------------
 19.2206 +
 19.2207 +   All internal state is held in an instance of malloc_state defined
 19.2208 +   below. There are no other static variables, except in two optional
 19.2209 +   cases: 
 19.2210 +   * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above. 
 19.2211 +   * If HAVE_MMAP is true, but mmap doesn't support
 19.2212 +     MAP_ANONYMOUS, a dummy file descriptor for mmap.
 19.2213 +
 19.2214 +   Beware of lots of tricks that minimize the total bookkeeping space
 19.2215 +   requirements. The result is a little over 1K bytes (for 4-byte
 19.2216 +   pointers and size_t).
 19.2217 +*/
 19.2218 +
 19.2219 +/*
 19.2220 +  Bins
 19.2221 +
 19.2222 +    An array of bin headers for free chunks. Each bin is doubly
 19.2223 +    linked.  The bins are approximately proportionally (log) spaced.
 19.2224 +    There are a lot of these bins (96). This may look excessive, but
 19.2225 +    works very well in practice.  Most bins hold sizes that are
 19.2226 +    unusual as malloc request sizes, but are more usual for fragments
 19.2227 +    and consolidated sets of chunks, which is what these bins hold, so
 19.2228 +    they can be found quickly.  All procedures maintain the invariant
 19.2229 +    that no consolidated chunk physically borders another one, so each
 19.2230 +    chunk in a list is known to be preceded and followed by either
 19.2231 +    inuse chunks or the ends of memory.
 19.2232 +
 19.2233 +    Chunks in bins are kept in size order, with ties going to the
 19.2234 +    approximately least recently used chunk. Ordering isn't needed
 19.2235 +    for the small bins, which all contain the same-sized chunks, but
 19.2236 +    facilitates best-fit allocation for larger chunks. These lists
 19.2237 +    are just sequential. Keeping them in order almost never requires
 19.2238 +    enough traversal to warrant using fancier ordered data
 19.2239 +    structures.  
 19.2240 +
 19.2241 +    Chunks of the same size are linked with the most
 19.2242 +    recently freed at the front, and allocations are taken from the
 19.2243 +    back.  This results in LRU (FIFO) allocation order, which tends
 19.2244 +    to give each chunk an equal opportunity to be consolidated with
 19.2245 +    adjacent freed chunks, resulting in larger free chunks and less
 19.2246 +    fragmentation.
 19.2247 +
 19.2248 +    To simplify use in double-linked lists, each bin header acts
 19.2249 +    as a malloc_chunk. This avoids special-casing for headers.
 19.2250 +    But to conserve space and improve locality, we allocate
 19.2251 +    only the fd/bk pointers of bins, and then use repositioning tricks
 19.2252 +    to treat these as the fields of a malloc_chunk*.  
 19.2253 +*/
 19.2254 +
 19.2255 +typedef struct malloc_chunk* mbinptr;
 19.2256 +
 19.2257 +/* addressing -- note that bin_at(0) does not exist */
 19.2258 +#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
 19.2259 +
 19.2260 +/* analog of ++bin */
 19.2261 +#define next_bin(b)  ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
 19.2262 +
 19.2263 +/* Reminders about list directionality within bins */
 19.2264 +#define first(b)     ((b)->fd)
 19.2265 +#define last(b)      ((b)->bk)
 19.2266 +
 19.2267 +/* Take a chunk off a bin list */
 19.2268 +#define unlink(P, BK, FD) {                                            \
 19.2269 +  FD = P->fd;                                                          \
 19.2270 +  BK = P->bk;                                                          \
 19.2271 +  FD->bk = BK;                                                         \
 19.2272 +  BK->fd = FD;                                                         \
 19.2273 +}
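
/*
  Usage sketch for unlink(): pull the chunk at the front of a bin off its
  doubly linked list.  The bin header behaves like a chunk, as described
  above, so no special cases are needed.  MALLOC_EXAMPLES is never
  defined; illustration only.
*/
#ifdef MALLOC_EXAMPLES
static void example_unlink(mbinptr b)
{
  mchunkptr p = first(b);      /* front of the list (== b when bin is empty) */
  mchunkptr bck;
  mchunkptr fwd;

  if (p != b) {
    unlink(p, bck, fwd);       /* splice p out; bck/fwd are just temporaries */
    assert(first(b) != p);     /* p is no longer reachable from the bin      */
  }
}
#endif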
 19.2274 +
 19.2275 +/*
 19.2276 +  Indexing
 19.2277 +
 19.2278 +    Bins for sizes < 256 bytes (MIN_LARGE_SIZE) contain chunks of all the
 19.2279 +    same size, spaced 8 bytes apart. Larger bins are approximately
 19.2280 +    logarithmically spaced, four bins per power of two:
 19.2281 +
 19.2282 +    30 bins of size        8   (chunk sizes   16 ..  255)
 19.2283 +     4 bins of size       64   (chunk sizes  256 ..  511)
 19.2284 +     4 bins of size      128   (chunk sizes  512 .. 1023)
 19.2285 +     4 bins of size      256   (chunk sizes 1024 .. 2047)
 19.2286 +         ...                   (widths doubling every 4 bins)
 19.2287 +     1 bin  of size what's left
 19.2288 +
 19.2289 +    In practice the largest bins are rarely used, because requests that
 19.2290 +    big are expected to be serviced via mmap.
 19.2291 +*/
 19.2292 +
 19.2293 +#define NBINS              96
 19.2294 +#define NSMALLBINS         32
 19.2295 +#define SMALLBIN_WIDTH      8
 19.2296 +#define MIN_LARGE_SIZE    256
 19.2297 +
 19.2298 +#define in_smallbin_range(sz)  \
 19.2299 +  ((CHUNK_SIZE_T)(sz) < (CHUNK_SIZE_T)MIN_LARGE_SIZE)
 19.2300 +
 19.2301 +#define smallbin_index(sz)     (((unsigned)(sz)) >> 3)
 19.2302 +
 19.2303 +/*
 19.2304 +  Compute index for size. We expect this to be inlined when
 19.2305 +  compiled with optimization, else not, which works out well.
 19.2306 +*/
 19.2307 +static int largebin_index(unsigned int sz) {
 19.2308 +  unsigned int  x = sz >> SMALLBIN_WIDTH; 
 19.2309 +  unsigned int m;            /* bit position of highest set bit of x */
 19.2310 +
 19.2311 +  if (x >= 0x10000) return NBINS-1;
 19.2312 +
 19.2313 +  /* On intel, use BSRL instruction to find highest bit */
 19.2314 +#if defined(__GNUC__) && defined(i386)
 19.2315 +
 19.2316 +  __asm__("bsrl %1,%0\n\t"
 19.2317 +          : "=r" (m) 
 19.2318 +          : "g"  (x));
 19.2319 +
 19.2320 +#else
 19.2321 +  {
 19.2322 +    /*
 19.2323 +      Based on branch-free nlz algorithm in chapter 5 of Henry
 19.2324 +      S. Warren Jr's book "Hacker's Delight".
 19.2325 +    */
 19.2326 +
 19.2327 +    unsigned int n = ((x - 0x100) >> 16) & 8;
 19.2328 +    x <<= n; 
 19.2329 +    m = ((x - 0x1000) >> 16) & 4;
 19.2330 +    n += m; 
 19.2331 +    x <<= m; 
 19.2332 +    m = ((x - 0x4000) >> 16) & 2;
 19.2333 +    n += m; 
 19.2334 +    x = (x << m) >> 14;
 19.2335 +    m = 13 - n + (x & ~(x>>1));
 19.2336 +  }
 19.2337 +#endif
 19.2338 +
 19.2339 +  /* Use next 2 bits to create finer-granularity bins */
 19.2340 +  return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
 19.2341 +}
 19.2342 +
 19.2343 +#define bin_index(sz) \
 19.2344 + ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
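
/*
  A few concrete index values implied by the definitions above
  (NSMALLBINS == 32, MIN_LARGE_SIZE == 256).  Sizes are internal (padded)
  chunk sizes, not user request sizes.  MALLOC_EXAMPLES is never defined;
  illustration only.
*/
#ifdef MALLOC_EXAMPLES
static void example_bin_index(void)
{
  assert(bin_index(16)  ==  2);   /* smallest chunk under the 32-bit defaults */
  assert(bin_index(64)  ==  8);   /* small bins hold exactly one size each    */
  assert(bin_index(248) == 31);   /* last small bin                           */
  assert(bin_index(256) == 32);   /* first large bin, 64 bytes wide           */
  assert(bin_index(320) == 33);
  assert(bin_index(512) == 36);   /* next power of two: bins 128 bytes wide   */
}
#endif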
 19.2345 +
 19.2346 +/*
 19.2347 +  FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
 19.2348 +  first bin that is maintained in sorted order. This must
 19.2349 +  be the smallest size corresponding to a given bin.
 19.2350 +
 19.2351 +  Normally, this should be MIN_LARGE_SIZE. But you can weaken
 19.2352 +  best fit guarantees to sometimes speed up malloc by increasing this value.
 19.2353 +  Doing this means that malloc may choose a chunk that is 
 19.2354 +  non-best-fitting by up to the width of the bin.
 19.2355 +
 19.2356 +  Some useful cutoff values:
 19.2357 +      512 - all bins sorted
 19.2358 +     2560 - leaves bins <=     64 bytes wide unsorted  
 19.2359 +    12288 - leaves bins <=    512 bytes wide unsorted
 19.2360 +    65536 - leaves bins <=   4096 bytes wide unsorted
 19.2361 +   262144 - leaves bins <=  32768 bytes wide unsorted
 19.2362 +       -1 - no bins sorted (not recommended!)
 19.2363 +*/
 19.2364 +
 19.2365 +#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE 
 19.2366 +/* #define FIRST_SORTED_BIN_SIZE 65536 */
 19.2367 +
 19.2368 +/*
 19.2369 +  Unsorted chunks
 19.2370 +
 19.2371 +    All remainders from chunk splits, as well as all returned chunks,
 19.2372 +    are first placed in the "unsorted" bin. They are then placed
 19.2373 +    in regular bins only after malloc gives them ONE chance to be used.
 19.2374 +    So, basically, the unsorted_chunks list acts as a queue,
 19.2375 +    with chunks being placed on it in free (and malloc_consolidate),
 19.2376 +    and taken off (to be either used or placed in bins) in malloc.
 19.2377 +*/
 19.2378 +
 19.2379 +/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
 19.2380 +#define unsorted_chunks(M)          (bin_at(M, 1))
 19.2381 +
 19.2382 +/*
 19.2383 +  Top
 19.2384 +
 19.2385 +    The top-most available chunk (i.e., the one bordering the end of
 19.2386 +    available memory) is treated specially. It is never included in
 19.2387 +    any bin, is used only if no other chunk is available, and is
 19.2388 +    released back to the system if it is very large (see
 19.2389 +    M_TRIM_THRESHOLD).  Because top initially
 19.2390 +    points to its own bin with initial zero size, thus forcing
 19.2391 +    extension on the first malloc request, we avoid having any special
 19.2392 +    code in malloc to check whether it even exists yet. But we still
 19.2393 +    need to do so when getting memory from system, so we make
 19.2394 +    initial_top treat the bin as a legal but unusable chunk during the
 19.2395 +    interval between initialization and the first call to
 19.2396 +    sYSMALLOc. (This is somewhat delicate, since it relies on
 19.2397 +    the 2 preceding words to be zero during this interval as well.)
 19.2398 +*/
 19.2399 +
 19.2400 +/* Conveniently, the unsorted bin can be used as dummy top on first call */
 19.2401 +#define initial_top(M)              (unsorted_chunks(M))
 19.2402 +
 19.2403 +/*
 19.2404 +  Binmap
 19.2405 +
 19.2406 +    To help compensate for the large number of bins, a one-level index
 19.2407 +    structure is used for bin-by-bin searching.  `binmap' is a
 19.2408 +    bitvector recording whether bins are definitely empty so they can
 19.2409 +    be skipped over during traversals.  The bits are NOT always
 19.2410 +    cleared as soon as bins are empty, but instead only
 19.2411 +    when they are noticed to be empty during traversal in malloc.
 19.2412 +*/
 19.2413 +
 19.2414 +/* Conservatively use 32 bits per map word, even on a 64-bit system */
 19.2415 +#define BINMAPSHIFT      5
 19.2416 +#define BITSPERMAP       (1U << BINMAPSHIFT)
 19.2417 +#define BINMAPSIZE       (NBINS / BITSPERMAP)
 19.2418 +
 19.2419 +#define idx2block(i)     ((i) >> BINMAPSHIFT)
 19.2420 +#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
 19.2421 +
 19.2422 +#define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
 19.2423 +#define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
 19.2424 +#define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
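
/*
  How a bin index maps into the binmap with BINMAPSHIFT == 5: bin i lives
  at bit (i mod 32) of word (i / 32).  MALLOC_EXAMPLES is never defined;
  illustration only.
*/
#ifdef MALLOC_EXAMPLES
static void example_binmap(void)
{
  assert(idx2block(70) == 2);          /* 70 >> 5          */
  assert(idx2bit(70)   == (1U << 6));  /* bit 70 & 31 == 6 */
}
#endif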
 19.2425 +
 19.2426 +/*
 19.2427 +  Fastbins
 19.2428 +
 19.2429 +    An array of lists holding recently freed small chunks.  Fastbins
 19.2430 +    are not doubly linked.  It is faster to single-link them, and
 19.2431 +    since chunks are never removed from the middles of these lists,
 19.2432 +    double linking is not necessary. Also, unlike regular bins, they
 19.2433 +    are not even processed in FIFO order (they use faster LIFO) since
 19.2434 +    ordering doesn't much matter in the transient contexts in which
 19.2435 +    fastbins are normally used.
 19.2436 +
 19.2437 +    Chunks in fastbins keep their inuse bit set, so they cannot
 19.2438 +    be consolidated with other free chunks. malloc_consolidate
 19.2439 +    releases all chunks in fastbins and consolidates them with
 19.2440 +    other free chunks. 
 19.2441 +*/
 19.2442 +
 19.2443 +typedef struct malloc_chunk* mfastbinptr;
 19.2444 +
 19.2445 +/* offset 2 to use otherwise unindexable first 2 bins */
 19.2446 +#define fastbin_index(sz)        ((((unsigned int)(sz)) >> 3) - 2)
 19.2447 +
 19.2448 +/* The maximum fastbin request size we support */
 19.2449 +#define MAX_FAST_SIZE     80
 19.2450 +
 19.2451 +#define NFASTBINS  (fastbin_index(request2size(MAX_FAST_SIZE))+1)
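
/*
  Fastbin indexing under the 32-bit defaults (SIZE_SZ == 4,
  MALLOC_ALIGNMENT == 8): chunk sizes 16, 24, ..., 88 map to fastbins
  0..9, so NFASTBINS comes out as 10.  Other configurations differ.
  MALLOC_EXAMPLES is never defined; illustration only.
*/
#ifdef MALLOC_EXAMPLES
static void example_fastbin_index(void)
{
  assert(fastbin_index(16) == 0);                           /* a MINSIZE chunk */
  assert(fastbin_index(24) == 1);
  assert(fastbin_index(request2size(MAX_FAST_SIZE)) == 9);  /* 80 pads to 88   */
  assert(NFASTBINS == 10);
}
#endif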
 19.2452 +
 19.2453 +/*
 19.2454 +  FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
 19.2455 +  that triggers automatic consolidation of possibly-surrounding
 19.2456 +  fastbin chunks. This is a heuristic, so the exact value should not
 19.2457 +  matter too much. It is defined at half the default trim threshold as a
 19.2458 +  compromise heuristic to only attempt consolidation if it is likely
 19.2459 +  to lead to trimming. However, it is not dynamically tunable, since
 19.2460 +  consolidation reduces fragmentation surrounding large chunks even
 19.2461 +  if trimming is not used.
 19.2462 +*/
 19.2463 +
 19.2464 +#define FASTBIN_CONSOLIDATION_THRESHOLD  \
 19.2465 +  ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
 19.2466 +
 19.2467 +/*
 19.2468 +  Since the lowest 2 bits in max_fast don't matter in size comparisons, 
 19.2469 +  they are used as flags.
 19.2470 +*/
 19.2471 +
 19.2472 +/*
 19.2473 +  ANYCHUNKS_BIT held in max_fast indicates that there may be any
 19.2474 +  freed chunks at all. It is set true when entering a chunk into any
 19.2475 +  bin.
 19.2476 +*/
 19.2477 +
 19.2478 +#define ANYCHUNKS_BIT        (1U)
 19.2479 +
 19.2480 +#define have_anychunks(M)     (((M)->max_fast &  ANYCHUNKS_BIT))
 19.2481 +#define set_anychunks(M)      ((M)->max_fast |=  ANYCHUNKS_BIT)
 19.2482 +#define clear_anychunks(M)    ((M)->max_fast &= ~ANYCHUNKS_BIT)
 19.2483 +
 19.2484 +/*
 19.2485 +  FASTCHUNKS_BIT held in max_fast indicates that there are probably
 19.2486 +  some fastbin chunks. It is set true on entering a chunk into any
 19.2487 +  fastbin, and cleared only in malloc_consolidate.
 19.2488 +*/
 19.2489 +
 19.2490 +#define FASTCHUNKS_BIT        (2U)
 19.2491 +
 19.2492 +#define have_fastchunks(M)   (((M)->max_fast &  FASTCHUNKS_BIT))
 19.2493 +#define set_fastchunks(M)    ((M)->max_fast |=  (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
 19.2494 +#define clear_fastchunks(M)  ((M)->max_fast &= ~(FASTCHUNKS_BIT))
 19.2495 +
 19.2496 +/* 
 19.2497 +   Set value of max_fast. 
 19.2498 +   Use impossibly small value if 0.
 19.2499 +*/
 19.2500 +
 19.2501 +#define set_max_fast(M, s) \
 19.2502 +  (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
 19.2503 +  ((M)->max_fast &  (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
 19.2504 +
 19.2505 +#define get_max_fast(M) \
 19.2506 +  ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
 19.2507 +
 19.2508 +
 19.2509 +/*
 19.2510 +  morecore_properties is a status word holding dynamically discovered
 19.2511 +  or controlled properties of the morecore function
 19.2512 +*/
 19.2513 +
 19.2514 +#define MORECORE_CONTIGUOUS_BIT  (1U)
 19.2515 +
 19.2516 +#define contiguous(M) \
 19.2517 +        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT))
 19.2518 +#define noncontiguous(M) \
 19.2519 +        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT) == 0)
 19.2520 +#define set_contiguous(M) \
 19.2521 +        ((M)->morecore_properties |=  MORECORE_CONTIGUOUS_BIT)
 19.2522 +#define set_noncontiguous(M) \
 19.2523 +        ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
 19.2524 +
 19.2525 +
 19.2526 +/*
 19.2527 +   ----------- Internal state representation and initialization -----------
 19.2528 +*/
 19.2529 +
 19.2530 +struct malloc_state {
 19.2531 +
 19.2532 +  /* The maximum chunk size to be eligible for fastbin */
 19.2533 +  INTERNAL_SIZE_T  max_fast;   /* low 2 bits used as flags */
 19.2534 +
 19.2535 +  /* Fastbins */
 19.2536 +  mfastbinptr      fastbins[NFASTBINS];
 19.2537 +
 19.2538 +  /* Base of the topmost chunk -- not otherwise kept in a bin */
 19.2539 +  mchunkptr        top;
 19.2540 +
 19.2541 +  /* The remainder from the most recent split of a small request */
 19.2542 +  mchunkptr        last_remainder;
 19.2543 +
 19.2544 +  /* Normal bins packed as described above */
 19.2545 +  mchunkptr        bins[NBINS * 2];
 19.2546 +
 19.2547 +  /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
 19.2548 +  unsigned int     binmap[BINMAPSIZE+1];
 19.2549 +
 19.2550 +  /* Tunable parameters */
 19.2551 +  CHUNK_SIZE_T     trim_threshold;
 19.2552 +  INTERNAL_SIZE_T  top_pad;
 19.2553 +  INTERNAL_SIZE_T  mmap_threshold;
 19.2554 +
 19.2555 +  /* Memory map support */
 19.2556 +  int              n_mmaps;
 19.2557 +  int              n_mmaps_max;
 19.2558 +  int              max_n_mmaps;
 19.2559 +
 19.2560 +  /* Cache malloc_getpagesize */
 19.2561 +  unsigned int     pagesize;    
 19.2562 +
 19.2563 +  /* Track properties of MORECORE */
 19.2564 +  unsigned int     morecore_properties;
 19.2565 +
 19.2566 +  /* Statistics */
 19.2567 +  INTERNAL_SIZE_T  mmapped_mem;
 19.2568 +  INTERNAL_SIZE_T  sbrked_mem;
 19.2569 +  INTERNAL_SIZE_T  max_sbrked_mem;
 19.2570 +  INTERNAL_SIZE_T  max_mmapped_mem;
 19.2571 +  INTERNAL_SIZE_T  max_total_mem;
 19.2572 +};
 19.2573 +
 19.2574 +typedef struct malloc_state *mstate;
 19.2575 +
 19.2576 +/* 
 19.2577 +   There is exactly one instance of this struct in this malloc.
 19.2578 +   If you are adapting this malloc in a way that does NOT use a static
 19.2579 +   malloc_state, you MUST explicitly zero-fill it before using. This
 19.2580 +   malloc relies on the property that malloc_state is initialized to
 19.2581 +   all zeroes (as is true of C statics).
 19.2582 +*/
 19.2583 +
 19.2584 +static struct malloc_state av_;  /* never directly referenced */
 19.2585 +
 19.2586 +/*
 19.2587 +   All uses of av_ are via get_malloc_state().
 19.2588 +   At most one "call" to get_malloc_state is made per invocation of
 19.2589 +   the public versions of malloc and free, but other routines
 19.2590 +   that in turn invoke malloc and/or free may call it more than once.
 19.2591 +   Also, it is called in check* routines if DEBUG is set.
 19.2592 +*/
 19.2593 +
 19.2594 +#define get_malloc_state() (&(av_))
 19.2595 +
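/*
  Round trip through the max_fast accessors on the single static arena.
  Under the 32-bit defaults, set_max_fast(av, 64) stores
  request2size(64) == 72 together with whatever flag bits are already set,
  and get_max_fast() masks the flags back off.  MALLOC_EXAMPLES is never
  defined; illustration only.
*/
#ifdef MALLOC_EXAMPLES
static void example_max_fast(void)
{
  mstate av = get_malloc_state();

  set_max_fast(av, 64);
  assert(get_max_fast(av) == request2size(64));

  set_fastchunks(av);                            /* also sets ANYCHUNKS_BIT */
  assert(have_fastchunks(av) && have_anychunks(av));
  assert(get_max_fast(av) == request2size(64));  /* threshold unaffected    */
}
#endif
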
 19.2596 +/*
 19.2597 +  Initialize a malloc_state struct.
 19.2598 +
 19.2599 +  This is called only from within malloc_consolidate, which needs to
 19.2600 +  be called in the same contexts anyway.  It is never called directly
 19.2601 +  outside of malloc_consolidate because some optimizing compilers try
 19.2602 +  to inline it at all call points, which turns out not to be an
 19.2603 +  optimization at all. (Inlining it in malloc_consolidate is fine though.)
 19.2604 +*/
 19.2605 +
 19.2606 +#if __STD_C
 19.2607 +static void malloc_init_state(mstate av)
 19.2608 +#else
 19.2609 +static void malloc_init_state(av) mstate av;
 19.2610 +#endif
 19.2611 +{
 19.2612 +  int     i;
 19.2613 +  mbinptr bin;
 19.2614 +  
 19.2615 +  /* Establish circular links for normal bins */
 19.2616 +  for (i = 1; i < NBINS; ++i) { 
 19.2617 +    bin = bin_at(av,i);
 19.2618 +    bin->fd = bin->bk = bin;
 19.2619 +  }
 19.2620 +
 19.2621 +  av->top_pad        = DEFAULT_TOP_PAD;
 19.2622 +  av->n_mmaps_max    = DEFAULT_MMAP_MAX;
 19.2623 +  av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
 19.2624 +  av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
 19.2625 +
 19.2626 +#if MORECORE_CONTIGUOUS
 19.2627 +  set_contiguous(av);
 19.2628 +#else
 19.2629 +  set_noncontiguous(av);
 19.2630 +#endif
 19.2631 +
 19.2632 +
 19.2633 +  set_max_fast(av, DEFAULT_MXFAST);
 19.2634 +
 19.2635 +  av->top            = initial_top(av);
 19.2636 +  av->pagesize       = malloc_getpagesize;
 19.2637 +}
 19.2638 +
 19.2639 +/* 
 19.2640 +   Other internal utilities operating on mstates
 19.2641 +*/
 19.2642 +
 19.2643 +#if __STD_C
 19.2644 +static Void_t*  sYSMALLOc(INTERNAL_SIZE_T, mstate);
 19.2645 +static int      sYSTRIm(size_t, mstate);
 19.2646 +static void     malloc_consolidate(mstate);
 19.2647 +static Void_t** iALLOc(size_t, size_t*, int, Void_t**);
 19.2648 +#else
 19.2649 +static Void_t*  sYSMALLOc();
 19.2650 +static int      sYSTRIm();
 19.2651 +static void     malloc_consolidate();
 19.2652 +static Void_t** iALLOc();
 19.2653 +#endif
 19.2654 +
 19.2655 +/*
 19.2656 +  Debugging support
 19.2657 +
 19.2658 +  These routines make a number of assertions about the states
 19.2659 +  of data structures that should be true at all times. If any
 19.2660 +  are not true, it's very likely that a user program has somehow
 19.2661 +  trashed memory. (It's also possible that there is a coding error
 19.2662 +  in malloc, in which case please report it!)
 19.2663 +*/
 19.2664 +
 19.2665 +#if ! DEBUG
 19.2666 +
 19.2667 +#define check_chunk(P)
 19.2668 +#define check_free_chunk(P)
 19.2669 +#define check_inuse_chunk(P)
 19.2670 +#define check_remalloced_chunk(P,N)
 19.2671 +#define check_malloced_chunk(P,N)
 19.2672 +#define check_malloc_state()
 19.2673 +
 19.2674 +#else
 19.2675 +#define check_chunk(P)              do_check_chunk(P)
 19.2676 +#define check_free_chunk(P)         do_check_free_chunk(P)
 19.2677 +#define check_inuse_chunk(P)        do_check_inuse_chunk(P)
 19.2678 +#define check_remalloced_chunk(P,N) do_check_remalloced_chunk(P,N)
 19.2679 +#define check_malloced_chunk(P,N)   do_check_malloced_chunk(P,N)
 19.2680 +#define check_malloc_state()        do_check_malloc_state()
 19.2681 +
 19.2682 +/*
 19.2683 +  Properties of all chunks
 19.2684 +*/
 19.2685 +
 19.2686 +#if __STD_C
 19.2687 +static void do_check_chunk(mchunkptr p)
 19.2688 +#else
 19.2689 +static void do_check_chunk(p) mchunkptr p;
 19.2690 +#endif
 19.2691 +{
 19.2692 +  mstate av = get_malloc_state();
 19.2693 +  CHUNK_SIZE_T  sz = chunksize(p);
 19.2694 +  /* min and max possible addresses assuming contiguous allocation */
 19.2695 +  char* max_address = (char*)(av->top) + chunksize(av->top);
 19.2696 +  char* min_address = max_address - av->sbrked_mem;
 19.2697 +
 19.2698 +  if (!chunk_is_mmapped(p)) {
 19.2699 +    
 19.2700 +    /* Has legal address ... */
 19.2701 +    if (p != av->top) {
 19.2702 +      if (contiguous(av)) {
 19.2703 +        assert(((char*)p) >= min_address);
 19.2704 +        assert(((char*)p + sz) <= ((char*)(av->top)));
 19.2705 +      }
 19.2706 +    }
 19.2707 +    else {
 19.2708 +      /* top size is always at least MINSIZE */
 19.2709 +      assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
 19.2710 +      /* top predecessor always marked inuse */
 19.2711 +      assert(prev_inuse(p));
 19.2712 +    }
 19.2713 +      
 19.2714 +  }
 19.2715 +  else {
 19.2716 +#if HAVE_MMAP
 19.2717 +    /* address is outside main heap  */
 19.2718 +    if (contiguous(av) && av->top != initial_top(av)) {
 19.2719 +      assert(((char*)p) < min_address || ((char*)p) > max_address);
 19.2720 +    }
 19.2721 +    /* chunk is page-aligned */
 19.2722 +    assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
 19.2723 +    /* mem is aligned */
 19.2724 +    assert(aligned_OK(chunk2mem(p)));
 19.2725 +#else
 19.2726 +    /* force an appropriate assert violation if debug set */
 19.2727 +    assert(!chunk_is_mmapped(p));
 19.2728 +#endif
 19.2729 +  }
 19.2730 +}
 19.2731 +
 19.2732 +/*
 19.2733 +  Properties of free chunks
 19.2734 +*/
 19.2735 +
 19.2736 +#if __STD_C
 19.2737 +static void do_check_free_chunk(mchunkptr p)
 19.2738 +#else
 19.2739 +static void do_check_free_chunk(p) mchunkptr p;
 19.2740 +#endif
 19.2741 +{
 19.2742 +  mstate av = get_malloc_state();
 19.2743 +
 19.2744 +  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
 19.2745 +  mchunkptr next = chunk_at_offset(p, sz);
 19.2746 +
 19.2747 +  do_check_chunk(p);
 19.2748 +
 19.2749 +  /* Chunk must claim to be free ... */
 19.2750 +  assert(!inuse(p));
 19.2751 +  assert (!chunk_is_mmapped(p));
 19.2752 +
 19.2753 +  /* Unless a special marker, must have OK fields */
 19.2754 +  if ((CHUNK_SIZE_T)(sz) >= MINSIZE)
 19.2755 +  {
 19.2756 +    assert((sz & MALLOC_ALIGN_MASK) == 0);
 19.2757 +    assert(aligned_OK(chunk2mem(p)));
 19.2758 +    /* ... matching footer field */
 19.2759 +    assert(next->prev_size == sz);
 19.2760 +    /* ... and is fully consolidated */
 19.2761 +    assert(prev_inuse(p));
 19.2762 +    assert (next == av->top || inuse(next));
 19.2763 +
 19.2764 +    /* ... and has minimally sane links */
 19.2765 +    assert(p->fd->bk == p);
 19.2766 +    assert(p->bk->fd == p);
 19.2767 +  }
 19.2768 +  else /* markers are always of size SIZE_SZ */
 19.2769 +    assert(sz == SIZE_SZ);
 19.2770 +}
 19.2771 +
 19.2772 +/*
 19.2773 +  Properties of inuse chunks
 19.2774 +*/
 19.2775 +
 19.2776 +#if __STD_C
 19.2777 +static void do_check_inuse_chunk(mchunkptr p)
 19.2778 +#else
 19.2779 +static void do_check_inuse_chunk(p) mchunkptr p;
 19.2780 +#endif
 19.2781 +{
 19.2782 +  mstate av = get_malloc_state();
 19.2783 +  mchunkptr next;
 19.2784 +  do_check_chunk(p);
 19.2785 +
 19.2786 +  if (chunk_is_mmapped(p))
 19.2787 +    return; /* mmapped chunks have no next/prev */
 19.2788 +
 19.2789 +  /* Check whether it claims to be in use ... */
 19.2790 +  assert(inuse(p));
 19.2791 +
 19.2792 +  next = next_chunk(p);
 19.2793 +
 19.2794 +  /* ... and is surrounded by OK chunks.
 19.2795 +    Since more things can be checked with free chunks than inuse ones,
 19.2796 +    if an inuse chunk borders them and debug is on, it's worth doing them.
 19.2797 +  */
 19.2798 +  if (!prev_inuse(p))  {
 19.2799 +    /* Note that we cannot even look at prev unless it is not inuse */
 19.2800 +    mchunkptr prv = prev_chunk(p);
 19.2801 +    assert(next_chunk(prv) == p);
 19.2802 +    do_check_free_chunk(prv);
 19.2803 +  }
 19.2804 +
 19.2805 +  if (next == av->top) {
 19.2806 +    assert(prev_inuse(next));
 19.2807 +    assert(chunksize(next) >= MINSIZE);
 19.2808 +  }
 19.2809 +  else if (!inuse(next))
 19.2810 +    do_check_free_chunk(next);
 19.2811 +}
 19.2812 +
 19.2813 +/*
 19.2814 +  Properties of chunks recycled from fastbins
 19.2815 +*/
 19.2816 +
 19.2817 +#if __STD_C
 19.2818 +static void do_check_remalloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
 19.2819 +#else
 19.2820 +static void do_check_remalloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
 19.2821 +#endif
 19.2822 +{
 19.2823 +  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
 19.2824 +
 19.2825 +  do_check_inuse_chunk(p);
 19.2826 +
 19.2827 +  /* Legal size ... */
 19.2828 +  assert((sz & MALLOC_ALIGN_MASK) == 0);
 19.2829 +  assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
 19.2830 +  /* ... and alignment */
 19.2831 +  assert(aligned_OK(chunk2mem(p)));
 19.2832 +  /* chunk is less than MINSIZE more than request */
 19.2833 +  assert((long)(sz) - (long)(s) >= 0);
 19.2834 +  assert((long)(sz) - (long)(s + MINSIZE) < 0);
 19.2835 +}
 19.2836 +
 19.2837 +/*
 19.2838 +  Properties of nonrecycled chunks at the point they are malloced
 19.2839 +*/
 19.2840 +
 19.2841 +#if __STD_C
 19.2842 +static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
 19.2843 +#else
 19.2844 +static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
 19.2845 +#endif
 19.2846 +{
 19.2847 +  /* same as recycled case ... */
 19.2848 +  do_check_remalloced_chunk(p, s);
 19.2849 +
 19.2850 +  /*
 19.2851 +    ... plus,  must obey implementation invariant that prev_inuse is
 19.2852 +    always true of any allocated chunk; i.e., that each allocated
 19.2853 +    chunk borders either a previously allocated and still in-use
 19.2854 +    chunk, or the base of its memory arena. This is ensured
 19.2855 +    by making all allocations from the `lowest' part of any found
 19.2856 +    chunk.  This does not necessarily hold however for chunks
 19.2857 +    recycled via fastbins.
 19.2858 +  */
 19.2859 +
 19.2860 +  assert(prev_inuse(p));
 19.2861 +}
 19.2862 +
 19.2863 +
 19.2864 +/*
 19.2865 +  Properties of malloc_state.
 19.2866 +
 19.2867 +  This may be useful for debugging malloc, as well as detecting user
 19.2868 +  programmer errors that somehow write into malloc_state.
 19.2869 +
 19.2870 +  If you are extending or experimenting with this malloc, you can
 19.2871 +  probably figure out how to hack this routine to print out or
 19.2872 +  display chunk addresses, sizes, bins, and other instrumentation.
 19.2873 +*/
 19.2874 +
 19.2875 +static void do_check_malloc_state()
 19.2876 +{
 19.2877 +  mstate av = get_malloc_state();
 19.2878 +  int i;
 19.2879 +  mchunkptr p;
 19.2880 +  mchunkptr q;
 19.2881 +  mbinptr b;
 19.2882 +  unsigned int binbit;
 19.2883 +  int empty;
 19.2884 +  unsigned int idx;
 19.2885 +  INTERNAL_SIZE_T size;
 19.2886 +  CHUNK_SIZE_T  total = 0;
 19.2887 +  int max_fast_bin;
 19.2888 +
 19.2889 +  /* internal size_t must be no wider than pointer type */
 19.2890 +  assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
 19.2891 +
 19.2892 +  /* alignment is a power of 2 */
 19.2893 +  assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
 19.2894 +
 19.2895 +  /* cannot run remaining checks until fully initialized */
 19.2896 +  if (av->top == 0 || av->top == initial_top(av))
 19.2897 +    return;
 19.2898 +
 19.2899 +  /* pagesize is a power of 2 */
 19.2900 +  assert((av->pagesize & (av->pagesize-1)) == 0);
 19.2901 +
 19.2902 +  /* properties of fastbins */
 19.2903 +
 19.2904 +  /* max_fast is in allowed range */
 19.2905 +  assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));
 19.2906 +
 19.2907 +  max_fast_bin = fastbin_index(av->max_fast);
 19.2908 +
 19.2909 +  for (i = 0; i < NFASTBINS; ++i) {
 19.2910 +    p = av->fastbins[i];
 19.2911 +
 19.2912 +    /* all bins past max_fast are empty */
 19.2913 +    if (i > max_fast_bin)
 19.2914 +      assert(p == 0);
 19.2915 +
 19.2916 +    while (p != 0) {
 19.2917 +      /* each chunk claims to be inuse */
 19.2918 +      do_check_inuse_chunk(p);
 19.2919 +      total += chunksize(p);
 19.2920 +      /* chunk belongs in this bin */
 19.2921 +      assert(fastbin_index(chunksize(p)) == i);
 19.2922 +      p = p->fd;
 19.2923 +    }
 19.2924 +  }
 19.2925 +
 19.2926 +  if (total != 0)
 19.2927 +    assert(have_fastchunks(av));
 19.2928 +  else if (!have_fastchunks(av))
 19.2929 +    assert(total == 0);
 19.2930 +
 19.2931 +  /* check normal bins */
 19.2932 +  for (i = 1; i < NBINS; ++i) {
 19.2933 +    b = bin_at(av,i);
 19.2934 +
 19.2935 +    /* binmap is accurate (except for bin 1 == unsorted_chunks) */
 19.2936 +    if (i >= 2) {
 19.2937 +      binbit = get_binmap(av,i);
 19.2938 +      empty = last(b) == b;
 19.2939 +      if (!binbit)
 19.2940 +        assert(empty);
 19.2941 +      else if (!empty)
 19.2942 +        assert(binbit);
 19.2943 +    }
 19.2944 +
 19.2945 +    for (p = last(b); p != b; p = p->bk) {
 19.2946 +      /* each chunk claims to be free */
 19.2947 +      do_check_free_chunk(p);
 19.2948 +      size = chunksize(p);
 19.2949 +      total += size;
 19.2950 +      if (i >= 2) {
 19.2951 +        /* chunk belongs in bin */
 19.2952 +        idx = bin_index(size);
 19.2953 +        assert(idx == i);
 19.2954 +        /* lists are sorted */
 19.2955 +        if ((CHUNK_SIZE_T) size >= (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
 19.2956 +          assert(p->bk == b || 
 19.2957 +                 (CHUNK_SIZE_T)chunksize(p->bk) >= 
 19.2958 +                 (CHUNK_SIZE_T)chunksize(p));
 19.2959 +        }
 19.2960 +      }
 19.2961 +      /* chunk is followed by a legal chain of inuse chunks */
 19.2962 +      for (q = next_chunk(p);
 19.2963 +           (q != av->top && inuse(q) && 
 19.2964 +             (CHUNK_SIZE_T)(chunksize(q)) >= MINSIZE);
 19.2965 +           q = next_chunk(q))
 19.2966 +        do_check_inuse_chunk(q);
 19.2967 +    }
 19.2968 +  }
 19.2969 +
 19.2970 +  /* top chunk is OK */
 19.2971 +  check_chunk(av->top);
 19.2972 +
 19.2973 +  /* sanity checks for statistics */
 19.2974 +
 19.2975 +  assert(total <= (CHUNK_SIZE_T)(av->max_total_mem));
 19.2976 +  assert(av->n_mmaps >= 0);
 19.2977 +  assert(av->n_mmaps <= av->max_n_mmaps);
 19.2978 +
 19.2979 +  assert((CHUNK_SIZE_T)(av->sbrked_mem) <=
 19.2980 +         (CHUNK_SIZE_T)(av->max_sbrked_mem));
 19.2981 +
 19.2982 +  assert((CHUNK_SIZE_T)(av->mmapped_mem) <=
 19.2983 +         (CHUNK_SIZE_T)(av->max_mmapped_mem));
 19.2984 +
 19.2985 +  assert((CHUNK_SIZE_T)(av->max_total_mem) >=
 19.2986 +         (CHUNK_SIZE_T)(av->mmapped_mem) + (CHUNK_SIZE_T)(av->sbrked_mem));
 19.2987 +}
 19.2988 +#endif
 19.2989 +
 19.2990 +
 19.2991 +/* ----------- Routines dealing with system allocation -------------- */
 19.2992 +
 19.2993 +/*
 19.2994 +  sysmalloc handles malloc cases requiring more memory from the system.
 19.2995 +  On entry, it is assumed that av->top does not have enough
 19.2996 +  space to service request for nb bytes, thus requiring that av->top
 19.2997 +  be extended or replaced.
 19.2998 +*/
 19.2999 +
 19.3000 +#if __STD_C
 19.3001 +static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
 19.3002 +#else
 19.3003 +static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
 19.3004 +#endif
 19.3005 +{
 19.3006 +  mchunkptr       old_top;        /* incoming value of av->top */
 19.3007 +  INTERNAL_SIZE_T old_size;       /* its size */
 19.3008 +  char*           old_end;        /* its end address */
 19.3009 +
 19.3010 +  long            size;           /* arg to first MORECORE or mmap call */
 19.3011 +  char*           brk;            /* return value from MORECORE */
 19.3012 +
 19.3013 +  long            correction;     /* arg to 2nd MORECORE call */
 19.3014 +  char*           snd_brk;        /* 2nd return val */
 19.3015 +
 19.3016 +  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
 19.3017 +  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
 19.3018 +  char*           aligned_brk;    /* aligned offset into brk */
 19.3019 +
 19.3020 +  mchunkptr       p;              /* the allocated/returned chunk */
 19.3021 +  mchunkptr       remainder;      /* remainder from allocation */
 19.3022 +  CHUNK_SIZE_T    remainder_size; /* its size */
 19.3023 +
 19.3024 +  CHUNK_SIZE_T    sum;            /* for updating stats */
 19.3025 +
 19.3026 +  size_t          pagemask  = av->pagesize - 1;
 19.3027 +
 19.3028 +  /*
 19.3029 +    If there is space available in fastbins, consolidate and retry
 19.3030 +    malloc from scratch rather than getting memory from system.  This
 19.3031 +    can occur only if nb is in smallbin range so we didn't consolidate
 19.3032 +    upon entry to malloc. It is much easier to handle this case here
 19.3033 +    than in malloc proper.
 19.3034 +  */
 19.3035 +
 19.3036 +  if (have_fastchunks(av)) {
 19.3037 +    assert(in_smallbin_range(nb));
 19.3038 +    malloc_consolidate(av);
 19.3039 +    return mALLOc(nb - MALLOC_ALIGN_MASK);
 19.3040 +  }
 19.3041 +
 19.3042 +
 19.3043 +#if HAVE_MMAP
 19.3044 +
 19.3045 +  /*
 19.3046 +    If have mmap, and the request size meets the mmap threshold, and
 19.3047 +    the system supports mmap, and there are few enough currently
 19.3048 +    allocated mmapped regions, try to directly map this request
 19.3049 +    rather than expanding top.
 19.3050 +  */
 19.3051 +
 19.3052 +  if ((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) &&
 19.3053 +      (av->n_mmaps < av->n_mmaps_max)) {
 19.3054 +
 19.3055 +    char* mm;             /* return value from mmap call*/
 19.3056 +
 19.3057 +    /*
 19.3058 +      Round up size to nearest page.  For mmapped chunks, the overhead
 19.3059 +      is one SIZE_SZ unit larger than for normal chunks, because there
 19.3060 +      is no following chunk whose prev_size field could be used.
 19.3061 +    */
 19.3062 +    size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
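    /*
      For example, with 4096-byte pages and the 32-bit defaults, a padded
      request of nb == 200000 becomes
      (200000 + 4 + 7 + 4095) & ~4095 == 200704, i.e. 49 whole pages.
      (Figures are illustrative; they depend on SIZE_SZ and the page size.)
    */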
 19.3063 +
 19.3064 +    /* Don't try if size wraps around 0 */
 19.3065 +    if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
 19.3066 +
 19.3067 +      mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
 19.3068 +      
 19.3069 +      if (mm != (char*)(MORECORE_FAILURE)) {
 19.3070 +        
 19.3071 +        /*
 19.3072 +          The offset to the start of the mmapped region is stored
 19.3073 +          in the prev_size field of the chunk. This allows us to adjust
 19.3074 +          returned start address to meet alignment requirements here 
 19.3075 +          and in memalign(), and still be able to compute proper
 19.3076 +          address argument for later munmap in free() and realloc().
 19.3077 +        */
 19.3078 +        
 19.3079 +        front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
 19.3080 +        if (front_misalign > 0) {
 19.3081 +          correction = MALLOC_ALIGNMENT - front_misalign;
 19.3082 +          p = (mchunkptr)(mm + correction);
 19.3083 +          p->prev_size = correction;
 19.3084 +          set_head(p, (size - correction) |IS_MMAPPED);
 19.3085 +        }
 19.3086 +        else {
 19.3087 +          p = (mchunkptr)mm;
 19.3088 +          p->prev_size = 0;
 19.3089 +          set_head(p, size|IS_MMAPPED);
 19.3090 +        }
 19.3091 +        
 19.3092 +        /* update statistics */
 19.3093 +        
 19.3094 +        if (++av->n_mmaps > av->max_n_mmaps) 
 19.3095 +          av->max_n_mmaps = av->n_mmaps;
 19.3096 +        
 19.3097 +        sum = av->mmapped_mem += size;
 19.3098 +        if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem)) 
 19.3099 +          av->max_mmapped_mem = sum;
 19.3100 +        sum += av->sbrked_mem;
 19.3101 +        if (sum > (CHUNK_SIZE_T)(av->max_total_mem)) 
 19.3102 +          av->max_total_mem = sum;
 19.3103 +
 19.3104 +        check_chunk(p);
 19.3105 +        
 19.3106 +        return chunk2mem(p);
 19.3107 +      }
 19.3108 +    }
 19.3109 +  }
 19.3110 +#endif
 19.3111 +
 19.3112 +  /* Record incoming configuration of top */
 19.3113 +
 19.3114 +  old_top  = av->top;
 19.3115 +  old_size = chunksize(old_top);
 19.3116 +  old_end  = (char*)(chunk_at_offset(old_top, old_size));
 19.3117 +
 19.3118 +  brk = snd_brk = (char*)(MORECORE_FAILURE); 
 19.3119 +
 19.3120 +  /* 
 19.3121 +     If not the first time through, we require old_size to be
 19.3122 +     at least MINSIZE and to have prev_inuse set.
 19.3123 +  */
 19.3124 +
 19.3125 +  assert((old_top == initial_top(av) && old_size == 0) || 
 19.3126 +         ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
 19.3127 +          prev_inuse(old_top)));
 19.3128 +
 19.3129 +  /* Precondition: not enough current space to satisfy nb request */
 19.3130 +  assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));
 19.3131 +
 19.3132 +  /* Precondition: all fastbins are consolidated */
 19.3133 +  assert(!have_fastchunks(av));
 19.3134 +
 19.3135 +
 19.3136 +  /* Request enough space for nb + pad + overhead */
 19.3137 +
 19.3138 +  size = nb + av->top_pad + MINSIZE;
 19.3139 +
 19.3140 +  /*
 19.3141 +    If contiguous, we can subtract out existing space that we hope to
 19.3142 +    combine with new space. We add it back later only if
 19.3143 +    we don't actually get contiguous space.
 19.3144 +  */
 19.3145 +
 19.3146 +  if (contiguous(av))
 19.3147 +    size -= old_size;
 19.3148 +
 19.3149 +  /*
 19.3150 +    Round to a multiple of page size.
 19.3151 +    If MORECORE is not contiguous, this ensures that we only call it
 19.3152 +    with whole-page arguments.  And if MORECORE is contiguous and
 19.3153 +    this is not first time through, this preserves page-alignment of
 19.3154 +    previous calls. Otherwise, we correct to page-align below.
 19.3155 +  */
 19.3156 +
 19.3157 +  size = (size + pagemask) & ~pagemask;
 19.3158 +
 19.3159 +  /*
 19.3160 +    Don't try to call MORECORE if argument is so big as to appear
 19.3161 +    negative. Note that since mmap takes size_t arg, it may succeed
 19.3162 +    below even if we cannot call MORECORE.
 19.3163 +  */
 19.3164 +
 19.3165 +  if (size > 0) 
 19.3166 +    brk = (char*)(MORECORE(size));
 19.3167 +
 19.3168 +  /*
 19.3169 +    If have mmap, try using it as a backup when MORECORE fails or
 19.3170 +    cannot be used. This is worth doing on systems that have "holes" in
 19.3171 +    address space, so sbrk cannot extend to give contiguous space, but
 19.3172 +    space is available elsewhere.  Note that we ignore mmap max count
 19.3173 +    and threshold limits, since the space will not be used as a
 19.3174 +    segregated mmap region.
 19.3175 +  */
 19.3176 +
 19.3177 +#if HAVE_MMAP
 19.3178 +  if (brk == (char*)(MORECORE_FAILURE)) {
 19.3179 +
 19.3180 +    /* Cannot merge with old top, so add its size back in */
 19.3181 +    if (contiguous(av))
 19.3182 +      size = (size + old_size + pagemask) & ~pagemask;
 19.3183 +
 19.3184 +    /* If we are relying on mmap as backup, then use larger units */
 19.3185 +    if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE))
 19.3186 +      size = MMAP_AS_MORECORE_SIZE;
 19.3187 +
 19.3188 +    /* Don't try if size wraps around 0 */
 19.3189 +    if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) {
 19.3190 +
 19.3191 +      brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
 19.3192 +      
 19.3193 +      if (brk != (char*)(MORECORE_FAILURE)) {
 19.3194 +        
 19.3195 +        /* We do not need, and cannot use, another sbrk call to find end */
 19.3196 +        snd_brk = brk + size;
 19.3197 +        
 19.3198 +        /* 
 19.3199 +           Record that we no longer have a contiguous sbrk region. 
 19.3200 +           After the first time mmap is used as backup, we do not
 19.3201 +           ever rely on contiguous space since this could incorrectly
 19.3202 +           bridge regions.
 19.3203 +        */
 19.3204 +        set_noncontiguous(av);
 19.3205 +      }
 19.3206 +    }
 19.3207 +  }
 19.3208 +#endif
 19.3209 +
 19.3210 +  if (brk != (char*)(MORECORE_FAILURE)) {
 19.3211 +    av->sbrked_mem += size;
 19.3212 +
 19.3213 +    /*
 19.3214 +      If MORECORE extends previous space, we can likewise extend top size.
 19.3215 +    */
 19.3216 +    
 19.3217 +    if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
 19.3218 +      set_head(old_top, (size + old_size) | PREV_INUSE);
 19.3219 +    }
 19.3220 +
 19.3221 +    /*
 19.3222 +      Otherwise, make adjustments:
 19.3223 +      
 19.3224 +      * If the first time through or noncontiguous, we need to call sbrk
 19.3225 +        just to find out where the end of memory lies.
 19.3226 +
 19.3227 +      * We need to ensure that all returned chunks from malloc will meet
 19.3228 +        MALLOC_ALIGNMENT
 19.3229 +
 19.3230 +      * If there was an intervening foreign sbrk, we need to adjust sbrk
 19.3231 +        request size to account for fact that we will not be able to
 19.3232 +        combine new space with existing space in old_top.
 19.3233 +
 19.3234 +      * Almost all systems internally allocate whole pages at a time, in
 19.3235 +        which case we might as well use the whole last page of request.
 19.3236 +        So we allocate enough more memory to hit a page boundary now,
 19.3237 +        which in turn causes future contiguous calls to page-align.
 19.3238 +    */
 19.3239 +    
 19.3240 +    else {
 19.3241 +      front_misalign = 0;
 19.3242 +      end_misalign = 0;
 19.3243 +      correction = 0;
 19.3244 +      aligned_brk = brk;
 19.3245 +
 19.3246 +      /*
 19.3247 +        If MORECORE returns an address lower than we have seen before,
 19.3248 +        we know it isn't really contiguous.  This and some subsequent
 19.3249 +        checks help cope with non-conforming MORECORE functions and
 19.3250 +        the presence of "foreign" calls to MORECORE from outside of
 19.3251 +        malloc or by other threads.  We cannot guarantee to detect
 19.3252 +        these in all cases, but cope with the ones we do detect.
 19.3253 +      */
 19.3254 +      if (contiguous(av) && old_size != 0 && brk < old_end) {
 19.3255 +        set_noncontiguous(av);
 19.3256 +      }
 19.3257 +      
 19.3258 +      /* handle contiguous cases */
 19.3259 +      if (contiguous(av)) { 
 19.3260 +
 19.3261 +        /* 
 19.3262 +           We can tolerate forward non-contiguities here (usually due
 19.3263 +           to foreign calls) but treat them as part of our space for
 19.3264 +           stats reporting.
 19.3265 +        */
 19.3266 +        if (old_size != 0) 
 19.3267 +          av->sbrked_mem += brk - old_end;
 19.3268 +        
 19.3269 +        /* Guarantee alignment of first new chunk made from this space */
 19.3270 +
 19.3271 +        front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
 19.3272 +        if (front_misalign > 0) {
 19.3273 +
 19.3274 +          /*
 19.3275 +            Skip over some bytes to arrive at an aligned position.
 19.3276 +            We don't need to specially mark these wasted front bytes.
 19.3277 +            They will never be accessed anyway because
 19.3278 +            prev_inuse of av->top (and any chunk created from its start)
 19.3279 +            is always true after initialization.
 19.3280 +          */
 19.3281 +
 19.3282 +          correction = MALLOC_ALIGNMENT - front_misalign;
 19.3283 +          aligned_brk += correction;
 19.3284 +        }
 19.3285 +        
 19.3286 +        /*
 19.3287 +          If this isn't adjacent to existing space, then we will not
 19.3288 +          be able to merge with old_top space, so must add to 2nd request.
 19.3289 +        */
 19.3290 +        
 19.3291 +        correction += old_size;
 19.3292 +        
 19.3293 +        /* Extend the end address to hit a page boundary */
 19.3294 +        end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
 19.3295 +        correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
 19.3296 +        
 19.3297 +        assert(correction >= 0);
 19.3298 +        snd_brk = (char*)(MORECORE(correction));
 19.3299 +        
 19.3300 +        if (snd_brk == (char*)(MORECORE_FAILURE)) {
 19.3301 +          /*
 19.3302 +            If can't allocate correction, try to at least find out current
 19.3303 +            brk.  It might be enough to proceed without failing.
 19.3304 +          */
 19.3305 +          correction = 0;
 19.3306 +          snd_brk = (char*)(MORECORE(0));
 19.3307 +        }
 19.3308 +        else if (snd_brk < brk) {
 19.3309 +          /*
 19.3310 +            If the second call gives noncontiguous space even though
 19.3311 +            it says it won't, the only course of action is to ignore
 19.3312 +            results of second call, and conservatively estimate where
 19.3313 +            the first call left us. Also set noncontiguous, so this
 19.3314 +            won't happen again, leaving at most one hole.
 19.3315 +            
 19.3316 +            Note that this check is intrinsically incomplete.  Because
 19.3317 +            MORECORE is allowed to give more space than we ask for,
 19.3318 +            there is no reliable way to detect a noncontiguity
 19.3319 +            producing a forward gap for the second call.
 19.3320 +          */
 19.3321 +          snd_brk = brk + size;
 19.3322 +          correction = 0;
 19.3323 +          set_noncontiguous(av);
 19.3324 +        }
 19.3325 +
 19.3326 +      }
 19.3327 +      
 19.3328 +      /* handle non-contiguous cases */
 19.3329 +      else { 
 19.3330 +        /* MORECORE/mmap must correctly align */
 19.3331 +        assert(aligned_OK(chunk2mem(brk)));
 19.3332 +        
 19.3333 +        /* Find out current end of memory */
 19.3334 +        if (snd_brk == (char*)(MORECORE_FAILURE)) {
 19.3335 +          snd_brk = (char*)(MORECORE(0));
 19.3336 +          av->sbrked_mem += snd_brk - brk - size;
 19.3337 +        }
 19.3338 +      }
 19.3339 +      
 19.3340 +      /* Adjust top based on results of second sbrk */
 19.3341 +      if (snd_brk != (char*)(MORECORE_FAILURE)) {
 19.3342 +        av->top = (mchunkptr)aligned_brk;
 19.3343 +        set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
 19.3344 +        av->sbrked_mem += correction;
 19.3345 +     
 19.3346 +        /*
 19.3347 +          If not the first time through, we either have a
 19.3348 +          gap due to foreign sbrk or a non-contiguous region.  Insert a
 19.3349 +          double fencepost at old_top to prevent consolidation with space
 19.3350 +          we don't own. These fenceposts are artificial chunks that are
 19.3351 +          marked as inuse and are in any case too small to use.  We need
 19.3352 +          two to make sizes and alignments work out.
 19.3353 +        */
 19.3354 +   
 19.3355 +        if (old_size != 0) {
 19.3356 +          /* 
 19.3357 +             Shrink old_top to insert fenceposts, keeping size a
 19.3358 +             multiple of MALLOC_ALIGNMENT. We know there is at least
 19.3359 +             enough space in old_top to do this.
 19.3360 +          */
 19.3361 +          old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
 19.3362 +          set_head(old_top, old_size | PREV_INUSE);
 19.3363 +          
 19.3364 +          /*
 19.3365 +            Note that the following assignments completely overwrite
 19.3366 +            old_top when old_size was previously MINSIZE.  This is
 19.3367 +            intentional. We need the fencepost, even if old_top otherwise gets
 19.3368 +            lost.
 19.3369 +          */
 19.3370 +          chunk_at_offset(old_top, old_size          )->size =
 19.3371 +            SIZE_SZ|PREV_INUSE;
 19.3372 +
 19.3373 +          chunk_at_offset(old_top, old_size + SIZE_SZ)->size =
 19.3374 +            SIZE_SZ|PREV_INUSE;
 19.3375 +
 19.3376 +          /* 
 19.3377 +             If possible, release the rest, suppressing trimming.
 19.3378 +          */
 19.3379 +          if (old_size >= MINSIZE) {
 19.3380 +            INTERNAL_SIZE_T tt = av->trim_threshold;
 19.3381 +            av->trim_threshold = (INTERNAL_SIZE_T)(-1);
 19.3382 +            fREe(chunk2mem(old_top));
 19.3383 +            av->trim_threshold = tt;
 19.3384 +          }
 19.3385 +        }
 19.3386 +      }
 19.3387 +    }
 19.3388 +    
 19.3389 +    /* Update statistics */
 19.3390 +    sum = av->sbrked_mem;
 19.3391 +    if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
 19.3392 +      av->max_sbrked_mem = sum;
 19.3393 +    
 19.3394 +    sum += av->mmapped_mem;
 19.3395 +    if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
 19.3396 +      av->max_total_mem = sum;
 19.3397 +
 19.3398 +    check_malloc_state();
 19.3399 +    
 19.3400 +    /* finally, do the allocation */
 19.3401 +
 19.3402 +    p = av->top;
 19.3403 +    size = chunksize(p);
 19.3404 +    
 19.3405 +    /* check that one of the above allocation paths succeeded */
 19.3406 +    if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
 19.3407 +      remainder_size = size - nb;
 19.3408 +      remainder = chunk_at_offset(p, nb);
 19.3409 +      av->top = remainder;
 19.3410 +      set_head(p, nb | PREV_INUSE);
 19.3411 +      set_head(remainder, remainder_size | PREV_INUSE);
 19.3412 +      check_malloced_chunk(p, nb);
 19.3413 +      return chunk2mem(p);
 19.3414 +    }
 19.3415 +
 19.3416 +  }
 19.3417 +
 19.3418 +  /* catch all failure paths */
 19.3419 +  MALLOC_FAILURE_ACTION;
 19.3420 +  return 0;
 19.3421 +}
 19.3422 +
 19.3423 +
 19.3424 +
 19.3425 +
 19.3426 +/*
 19.3427 +  sYSTRIm is an inverse of sorts to sYSMALLOc.  It gives memory back
 19.3428 +  to the system (via negative arguments to sbrk) if there is unused
 19.3429 +  memory at the `high' end of the malloc pool. It is called
 19.3430 +  automatically by free() when top space exceeds the trim
 19.3431 +  threshold. It is also called by the public malloc_trim routine.  It
 19.3432 +  returns 1 if it actually released any memory, else 0.
 19.3433 +*/
 19.3434 +
 19.3435 +#if __STD_C
 19.3436 +static int sYSTRIm(size_t pad, mstate av)
 19.3437 +#else
 19.3438 +static int sYSTRIm(pad, av) size_t pad; mstate av;
 19.3439 +#endif
 19.3440 +{
 19.3441 +  long  top_size;        /* Amount of top-most memory */
 19.3442 +  long  extra;           /* Amount to release */
 19.3443 +  long  released;        /* Amount actually released */
 19.3444 +  char* current_brk;     /* address returned by pre-check sbrk call */
 19.3445 +  char* new_brk;         /* address returned by post-check sbrk call */
 19.3446 +  size_t pagesz;
 19.3447 +
 19.3448 +  pagesz = av->pagesize;
 19.3449 +  top_size = chunksize(av->top);
 19.3450 +  
 19.3451 +  /* Release in pagesize units, keeping at least one page */
 19.3452 +  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
 19.3453 +  
 19.3454 +  if (extra > 0) {
 19.3455 +    
 19.3456 +    /*
 19.3457 +      Only proceed if end of memory is where we last set it.
 19.3458 +      This avoids problems if there were foreign sbrk calls.
 19.3459 +    */
 19.3460 +    current_brk = (char*)(MORECORE(0));
 19.3461 +    if (current_brk == (char*)(av->top) + top_size) {
 19.3462 +      
 19.3463 +      /*
 19.3464 +        Attempt to release memory. We ignore MORECORE return value,
 19.3465 +        and instead call again to find out where new end of memory is.
 19.3466 +        This avoids problems if first call releases less than we asked,
 19.3467 +        or if failure somehow altered the brk value. (We could still
 19.3468 +        encounter problems if it altered brk in some very bad way,
 19.3469 +        but the only thing we can do is adjust anyway, which will cause
 19.3470 +        some downstream failure.)
 19.3471 +      */
 19.3472 +      
 19.3473 +      MORECORE(-extra);
 19.3474 +      new_brk = (char*)(MORECORE(0));
 19.3475 +      
 19.3476 +      if (new_brk != (char*)MORECORE_FAILURE) {
 19.3477 +        released = (long)(current_brk - new_brk);
 19.3478 +        
 19.3479 +        if (released != 0) {
 19.3480 +          /* Success. Adjust top. */
 19.3481 +          av->sbrked_mem -= released;
 19.3482 +          set_head(av->top, (top_size - released) | PREV_INUSE);
 19.3483 +          check_malloc_state();
 19.3484 +          return 1;
 19.3485 +        }
 19.3486 +      }
 19.3487 +    }
 19.3488 +  }
 19.3489 +  return 0;
 19.3490 +}
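
/*
  Worked example of the release computation above, assuming a 4096-byte
  page, pad == 0 and MINSIZE == 16: if chunksize(av->top) is 20480 (five
  pages), then extra == ((20480 - 0 - 16 + 4095) / 4096 - 1) * 4096
  == 4 * 4096 == 16384, so four pages are given back and one page is kept
  in top.  Figures are illustrative only.
*/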
 19.3491 +
 19.3492 +/*
 19.3493 +  ------------------------------ malloc ------------------------------
 19.3494 +*/
 19.3495 +
 19.3496 +
 19.3497 +#if __STD_C
 19.3498 +Void_t* mALLOc(size_t bytes)
 19.3499 +#else
 19.3500 +  Void_t* mALLOc(bytes) size_t bytes;
 19.3501 +#endif
 19.3502 +{
 19.3503 +  mstate av = get_malloc_state();
 19.3504 +
 19.3505 +  INTERNAL_SIZE_T nb;               /* normalized request size */
 19.3506 +  unsigned int    idx;              /* associated bin index */
 19.3507 +  mbinptr         bin;              /* associated bin */
 19.3508 +  mfastbinptr*    fb;               /* associated fastbin */
 19.3509 +
 19.3510 +  mchunkptr       victim;           /* inspected/selected chunk */
 19.3511 +  INTERNAL_SIZE_T size;             /* its size */
 19.3512 +  int             victim_index;     /* its bin index */
 19.3513 +
 19.3514 +  mchunkptr       remainder;        /* remainder from a split */
 19.3515 +  CHUNK_SIZE_T    remainder_size;   /* its size */
 19.3516 +
 19.3517 +  unsigned int    block;            /* bit map traverser */
 19.3518 +  unsigned int    bit;              /* bit map traverser */
 19.3519 +  unsigned int    map;              /* current word of binmap */
 19.3520 +
 19.3521 +  mchunkptr       fwd;              /* misc temp for linking */
 19.3522 +  mchunkptr       bck;              /* misc temp for linking */
 19.3523 +
 19.3524 +  /*
 19.3525 +    Convert request size to internal form by adding SIZE_SZ bytes
 19.3526 +    overhead plus possibly more to obtain necessary alignment and/or
 19.3527 +    to obtain a size of at least MINSIZE, the smallest allocatable
 19.3528 +    size. Also, checked_request2size traps (returning 0) request sizes
 19.3529 +    that are so large that they wrap around zero when padded and
 19.3530 +    aligned.
 19.3531 +  */
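/*
  For illustration (a sketch; the conversion is essentially
  (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK, with a floor
  of MINSIZE): assuming SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and
  MINSIZE == 16, a request of 13 bytes becomes (13 + 4 + 7) & ~7 == 24,
  while a request of 1 byte is raised to the MINSIZE of 16.
*/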
 19.3532 +
 19.3533 +  checked_request2size(bytes, nb);
 19.3534 +
 19.3535 +  /*
 19.3536 +    Bypass search if no frees yet
 19.3537 +   */
 19.3538 +  if (!have_anychunks(av)) {
 19.3539 +    if (av->max_fast == 0) /* initialization check */
 19.3540 +      malloc_consolidate(av);
 19.3541 +    goto use_top;
 19.3542 +  }
 19.3543 +
 19.3544 +  /*
 19.3545 +    If the size qualifies as a fastbin, first check corresponding bin.
 19.3546 +  */
 19.3547 +
 19.3548 +  if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) { 
 19.3549 +    fb = &(av->fastbins[(fastbin_index(nb))]);
 19.3550 +    if ( (victim = *fb) != 0) {
 19.3551 +      *fb = victim->fd;
 19.3552 +      check_remalloced_chunk(victim, nb);
 19.3553 +      return chunk2mem(victim);
 19.3554 +    }
 19.3555 +  }
 19.3556 +
 19.3557 +  /*
 19.3558 +    If a small request, check regular bin.  Since these "smallbins"
 19.3559 +    hold one size each, no searching within bins is necessary.
 19.3560 +    (For a large request, we need to wait until unsorted chunks are
 19.3561 +    processed to find best fit. But for small ones, fits are exact
 19.3562 +    anyway, so we can check now, which is faster.)
 19.3563 +  */
 19.3564 +
 19.3565 +  if (in_smallbin_range(nb)) {
 19.3566 +    idx = smallbin_index(nb);
 19.3567 +    bin = bin_at(av,idx);
 19.3568 +
 19.3569 +    if ( (victim = last(bin)) != bin) {
 19.3570 +      bck = victim->bk;
 19.3571 +      set_inuse_bit_at_offset(victim, nb);
 19.3572 +      bin->bk = bck;
 19.3573 +      bck->fd = bin;
 19.3574 +      
 19.3575 +      check_malloced_chunk(victim, nb);
 19.3576 +      return chunk2mem(victim);
 19.3577 +    }
 19.3578 +  }
 19.3579 +
 19.3580 +  /* 
 19.3581 +     If this is a large request, consolidate fastbins before continuing.
 19.3582 +     While it might look excessive to kill all fastbins before
 19.3583 +     even seeing if there is space available, this avoids
 19.3584 +     fragmentation problems normally associated with fastbins.
 19.3585 +     Also, in practice, programs tend to have runs of either small or
 19.3586 +     large requests, but less often mixtures, so consolidation is not 
 19.3587 +     invoked all that often in most programs. And the programs that
 19.3588 +     do invoke it frequently are those that would otherwise fragment.
 19.3589 +  */
 19.3590 +
 19.3591 +  else {
 19.3592 +    idx = largebin_index(nb);
 19.3593 +    if (have_fastchunks(av)) 
 19.3594 +      malloc_consolidate(av);
 19.3595 +  }
 19.3596 +
 19.3597 +  /*
 19.3598 +    Process recently freed or remaindered chunks, taking one only if
 19.3599 +    it is an exact fit, or, if this is a small request, the chunk is the remainder from
 19.3600 +    the most recent non-exact fit.  Place other traversed chunks in
 19.3601 +    bins.  Note that this step is the only place in any routine where
 19.3602 +    chunks are placed in bins.
 19.3603 +  */
 19.3604 +    
 19.3605 +  while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
 19.3606 +    bck = victim->bk;
 19.3607 +    size = chunksize(victim);
 19.3608 +    
 19.3609 +    /* 
 19.3610 +       If a small request, try to use last remainder if it is the
 19.3611 +       only chunk in unsorted bin.  This helps promote locality for
 19.3612 +       runs of consecutive small requests. This is the only
 19.3613 +       exception to best-fit, and applies only when there is
 19.3614 +       no exact fit for a small chunk.
 19.3615 +    */
 19.3616 +    
 19.3617 +    if (in_smallbin_range(nb) && 
 19.3618 +        bck == unsorted_chunks(av) &&
 19.3619 +        victim == av->last_remainder &&
 19.3620 +        (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
 19.3621 +      
 19.3622 +      /* split and reattach remainder */
 19.3623 +      remainder_size = size - nb;
 19.3624 +      remainder = chunk_at_offset(victim, nb);
 19.3625 +      unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
 19.3626 +      av->last_remainder = remainder; 
 19.3627 +      remainder->bk = remainder->fd = unsorted_chunks(av);
 19.3628 +      
 19.3629 +      set_head(victim, nb | PREV_INUSE);
 19.3630 +      set_head(remainder, remainder_size | PREV_INUSE);
 19.3631 +      set_foot(remainder, remainder_size);
 19.3632 +      
 19.3633 +      check_malloced_chunk(victim, nb);
 19.3634 +      return chunk2mem(victim);
 19.3635 +    }
 19.3636 +    
 19.3637 +    /* remove from unsorted list */
 19.3638 +    unsorted_chunks(av)->bk = bck;
 19.3639 +    bck->fd = unsorted_chunks(av);
 19.3640 +    
 19.3641 +    /* Take now instead of binning if exact fit */
 19.3642 +    
 19.3643 +    if (size == nb) {
 19.3644 +      set_inuse_bit_at_offset(victim, size);
 19.3645 +      check_malloced_chunk(victim, nb);
 19.3646 +      return chunk2mem(victim);
 19.3647 +    }
 19.3648 +    
 19.3649 +    /* place chunk in bin */
 19.3650 +    
 19.3651 +    if (in_smallbin_range(size)) {
 19.3652 +      victim_index = smallbin_index(size);
 19.3653 +      bck = bin_at(av, victim_index);
 19.3654 +      fwd = bck->fd;
 19.3655 +    }
 19.3656 +    else {
 19.3657 +      victim_index = largebin_index(size);
 19.3658 +      bck = bin_at(av, victim_index);
 19.3659 +      fwd = bck->fd;
 19.3660 +      
 19.3661 +      if (fwd != bck) {
 19.3662 +        /* if smaller than smallest, place first */
 19.3663 +        if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
 19.3664 +          fwd = bck;
 19.3665 +          bck = bck->bk;
 19.3666 +        }
 19.3667 +        else if ((CHUNK_SIZE_T)(size) >= 
 19.3668 +                 (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
 19.3669 +          
 19.3670 +          /* maintain large bins in sorted order */
 19.3671 +          size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
 19.3672 +          while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size)) 
 19.3673 +            fwd = fwd->fd;
 19.3674 +          bck = fwd->bk;
 19.3675 +        }
 19.3676 +      }
 19.3677 +    }
 19.3678 +      
 19.3679 +    mark_bin(av, victim_index);
 19.3680 +    victim->bk = bck;
 19.3681 +    victim->fd = fwd;
 19.3682 +    fwd->bk = victim;
 19.3683 +    bck->fd = victim;
 19.3684 +  }
 19.3685 +  
 19.3686 +  /*
 19.3687 +    If a large request, scan through the chunks of current bin to
 19.3688 +    find one that fits.  (This will be the smallest that fits unless
 19.3689 +    FIRST_SORTED_BIN_SIZE has been changed from default.)  This is
 19.3690 +    the only step where an unbounded number of chunks might be
 19.3691 +    scanned without doing anything useful with them. However the
 19.3692 +    lists tend to be short.
 19.3693 +  */
 19.3694 +  
 19.3695 +  if (!in_smallbin_range(nb)) {
 19.3696 +    bin = bin_at(av, idx);
 19.3697 +    
 19.3698 +    for (victim = last(bin); victim != bin; victim = victim->bk) {
 19.3699 +      size = chunksize(victim);
 19.3700 +      
 19.3701 +      if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
 19.3702 +        remainder_size = size - nb;
 19.3703 +        unlink(victim, bck, fwd);
 19.3704 +        
 19.3705 +        /* Exhaust */
 19.3706 +        if (remainder_size < MINSIZE)  {
 19.3707 +          set_inuse_bit_at_offset(victim, size);
 19.3708 +          check_malloced_chunk(victim, nb);
 19.3709 +          return chunk2mem(victim);
 19.3710 +        }
 19.3711 +        /* Split */
 19.3712 +        else {
 19.3713 +          remainder = chunk_at_offset(victim, nb);
 19.3714 +          unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
 19.3715 +          remainder->bk = remainder->fd = unsorted_chunks(av);
 19.3716 +          set_head(victim, nb | PREV_INUSE);
 19.3717 +          set_head(remainder, remainder_size | PREV_INUSE);
 19.3718 +          set_foot(remainder, remainder_size);
 19.3719 +          check_malloced_chunk(victim, nb);
 19.3720 +          return chunk2mem(victim);
 19.3721 +        } 
 19.3722 +      }
 19.3723 +    }    
 19.3724 +  }
 19.3725 +
 19.3726 +  /*
 19.3727 +    Search for a chunk by scanning bins, starting with next largest
 19.3728 +    bin. This search is strictly by best-fit; i.e., the smallest
 19.3729 +    (with ties going to approximately the least recently used) chunk
 19.3730 +    that fits is selected.
 19.3731 +    
 19.3732 +    The bitmap avoids needing to check that most blocks are nonempty.
 19.3733 +  */
 19.3734 +    
 19.3735 +  ++idx;
 19.3736 +  bin = bin_at(av,idx);
 19.3737 +  block = idx2block(idx);
 19.3738 +  map = av->binmap[block];
 19.3739 +  bit = idx2bit(idx);
 19.3740 +  
 19.3741 +  for (;;) {
 19.3742 +    
 19.3743 +    /* Skip rest of block if there are no more set bits in this block.  */
 19.3744 +    if (bit > map || bit == 0) {
 19.3745 +      do {
 19.3746 +        if (++block >= BINMAPSIZE)  /* out of bins */
 19.3747 +          goto use_top;
 19.3748 +      } while ( (map = av->binmap[block]) == 0);
 19.3749 +      
 19.3750 +      bin = bin_at(av, (block << BINMAPSHIFT));
 19.3751 +      bit = 1;
 19.3752 +    }
 19.3753 +    
 19.3754 +    /* Advance to bin with set bit. There must be one. */
 19.3755 +    while ((bit & map) == 0) {
 19.3756 +      bin = next_bin(bin);
 19.3757 +      bit <<= 1;
 19.3758 +      assert(bit != 0);
 19.3759 +    }
 19.3760 +    
 19.3761 +    /* Inspect the bin. It is likely to be non-empty */
 19.3762 +    victim = last(bin);
 19.3763 +    
 19.3764 +    /*  If a false alarm (empty bin), clear the bit. */
 19.3765 +    if (victim == bin) {
 19.3766 +      av->binmap[block] = map &= ~bit; /* Write through */
 19.3767 +      bin = next_bin(bin);
 19.3768 +      bit <<= 1;
 19.3769 +    }
 19.3770 +    
 19.3771 +    else {
 19.3772 +      size = chunksize(victim);
 19.3773 +      
 19.3774 +      /*  We know the first chunk in this bin is big enough to use. */
 19.3775 +      assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
 19.3776 +      
 19.3777 +      remainder_size = size - nb;
 19.3778 +      
 19.3779 +      /* unlink */
 19.3780 +      bck = victim->bk;
 19.3781 +      bin->bk = bck;
 19.3782 +      bck->fd = bin;
 19.3783 +      
 19.3784 +      /* Exhaust */
 19.3785 +      if (remainder_size < MINSIZE) {
 19.3786 +        set_inuse_bit_at_offset(victim, size);
 19.3787 +        check_malloced_chunk(victim, nb);
 19.3788 +        return chunk2mem(victim);
 19.3789 +      }
 19.3790 +      
 19.3791 +      /* Split */
 19.3792 +      else {
 19.3793 +        remainder = chunk_at_offset(victim, nb);
 19.3794 +        
 19.3795 +        unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
 19.3796 +        remainder->bk = remainder->fd = unsorted_chunks(av);
 19.3797 +        /* advertise as last remainder */
 19.3798 +        if (in_smallbin_range(nb)) 
 19.3799 +          av->last_remainder = remainder; 
 19.3800 +        
 19.3801 +        set_head(victim, nb | PREV_INUSE);
 19.3802 +        set_head(remainder, remainder_size | PREV_INUSE);
 19.3803 +        set_foot(remainder, remainder_size);
 19.3804 +        check_malloced_chunk(victim, nb);
 19.3805 +        return chunk2mem(victim);
 19.3806 +      }
 19.3807 +    }
 19.3808 +  }
 19.3809 +
 19.3810 +  use_top:    
 19.3811 +  /*
 19.3812 +    If large enough, split off the chunk bordering the end of memory
 19.3813 +    (held in av->top). Note that this is in accord with the best-fit
 19.3814 +    search rule.  In effect, av->top is treated as larger (and thus
 19.3815 +    less well fitting) than any other available chunk since it can
 19.3816 +    be extended to be as large as necessary (up to system
 19.3817 +    limitations).
 19.3818 +    
 19.3819 +    We require that av->top always exists (i.e., has size >=
 19.3820 +    MINSIZE) after initialization, so if it would otherwise be
 19.3821 +    exhausted by the current request, it is replenished. (The main
 19.3822 +    reason for ensuring it exists is that we may need MINSIZE space
 19.3823 +    to put in fenceposts in sysmalloc.)
 19.3824 +  */
 19.3825 +  
 19.3826 +  victim = av->top;
 19.3827 +  size = chunksize(victim);
 19.3828 +  
 19.3829 +  if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
 19.3830 +    remainder_size = size - nb;
 19.3831 +    remainder = chunk_at_offset(victim, nb);
 19.3832 +    av->top = remainder;
 19.3833 +    set_head(victim, nb | PREV_INUSE);
 19.3834 +    set_head(remainder, remainder_size | PREV_INUSE);
 19.3835 +    
 19.3836 +    check_malloced_chunk(victim, nb);
 19.3837 +    return chunk2mem(victim);
 19.3838 +  }
 19.3839 +  
 19.3840 +  /* 
 19.3841 +     If no space in top, relay to handle system-dependent cases 
 19.3842 +  */
 19.3843 +  return sYSMALLOc(nb, av);    
 19.3844 +}
 19.3845 +
 19.3846 +/*
 19.3847 +  ------------------------------ free ------------------------------
 19.3848 +*/
 19.3849 +
 19.3850 +#if __STD_C
 19.3851 +void fREe(Void_t* mem)
 19.3852 +#else
 19.3853 +void fREe(mem) Void_t* mem;
 19.3854 +#endif
 19.3855 +{
 19.3856 +  mstate av = get_malloc_state();
 19.3857 +
 19.3858 +  mchunkptr       p;           /* chunk corresponding to mem */
 19.3859 +  INTERNAL_SIZE_T size;        /* its size */
 19.3860 +  mfastbinptr*    fb;          /* associated fastbin */
 19.3861 +  mchunkptr       nextchunk;   /* next contiguous chunk */
 19.3862 +  INTERNAL_SIZE_T nextsize;    /* its size */
 19.3863 +  int             nextinuse;   /* true if nextchunk is used */
 19.3864 +  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
 19.3865 +  mchunkptr       bck;         /* misc temp for linking */
 19.3866 +  mchunkptr       fwd;         /* misc temp for linking */
 19.3867 +
 19.3868 +  /* free(0) has no effect */
 19.3869 +  if (mem != 0) {
 19.3870 +    p = mem2chunk(mem);
 19.3871 +    size = chunksize(p);
 19.3872 +
 19.3873 +    check_inuse_chunk(p);
 19.3874 +
 19.3875 +    /*
 19.3876 +      If eligible, place chunk on a fastbin so it can be found
 19.3877 +      and used quickly in malloc.
 19.3878 +    */
 19.3879 +
 19.3880 +    if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)
 19.3881 +
 19.3882 +#if TRIM_FASTBINS
 19.3883 +        /* 
 19.3884 +           If TRIM_FASTBINS set, don't place chunks
 19.3885 +           bordering top into fastbins
 19.3886 +        */
 19.3887 +        && (chunk_at_offset(p, size) != av->top)
 19.3888 +#endif
 19.3889 +        ) {
 19.3890 +
 19.3891 +      set_fastchunks(av);
 19.3892 +      fb = &(av->fastbins[fastbin_index(size)]);
 19.3893 +      p->fd = *fb;
 19.3894 +      *fb = p;
 19.3895 +    }
 19.3896 +
 19.3897 +    /*
 19.3898 +       Consolidate other non-mmapped chunks as they arrive.
 19.3899 +    */
 19.3900 +
 19.3901 +    else if (!chunk_is_mmapped(p)) {
 19.3902 +      set_anychunks(av);
 19.3903 +
 19.3904 +      nextchunk = chunk_at_offset(p, size);
 19.3905 +      nextsize = chunksize(nextchunk);
 19.3906 +
 19.3907 +      /* consolidate backward */
 19.3908 +      if (!prev_inuse(p)) {
 19.3909 +        prevsize = p->prev_size;
 19.3910 +        size += prevsize;
 19.3911 +        p = chunk_at_offset(p, -((long) prevsize));
 19.3912 +        unlink(p, bck, fwd);
 19.3913 +      }
 19.3914 +
 19.3915 +      if (nextchunk != av->top) {
 19.3916 +        /* get and clear inuse bit */
 19.3917 +        nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
 19.3918 +        set_head(nextchunk, nextsize);
 19.3919 +
 19.3920 +        /* consolidate forward */
 19.3921 +        if (!nextinuse) {
 19.3922 +          unlink(nextchunk, bck, fwd);
 19.3923 +          size += nextsize;
 19.3924 +        }
 19.3925 +
 19.3926 +        /*
 19.3927 +          Place the chunk in unsorted chunk list. Chunks are
 19.3928 +          not placed into regular bins until after they have
 19.3929 +          been given one chance to be used in malloc.
 19.3930 +        */
 19.3931 +
 19.3932 +        bck = unsorted_chunks(av);
 19.3933 +        fwd = bck->fd;
 19.3934 +        p->bk = bck;
 19.3935 +        p->fd = fwd;
 19.3936 +        bck->fd = p;
 19.3937 +        fwd->bk = p;
 19.3938 +
 19.3939 +        set_head(p, size | PREV_INUSE);
 19.3940 +        set_foot(p, size);
 19.3941 +        
 19.3942 +        check_free_chunk(p);
 19.3943 +      }
 19.3944 +
 19.3945 +      /*
 19.3946 +         If the chunk borders the current high end of memory,
 19.3947 +         consolidate into top
 19.3948 +      */
 19.3949 +
 19.3950 +      else {
 19.3951 +        size += nextsize;
 19.3952 +        set_head(p, size | PREV_INUSE);
 19.3953 +        av->top = p;
 19.3954 +        check_chunk(p);
 19.3955 +      }
 19.3956 +
 19.3957 +      /*
 19.3958 +        If freeing a large space, consolidate possibly-surrounding
 19.3959 +        chunks. Then, if the total unused topmost memory exceeds trim
 19.3960 +        threshold, ask malloc_trim to reduce top.
 19.3961 +
 19.3962 +        Unless max_fast is 0, we don't know if there are fastbins
 19.3963 +        bordering top, so we cannot tell for sure whether threshold
 19.3964 +        has been reached unless fastbins are consolidated.  But we
 19.3965 +        don't want to consolidate on each free.  As a compromise,
 19.3966 +        consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
 19.3967 +        is reached.
 19.3968 +      */
 19.3969 +
 19.3970 +      if ((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) { 
 19.3971 +        if (have_fastchunks(av)) 
 19.3972 +          malloc_consolidate(av);
 19.3973 +
 19.3974 +#ifndef MORECORE_CANNOT_TRIM        
 19.3975 +        if ((CHUNK_SIZE_T)(chunksize(av->top)) >= 
 19.3976 +            (CHUNK_SIZE_T)(av->trim_threshold))
 19.3977 +          sYSTRIm(av->top_pad, av);
 19.3978 +#endif
 19.3979 +      }
 19.3980 +
 19.3981 +    }
 19.3982 +    /*
 19.3983 +      If the chunk was allocated via mmap, release via munmap()
 19.3984 +      Note that if HAVE_MMAP is false but chunk_is_mmapped is
 19.3985 +      true, then user must have overwritten memory. There's nothing
 19.3986 +      we can do to catch this error unless DEBUG is set, in which case
 19.3987 +      check_inuse_chunk (above) will have triggered error.
 19.3988 +    */
 19.3989 +
 19.3990 +    else {
 19.3991 +#if HAVE_MMAP
 19.3992 +      int ret;
 19.3993 +      INTERNAL_SIZE_T offset = p->prev_size;
 19.3994 +      av->n_mmaps--;
 19.3995 +      av->mmapped_mem -= (size + offset);
 19.3996 +      ret = munmap((char*)p - offset, size + offset);
 19.3997 +      /* munmap returns non-zero on failure */
 19.3998 +      assert(ret == 0);
 19.3999 +#endif
 19.4000 +    }
 19.4001 +  }
 19.4002 +}
 19.4003 +
 19.4004 +/*
 19.4005 +  ------------------------- malloc_consolidate -------------------------
 19.4006 +
 19.4007 +  malloc_consolidate is a specialized version of free() that tears
 19.4008 +  down chunks held in fastbins.  Free itself cannot be used for this
 19.4009 +  purpose since, among other things, it might place chunks back onto
 19.4010 +  fastbins.  So, instead, we need to use a minor variant of the same
 19.4011 +  code.
 19.4012 +  
 19.4013 +  Also, because this routine needs to be called the first time through
 19.4014 +  malloc anyway, it turns out to be the perfect place to trigger
 19.4015 +  initialization code.
 19.4016 +*/
 19.4017 +
 19.4018 +#if __STD_C
 19.4019 +static void malloc_consolidate(mstate av)
 19.4020 +#else
 19.4021 +static void malloc_consolidate(av) mstate av;
 19.4022 +#endif
 19.4023 +{
 19.4024 +  mfastbinptr*    fb;                 /* current fastbin being consolidated */
 19.4025 +  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
 19.4026 +  mchunkptr       p;                  /* current chunk being consolidated */
 19.4027 +  mchunkptr       nextp;              /* next chunk to consolidate */
 19.4028 +  mchunkptr       unsorted_bin;       /* bin header */
 19.4029 +  mchunkptr       first_unsorted;     /* chunk to link to */
 19.4030 +
 19.4031 +  /* These have same use as in free() */
 19.4032 +  mchunkptr       nextchunk;
 19.4033 +  INTERNAL_SIZE_T size;
 19.4034 +  INTERNAL_SIZE_T nextsize;
 19.4035 +  INTERNAL_SIZE_T prevsize;
 19.4036 +  int             nextinuse;
 19.4037 +  mchunkptr       bck;
 19.4038 +  mchunkptr       fwd;
 19.4039 +
 19.4040 +  /*
 19.4041 +    If max_fast is 0, we know that av hasn't
 19.4042 +    yet been initialized, in which case do so below
 19.4043 +  */
 19.4044 +
 19.4045 +  if (av->max_fast != 0) {
 19.4046 +    clear_fastchunks(av);
 19.4047 +
 19.4048 +    unsorted_bin = unsorted_chunks(av);
 19.4049 +
 19.4050 +    /*
 19.4051 +      Remove each chunk from fast bin and consolidate it, placing it
 19.4052 +      then in unsorted bin. Among other reasons for doing this,
 19.4053 +      placing in unsorted bin avoids needing to calculate actual bins
 19.4054 +      until malloc is sure that chunks aren't immediately going to be
 19.4055 +      reused anyway.
 19.4056 +    */
 19.4057 +    
 19.4058 +    maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
 19.4059 +    fb = &(av->fastbins[0]);
 19.4060 +    do {
 19.4061 +      if ( (p = *fb) != 0) {
 19.4062 +        *fb = 0;
 19.4063 +        
 19.4064 +        do {
 19.4065 +          check_inuse_chunk(p);
 19.4066 +          nextp = p->fd;
 19.4067 +          
 19.4068 +          /* Slightly streamlined version of consolidation code in free() */
 19.4069 +          size = p->size & ~PREV_INUSE;
 19.4070 +          nextchunk = chunk_at_offset(p, size);
 19.4071 +          nextsize = chunksize(nextchunk);
 19.4072 +          
 19.4073 +          if (!prev_inuse(p)) {
 19.4074 +            prevsize = p->prev_size;
 19.4075 +            size += prevsize;
 19.4076 +            p = chunk_at_offset(p, -((long) prevsize));
 19.4077 +            unlink(p, bck, fwd);
 19.4078 +          }
 19.4079 +          
 19.4080 +          if (nextchunk != av->top) {
 19.4081 +            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
 19.4082 +            set_head(nextchunk, nextsize);
 19.4083 +            
 19.4084 +            if (!nextinuse) {
 19.4085 +              size += nextsize;
 19.4086 +              unlink(nextchunk, bck, fwd);
 19.4087 +            }
 19.4088 +            
 19.4089 +            first_unsorted = unsorted_bin->fd;
 19.4090 +            unsorted_bin->fd = p;
 19.4091 +            first_unsorted->bk = p;
 19.4092 +            
 19.4093 +            set_head(p, size | PREV_INUSE);
 19.4094 +            p->bk = unsorted_bin;
 19.4095 +            p->fd = first_unsorted;
 19.4096 +            set_foot(p, size);
 19.4097 +          }
 19.4098 +          
 19.4099 +          else {
 19.4100 +            size += nextsize;
 19.4101 +            set_head(p, size | PREV_INUSE);
 19.4102 +            av->top = p;
 19.4103 +          }
 19.4104 +          
 19.4105 +        } while ( (p = nextp) != 0);
 19.4106 +        
 19.4107 +      }
 19.4108 +    } while (fb++ != maxfb);
 19.4109 +  }
 19.4110 +  else {
 19.4111 +    malloc_init_state(av);
 19.4112 +    check_malloc_state();
 19.4113 +  }
 19.4114 +}
 19.4115 +
 19.4116 +/*
 19.4117 +  ------------------------------ realloc ------------------------------
 19.4118 +*/
 19.4119 +
 19.4120 +
 19.4121 +#if __STD_C
 19.4122 +Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
 19.4123 +#else
 19.4124 +Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
 19.4125 +#endif
 19.4126 +{
 19.4127 +  mstate av = get_malloc_state();
 19.4128 +
 19.4129 +  INTERNAL_SIZE_T  nb;              /* padded request size */
 19.4130 +
 19.4131 +  mchunkptr        oldp;            /* chunk corresponding to oldmem */
 19.4132 +  INTERNAL_SIZE_T  oldsize;         /* its size */
 19.4133 +
 19.4134 +  mchunkptr        newp;            /* chunk to return */
 19.4135 +  INTERNAL_SIZE_T  newsize;         /* its size */
 19.4136 +  Void_t*          newmem;          /* corresponding user mem */
 19.4137 +
 19.4138 +  mchunkptr        next;            /* next contiguous chunk after oldp */
 19.4139 +
 19.4140 +  mchunkptr        remainder;       /* extra space at end of newp */
 19.4141 +  CHUNK_SIZE_T     remainder_size;  /* its size */
 19.4142 +
 19.4143 +  mchunkptr        bck;             /* misc temp for linking */
 19.4144 +  mchunkptr        fwd;             /* misc temp for linking */
 19.4145 +
 19.4146 +  CHUNK_SIZE_T     copysize;        /* bytes to copy */
 19.4147 +  unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
 19.4148 +  INTERNAL_SIZE_T* s;               /* copy source */ 
 19.4149 +  INTERNAL_SIZE_T* d;               /* copy destination */
 19.4150 +
 19.4151 +
 19.4152 +#ifdef REALLOC_ZERO_BYTES_FREES
 19.4153 +  if (bytes == 0) {
 19.4154 +    fREe(oldmem);
 19.4155 +    return 0;
 19.4156 +  }
 19.4157 +#endif
 19.4158 +
 19.4159 +  /* realloc of null is supposed to be same as malloc */
 19.4160 +  if (oldmem == 0) return mALLOc(bytes);
 19.4161 +
 19.4162 +  checked_request2size(bytes, nb);
 19.4163 +
 19.4164 +  oldp    = mem2chunk(oldmem);
 19.4165 +  oldsize = chunksize(oldp);
 19.4166 +
 19.4167 +  check_inuse_chunk(oldp);
 19.4168 +
 19.4169 +  if (!chunk_is_mmapped(oldp)) {
 19.4170 +
 19.4171 +    if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb)) {
 19.4172 +      /* already big enough; split below */
 19.4173 +      newp = oldp;
 19.4174 +      newsize = oldsize;
 19.4175 +    }
 19.4176 +
 19.4177 +    else {
 19.4178 +      next = chunk_at_offset(oldp, oldsize);
 19.4179 +
 19.4180 +      /* Try to expand forward into top */
 19.4181 +      if (next == av->top &&
 19.4182 +          (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
 19.4183 +          (CHUNK_SIZE_T)(nb + MINSIZE)) {
 19.4184 +        set_head_size(oldp, nb);
 19.4185 +        av->top = chunk_at_offset(oldp, nb);
 19.4186 +        set_head(av->top, (newsize - nb) | PREV_INUSE);
 19.4187 +        return chunk2mem(oldp);
 19.4188 +      }
 19.4189 +      
 19.4190 +      /* Try to expand forward into next chunk;  split off remainder below */
 19.4191 +      else if (next != av->top && 
 19.4192 +               !inuse(next) &&
 19.4193 +               (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
 19.4194 +               (CHUNK_SIZE_T)(nb)) {
 19.4195 +        newp = oldp;
 19.4196 +        unlink(next, bck, fwd);
 19.4197 +      }
 19.4198 +
 19.4199 +      /* allocate, copy, free */
 19.4200 +      else {
 19.4201 +        newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
 19.4202 +        if (newmem == 0)
 19.4203 +          return 0; /* propagate failure */
 19.4204 +      
 19.4205 +        newp = mem2chunk(newmem);
 19.4206 +        newsize = chunksize(newp);
 19.4207 +        
 19.4208 +        /*
 19.4209 +          Avoid copy if newp is next chunk after oldp.
 19.4210 +        */
 19.4211 +        if (newp == next) {
 19.4212 +          newsize += oldsize;
 19.4213 +          newp = oldp;
 19.4214 +        }
 19.4215 +        else {
 19.4216 +          /*
 19.4217 +            Unroll copy of <= 36 bytes (72 if 8-byte sizes)
 19.4218 +            We know that contents have an odd number of
 19.4219 +            INTERNAL_SIZE_T-sized words; minimally 3.
 19.4220 +          */
 19.4221 +          
 19.4222 +          copysize = oldsize - SIZE_SZ;
 19.4223 +          s = (INTERNAL_SIZE_T*)(oldmem);
 19.4224 +          d = (INTERNAL_SIZE_T*)(newmem);
 19.4225 +          ncopies = copysize / sizeof(INTERNAL_SIZE_T);
 19.4226 +          assert(ncopies >= 3);
 19.4227 +          
 19.4228 +          if (ncopies > 9)
 19.4229 +            MALLOC_COPY(d, s, copysize);
 19.4230 +          
 19.4231 +          else {
 19.4232 +            *(d+0) = *(s+0);
 19.4233 +            *(d+1) = *(s+1);
 19.4234 +            *(d+2) = *(s+2);
 19.4235 +            if (ncopies > 4) {
 19.4236 +              *(d+3) = *(s+3);
 19.4237 +              *(d+4) = *(s+4);
 19.4238 +              if (ncopies > 6) {
 19.4239 +                *(d+5) = *(s+5);
 19.4240 +                *(d+6) = *(s+6);
 19.4241 +                if (ncopies > 8) {
 19.4242 +                  *(d+7) = *(s+7);
 19.4243 +                  *(d+8) = *(s+8);
 19.4244 +                }
 19.4245 +              }
 19.4246 +            }
 19.4247 +          }
 19.4248 +          
 19.4249 +          fREe(oldmem);
 19.4250 +          check_inuse_chunk(newp);
 19.4251 +          return chunk2mem(newp);
 19.4252 +        }
 19.4253 +      }
 19.4254 +    }
 19.4255 +
 19.4256 +    /* If possible, free extra space in old or extended chunk */
 19.4257 +
 19.4258 +    assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));
 19.4259 +
 19.4260 +    remainder_size = newsize - nb;
 19.4261 +
 19.4262 +    if (remainder_size < MINSIZE) { /* not enough extra to split off */
 19.4263 +      set_head_size(newp, newsize);
 19.4264 +      set_inuse_bit_at_offset(newp, newsize);
 19.4265 +    }
 19.4266 +    else { /* split remainder */
 19.4267 +      remainder = chunk_at_offset(newp, nb);
 19.4268 +      set_head_size(newp, nb);
 19.4269 +      set_head(remainder, remainder_size | PREV_INUSE);
 19.4270 +      /* Mark remainder as inuse so free() won't complain */
 19.4271 +      set_inuse_bit_at_offset(remainder, remainder_size);
 19.4272 +      fREe(chunk2mem(remainder)); 
 19.4273 +    }
 19.4274 +
 19.4275 +    check_inuse_chunk(newp);
 19.4276 +    return chunk2mem(newp);
 19.4277 +  }
 19.4278 +
 19.4279 +  /*
 19.4280 +    Handle mmap cases
 19.4281 +  */
 19.4282 +
 19.4283 +  else {
 19.4284 +#if HAVE_MMAP
 19.4285 +
 19.4286 +#if HAVE_MREMAP
 19.4287 +    INTERNAL_SIZE_T offset = oldp->prev_size;
 19.4288 +    size_t pagemask = av->pagesize - 1;
 19.4289 +    char *cp;
 19.4290 +    CHUNK_SIZE_T  sum;
 19.4291 +    
 19.4292 +    /* Note the extra SIZE_SZ overhead */
 19.4293 +    newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
 19.4294 +
 19.4295 +    /* don't need to remap if still within same page */
 19.4296 +    if (oldsize == newsize - offset) 
 19.4297 +      return oldmem;
 19.4298 +
 19.4299 +    cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
 19.4300 +    
 19.4301 +    if (cp != (char*)MORECORE_FAILURE) {
 19.4302 +
 19.4303 +      newp = (mchunkptr)(cp + offset);
 19.4304 +      set_head(newp, (newsize - offset)|IS_MMAPPED);
 19.4305 +      
 19.4306 +      assert(aligned_OK(chunk2mem(newp)));
 19.4307 +      assert((newp->prev_size == offset));
 19.4308 +      
 19.4309 +      /* update statistics */
 19.4310 +      sum = av->mmapped_mem += newsize - oldsize;
 19.4311 +      if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem)) 
 19.4312 +        av->max_mmapped_mem = sum;
 19.4313 +      sum += av->sbrked_mem;
 19.4314 +      if (sum > (CHUNK_SIZE_T)(av->max_total_mem)) 
 19.4315 +        av->max_total_mem = sum;
 19.4316 +      
 19.4317 +      return chunk2mem(newp);
 19.4318 +    }
 19.4319 +#endif
 19.4320 +
 19.4321 +    /* Note the extra SIZE_SZ overhead. */
 19.4322 +    if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ)) 
 19.4323 +      newmem = oldmem; /* do nothing */
 19.4324 +    else {
 19.4325 +      /* Must alloc, copy, free. */
 19.4326 +      newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
 19.4327 +      if (newmem != 0) {
 19.4328 +        MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
 19.4329 +        fREe(oldmem);
 19.4330 +      }
 19.4331 +    }
 19.4332 +    return newmem;
 19.4333 +
 19.4334 +#else 
 19.4335 +    /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
 19.4336 +    check_malloc_state();
 19.4337 +    MALLOC_FAILURE_ACTION;
 19.4338 +    return 0;
 19.4339 +#endif
 19.4340 +  }
 19.4341 +}
 19.4342 +
 19.4343 +/*
 19.4344 +  ------------------------------ memalign ------------------------------
 19.4345 +*/
 19.4346 +
 19.4347 +#if __STD_C
 19.4348 +Void_t* mEMALIGn(size_t alignment, size_t bytes)
 19.4349 +#else
 19.4350 +Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
 19.4351 +#endif
 19.4352 +{
 19.4353 +  INTERNAL_SIZE_T nb;             /* padded  request size */
 19.4354 +  char*           m;              /* memory returned by malloc call */
 19.4355 +  mchunkptr       p;              /* corresponding chunk */
 19.4356 +  char*           brk;            /* alignment point within p */
 19.4357 +  mchunkptr       newp;           /* chunk to return */
 19.4358 +  INTERNAL_SIZE_T newsize;        /* its size */
 19.4359 +  INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
 19.4360 +  mchunkptr       remainder;      /* spare room at end to split off */
 19.4361 +  CHUNK_SIZE_T    remainder_size; /* its size */
 19.4362 +  INTERNAL_SIZE_T size;
 19.4363 +
 19.4364 +  /* If need less alignment than we give anyway, just relay to malloc */
 19.4365 +
 19.4366 +  if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
 19.4367 +
 19.4368 +  /* Otherwise, ensure that it is at least a minimum chunk size */
 19.4369 +
 19.4370 +  if (alignment <  MINSIZE) alignment = MINSIZE;
 19.4371 +
 19.4372 +  /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
 19.4373 +  if ((alignment & (alignment - 1)) != 0) {
 19.4374 +    size_t a = MALLOC_ALIGNMENT * 2;
 19.4375 +    while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
 19.4376 +    alignment = a;
 19.4377 +  }
 19.4378 +
 19.4379 +  checked_request2size(bytes, nb);
 19.4380 +
 19.4381 +  /*
 19.4382 +    Strategy: find a spot within that chunk that meets the alignment
 19.4383 +    request, and then possibly free the leading and trailing space.
 19.4384 +  */
 19.4385 +
 19.4386 +
 19.4387 +  /* Call malloc with worst case padding to hit alignment. */
 19.4388 +
 19.4389 +  m  = (char*)(mALLOc(nb + alignment + MINSIZE));
 19.4390 +
 19.4391 +  if (m == 0) return 0; /* propagate failure */
 19.4392 +
 19.4393 +  p = mem2chunk(m);
 19.4394 +
 19.4395 +  if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */
 19.4396 +
 19.4397 +    /*
 19.4398 +      Find an aligned spot inside chunk.  Since we need to give back
 19.4399 +      leading space in a chunk of at least MINSIZE, if the first
 19.4400 +      calculation places us at a spot with less than MINSIZE leader,
 19.4401 +      we can move to the next aligned spot -- we've allocated enough
 19.4402 +      total room so that this is always possible.
 19.4403 +    */
 19.4404 +
 19.4405 +    brk = (char*)mem2chunk((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
 19.4406 +                           -((signed long) alignment)));
 19.4407 +    if ((CHUNK_SIZE_T)(brk - (char*)(p)) < MINSIZE)
 19.4408 +      brk += alignment;
 19.4409 +
 19.4410 +    newp = (mchunkptr)brk;
 19.4411 +    leadsize = brk - (char*)(p);
 19.4412 +    newsize = chunksize(p) - leadsize;
 19.4413 +
 19.4414 +    /* For mmapped chunks, just adjust offset */
 19.4415 +    if (chunk_is_mmapped(p)) {
 19.4416 +      newp->prev_size = p->prev_size + leadsize;
 19.4417 +      set_head(newp, newsize|IS_MMAPPED);
 19.4418 +      return chunk2mem(newp);
 19.4419 +    }
 19.4420 +
 19.4421 +    /* Otherwise, give back leader, use the rest */
 19.4422 +    set_head(newp, newsize | PREV_INUSE);
 19.4423 +    set_inuse_bit_at_offset(newp, newsize);
 19.4424 +    set_head_size(p, leadsize);
 19.4425 +    fREe(chunk2mem(p));
 19.4426 +    p = newp;
 19.4427 +
 19.4428 +    assert (newsize >= nb &&
 19.4429 +            (((PTR_UINT)(chunk2mem(p))) % alignment) == 0);
 19.4430 +  }
 19.4431 +
 19.4432 +  /* Also give back spare room at the end */
 19.4433 +  if (!chunk_is_mmapped(p)) {
 19.4434 +    size = chunksize(p);
 19.4435 +    if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
 19.4436 +      remainder_size = size - nb;
 19.4437 +      remainder = chunk_at_offset(p, nb);
 19.4438 +      set_head(remainder, remainder_size | PREV_INUSE);
 19.4439 +      set_head_size(p, nb);
 19.4440 +      fREe(chunk2mem(remainder));
 19.4441 +    }
 19.4442 +  }
 19.4443 +
 19.4444 +  check_inuse_chunk(p);
 19.4445 +  return chunk2mem(p);
 19.4446 +}
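/*
  Usage sketch (illustrative): mEMALIGn(64, 200) returns a block whose
  address is a multiple of 64 and that can later be released with fREe();
  an alignment that is not a power of two is rounded up to the next power
  of two by the loop above.
*/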
 19.4447 +
 19.4448 +/*
 19.4449 +  ------------------------------ calloc ------------------------------
 19.4450 +*/
 19.4451 +
 19.4452 +#if __STD_C
 19.4453 +Void_t* cALLOc(size_t n_elements, size_t elem_size)
 19.4454 +#else
 19.4455 +Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
 19.4456 +#endif
 19.4457 +{
 19.4458 +  mchunkptr p;
 19.4459 +  CHUNK_SIZE_T  clearsize;
 19.4460 +  CHUNK_SIZE_T  nclears;
 19.4461 +  INTERNAL_SIZE_T* d;
 19.4462 +
 19.4463 +  Void_t* mem = mALLOc(n_elements * elem_size);
 19.4464 +
 19.4465 +  if (mem != 0) {
 19.4466 +    p = mem2chunk(mem);
 19.4467 +
 19.4468 +    if (!chunk_is_mmapped(p))
 19.4469 +    {  
 19.4470 +      /*
 19.4471 +        Unroll clear of <= 36 bytes (72 if 8-byte sizes)
 19.4472 +        We know that contents have an odd number of
 19.4473 +        INTERNAL_SIZE_T-sized words; minimally 3.
 19.4474 +      */
 19.4475 +
 19.4476 +      d = (INTERNAL_SIZE_T*)mem;
 19.4477 +      clearsize = chunksize(p) - SIZE_SZ;
 19.4478 +      nclears = clearsize / sizeof(INTERNAL_SIZE_T);
 19.4479 +      assert(nclears >= 3);
 19.4480 +
 19.4481 +      if (nclears > 9)
 19.4482 +        MALLOC_ZERO(d, clearsize);
 19.4483 +
 19.4484 +      else {
 19.4485 +        *(d+0) = 0;
 19.4486 +        *(d+1) = 0;
 19.4487 +        *(d+2) = 0;
 19.4488 +        if (nclears > 4) {
 19.4489 +          *(d+3) = 0;
 19.4490 +          *(d+4) = 0;
 19.4491 +          if (nclears > 6) {
 19.4492 +            *(d+5) = 0;
 19.4493 +            *(d+6) = 0;
 19.4494 +            if (nclears > 8) {
 19.4495 +              *(d+7) = 0;
 19.4496 +              *(d+8) = 0;
 19.4497 +            }
 19.4498 +          }
 19.4499 +        }
 19.4500 +      }
 19.4501 +    }
 19.4502 +#if ! MMAP_CLEARS
 19.4503 +    else
 19.4504 +    {
 19.4505 +      d = (INTERNAL_SIZE_T*)mem;
 19.4506 +      /*
 19.4507 +        Note the additional SIZE_SZ
 19.4508 +      */
 19.4509 +      clearsize = chunksize(p) - 2*SIZE_SZ;
 19.4510 +      MALLOC_ZERO(d, clearsize);
 19.4511 +    }
 19.4512 +#endif
 19.4513 +  }
 19.4514 +  return mem;
 19.4515 +}
 19.4516 +
 19.4517 +/*
 19.4518 +  ------------------------------ cfree ------------------------------
 19.4519 +*/
 19.4520 +
 19.4521 +#if __STD_C
 19.4522 +void cFREe(Void_t *mem)
 19.4523 +#else
 19.4524 +void cFREe(mem) Void_t *mem;
 19.4525 +#endif
 19.4526 +{
 19.4527 +  fREe(mem);
 19.4528 +}
 19.4529 +
 19.4530 +/*
 19.4531 +  ------------------------- independent_calloc -------------------------
 19.4532 +*/
 19.4533 +
 19.4534 +#if __STD_C
 19.4535 +Void_t** iCALLOc(size_t n_elements, size_t elem_size, Void_t* chunks[])
 19.4536 +#else
 19.4537 +Void_t** iCALLOc(n_elements, elem_size, chunks) size_t n_elements; size_t elem_size; Void_t* chunks[];
 19.4538 +#endif
 19.4539 +{
 19.4540 +  size_t sz = elem_size; /* serves as 1-element array */
 19.4541 +  /* opts arg of 3 means all elements are same size, and should be cleared */
 19.4542 +  return iALLOc(n_elements, &sz, 3, chunks);
 19.4543 +}
 19.4544 +
 19.4545 +/*
 19.4546 +  ------------------------- independent_comalloc -------------------------
 19.4547 +*/
 19.4548 +
 19.4549 +#if __STD_C
 19.4550 +Void_t** iCOMALLOc(size_t n_elements, size_t sizes[], Void_t* chunks[])
 19.4551 +#else
 19.4552 +Void_t** iCOMALLOc(n_elements, sizes, chunks) size_t n_elements; size_t sizes[]; Void_t* chunks[];
 19.4553 +#endif
 19.4554 +{
 19.4555 +  return iALLOc(n_elements, sizes, 0, chunks);
 19.4556 +}
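/*
  Usage sketch (illustrative only; the exported names normally map to
  independent_calloc/independent_comalloc via the public-name macros):
  allocate an int array and a double array in one underlying chunk, each
  of which can still be released individually with the normal free call:

      size_t sizes[2];
      void*  parts[2];
      sizes[0] = 100 * sizeof(int);
      sizes[1] = 100 * sizeof(double);
      if (iCOMALLOc(2, sizes, parts) != 0) {
        int*    is = (int*)    parts[0];
        double* ds = (double*) parts[1];
        ...
      }
*/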
 19.4557 +
 19.4558 +
 19.4559 +/*
 19.4560 +  ------------------------------ ialloc ------------------------------
 19.4561 +  ialloc provides common support for independent_X routines, handling all of
 19.4562 +  the combinations that can result.
 19.4563 +
 19.4564 +  The opts arg has:
 19.4565 +    bit 0 set if all elements are same size (using sizes[0])
 19.4566 +    bit 1 set if elements should be zeroed
 19.4567 +*/
 19.4568 +
 19.4569 +
 19.4570 +#if __STD_C
 19.4571 +static Void_t** iALLOc(size_t n_elements, 
 19.4572 +                       size_t* sizes,  
 19.4573 +                       int opts,
 19.4574 +                       Void_t* chunks[])
 19.4575 +#else
 19.4576 +static Void_t** iALLOc(n_elements, sizes, opts, chunks) size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
 19.4577 +#endif
 19.4578 +{
 19.4579 +  mstate av = get_malloc_state();
 19.4580 +  INTERNAL_SIZE_T element_size;   /* chunksize of each element, if all same */
 19.4581 +  INTERNAL_SIZE_T contents_size;  /* total size of elements */
 19.4582 +  INTERNAL_SIZE_T array_size;     /* request size of pointer array */
 19.4583 +  Void_t*         mem;            /* malloced aggregate space */
 19.4584 +  mchunkptr       p;              /* corresponding chunk */
 19.4585 +  INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
 19.4586 +  Void_t**        marray;         /* either "chunks" or malloced ptr array */
 19.4587 +  mchunkptr       array_chunk;    /* chunk for malloced ptr array */
 19.4588 +  int             mmx;            /* to disable mmap */
 19.4589 +  INTERNAL_SIZE_T size;           
 19.4590 +  size_t          i;
 19.4591 +
 19.4592 +  /* Ensure initialization */
 19.4593 +  if (av->max_fast == 0) malloc_consolidate(av);
 19.4594 +
 19.4595 +  /* compute array length, if needed */
 19.4596 +  if (chunks != 0) {
 19.4597 +    if (n_elements == 0)
 19.4598 +      return chunks; /* nothing to do */
 19.4599 +    marray = chunks;
 19.4600 +    array_size = 0;
 19.4601 +  }
 19.4602 +  else {
 19.4603 +    /* if empty req, must still return chunk representing empty array */
 19.4604 +    if (n_elements == 0) 
 19.4605 +      return (Void_t**) mALLOc(0);
 19.4606 +    marray = 0;
 19.4607 +    array_size = request2size(n_elements * (sizeof(Void_t*)));
 19.4608 +  }
 19.4609 +
 19.4610 +  /* compute total element size */
 19.4611 +  if (opts & 0x1) { /* all-same-size */
 19.4612 +    element_size = request2size(*sizes);
 19.4613 +    contents_size = n_elements * element_size;
 19.4614 +  }
 19.4615 +  else { /* add up all the sizes */
 19.4616 +    element_size = 0;
 19.4617 +    contents_size = 0;
 19.4618 +    for (i = 0; i != n_elements; ++i) 
 19.4619 +      contents_size += request2size(sizes[i]);     
 19.4620 +  }
 19.4621 +
 19.4622 +  /* subtract out alignment bytes from total to minimize overallocation */
 19.4623 +  size = contents_size + array_size - MALLOC_ALIGN_MASK;
 19.4624 +  
 19.4625 +  /* 
 19.4626 +     Allocate the aggregate chunk.
 19.4627 +     But first disable mmap so malloc won't use it, since
 19.4628 +     we would not be able to later free/realloc space internal
 19.4629 +     to a segregated mmap region.
 19.4630 + */
 19.4631 +  mmx = av->n_mmaps_max;   /* disable mmap */
 19.4632 +  av->n_mmaps_max = 0;
 19.4633 +  mem = mALLOc(size);
 19.4634 +  av->n_mmaps_max = mmx;   /* reset mmap */
 19.4635 +  if (mem == 0) 
 19.4636 +    return 0;
 19.4637 +
 19.4638 +  p = mem2chunk(mem);
 19.4639 +  assert(!chunk_is_mmapped(p)); 
 19.4640 +  remainder_size = chunksize(p);
 19.4641 +
 19.4642 +  if (opts & 0x2) {       /* optionally clear the elements */
 19.4643 +    MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
 19.4644 +  }
 19.4645 +
 19.4646 +  /* If not provided, allocate the pointer array as final part of chunk */
 19.4647 +  if (marray == 0) {
 19.4648 +    array_chunk = chunk_at_offset(p, contents_size);
 19.4649 +    marray = (Void_t**) (chunk2mem(array_chunk));
 19.4650 +    set_head(array_chunk, (remainder_size - contents_size) | PREV_INUSE);
 19.4651 +    remainder_size = contents_size;
 19.4652 +  }
 19.4653 +
 19.4654 +  /* split out elements */
 19.4655 +  for (i = 0; ; ++i) {
 19.4656 +    marray[i] = chunk2mem(p);
 19.4657 +    if (i != n_elements-1) {
 19.4658 +      if (element_size != 0) 
 19.4659 +        size = element_size;
 19.4660 +      else
 19.4661 +        size = request2size(sizes[i]);          
 19.4662 +      remainder_size -= size;
 19.4663 +      set_head(p, size | PREV_INUSE);
 19.4664 +      p = chunk_at_offset(p, size);
 19.4665 +    }
 19.4666 +    else { /* the final element absorbs any overallocation slop */
 19.4667 +      set_head(p, remainder_size | PREV_INUSE);
 19.4668 +      break;
 19.4669 +    }
 19.4670 +  }
 19.4671 +
 19.4672 +#if DEBUG
 19.4673 +  if (marray != chunks) {
 19.4674 +    /* final element must have exactly exhausted chunk */
 19.4675 +    if (element_size != 0) 
 19.4676 +      assert(remainder_size == element_size);
 19.4677 +    else
 19.4678 +      assert(remainder_size == request2size(sizes[i]));
 19.4679 +    check_inuse_chunk(mem2chunk(marray));
 19.4680 +  }
 19.4681 +
 19.4682 +  for (i = 0; i != n_elements; ++i)
 19.4683 +    check_inuse_chunk(mem2chunk(marray[i]));
 19.4684 +#endif
 19.4685 +
 19.4686 +  return marray;
 19.4687 +}
 19.4688 +
 19.4689 +
 19.4690 +/*
 19.4691 +  ------------------------------ valloc ------------------------------
 19.4692 +*/
 19.4693 +
 19.4694 +#if __STD_C
 19.4695 +Void_t* vALLOc(size_t bytes)
 19.4696 +#else
 19.4697 +Void_t* vALLOc(bytes) size_t bytes;
 19.4698 +#endif
 19.4699 +{
 19.4700 +  /* Ensure initialization */
 19.4701 +  mstate av = get_malloc_state();
 19.4702 +  if (av->max_fast == 0) malloc_consolidate(av);
 19.4703 +  return mEMALIGn(av->pagesize, bytes);
 19.4704 +}
 19.4705 +
 19.4706 +/*
 19.4707 +  ------------------------------ pvalloc ------------------------------
 19.4708 +*/
 19.4709 +
 19.4710 +
 19.4711 +#if __STD_C
 19.4712 +Void_t* pVALLOc(size_t bytes)
 19.4713 +#else
 19.4714 +Void_t* pVALLOc(bytes) size_t bytes;
 19.4715 +#endif
 19.4716 +{
 19.4717 +  mstate av = get_malloc_state();
 19.4718 +  size_t pagesz;
 19.4719 +
 19.4720 +  /* Ensure initialization */
 19.4721 +  if (av->max_fast == 0) malloc_consolidate(av);
 19.4722 +  pagesz = av->pagesize;
 19.4723 +  return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
 19.4724 +}
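/*
  A sketch of the rounding above, assuming pagesz == 4096: a request of
  5000 bytes becomes (5000 + 4095) & ~4095 == 8192, i.e. two whole pages,
  which is then handed to mEMALIGn for page alignment.
*/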
 19.4725 +   
 19.4726 +
 19.4727 +/*
 19.4728 +  ------------------------------ malloc_trim ------------------------------
 19.4729 +*/
 19.4730 +
 19.4731 +#if __STD_C
 19.4732 +int mTRIm(size_t pad)
 19.4733 +#else
 19.4734 +int mTRIm(pad) size_t pad;
 19.4735 +#endif
 19.4736 +{
 19.4737 +  mstate av = get_malloc_state();
 19.4738 +  /* Ensure initialization/consolidation */
 19.4739 +  malloc_consolidate(av);
 19.4740 +
 19.4741 +#ifndef MORECORE_CANNOT_TRIM        
 19.4742 +  return sYSTRIm(pad, av);
 19.4743 +#else
 19.4744 +  return 0;
 19.4745 +#endif
 19.4746 +}
 19.4747 +
 19.4748 +
 19.4749 +/*
 19.4750 +  ------------------------- malloc_usable_size -------------------------
 19.4751 +*/
 19.4752 +
 19.4753 +#if __STD_C
 19.4754 +size_t mUSABLe(Void_t* mem)
 19.4755 +#else
 19.4756 +size_t mUSABLe(mem) Void_t* mem;
 19.4757 +#endif
 19.4758 +{
 19.4759 +  mchunkptr p;
 19.4760 +  if (mem != 0) {
 19.4761 +    p = mem2chunk(mem);
 19.4762 +    if (chunk_is_mmapped(p))
 19.4763 +      return chunksize(p) - 2*SIZE_SZ;
 19.4764 +    else if (inuse(p))
 19.4765 +      return chunksize(p) - SIZE_SZ;
 19.4766 +  }
 19.4767 +  return 0;
 19.4768 +}
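/*
  For illustration, continuing the 32-bit sketch used earlier: if
  mALLOc(13) is satisfied by a 24-byte chunk, mUSABLe() on that pointer
  reports 24 - SIZE_SZ == 20 bytes, i.e. slightly more than was requested.
*/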
 19.4769 +
 19.4770 +/*
 19.4771 +  ------------------------------ mallinfo ------------------------------
 19.4772 +*/
 19.4773 +
 19.4774 +struct mallinfo mALLINFo()
 19.4775 +{
 19.4776 +  mstate av = get_malloc_state();
 19.4777 +  struct mallinfo mi;
 19.4778 +  int i;
 19.4779 +  mbinptr b;
 19.4780 +  mchunkptr p;
 19.4781 +  INTERNAL_SIZE_T avail;
 19.4782 +  INTERNAL_SIZE_T fastavail;
 19.4783 +  int nblocks;
 19.4784 +  int nfastblocks;
 19.4785 +
 19.4786 +  /* Ensure initialization */
 19.4787 +  if (av->top == 0)  malloc_consolidate(av);
 19.4788 +
 19.4789 +  check_malloc_state();
 19.4790 +
 19.4791 +  /* Account for top */
 19.4792 +  avail = chunksize(av->top);
 19.4793 +  nblocks = 1;  /* top always exists */
 19.4794 +
 19.4795 +  /* traverse fastbins */
 19.4796 +  nfastblocks = 0;
 19.4797 +  fastavail = 0;
 19.4798 +
 19.4799 +  for (i = 0; i < NFASTBINS; ++i) {
 19.4800 +    for (p = av->fastbins[i]; p != 0; p = p->fd) {
 19.4801 +      ++nfastblocks;
 19.4802 +      fastavail += chunksize(p);
 19.4803 +    }
 19.4804 +  }
 19.4805 +
 19.4806 +  avail += fastavail;
 19.4807 +
 19.4808 +  /* traverse regular bins */
 19.4809 +  for (i = 1; i < NBINS; ++i) {
 19.4810 +    b = bin_at(av, i);
 19.4811 +    for (p = last(b); p != b; p = p->bk) {
 19.4812 +      ++nblocks;
 19.4813 +      avail += chunksize(p);
 19.4814 +    }
 19.4815 +  }
 19.4816 +
 19.4817 +  mi.smblks = nfastblocks;
 19.4818 +  mi.ordblks = nblocks;
 19.4819 +  mi.fordblks = avail;
 19.4820 +  mi.uordblks = av->sbrked_mem - avail;
 19.4821 +  mi.arena = av->sbrked_mem;
 19.4822 +  mi.hblks = av->n_mmaps;
 19.4823 +  mi.hblkhd = av->mmapped_mem;
 19.4824 +  mi.fsmblks = fastavail;
 19.4825 +  mi.keepcost = chunksize(av->top);
 19.4826 +  mi.usmblks = av->max_total_mem;
 19.4827 +  return mi;
 19.4828 +}
 19.4829 +
 19.4830 +/*
 19.4831 +  ------------------------------ malloc_stats ------------------------------
 19.4832 +*/
 19.4833 +
 19.4834 +void mSTATs()
 19.4835 +{
 19.4836 +  struct mallinfo mi = mALLINFo();
 19.4837 +
 19.4838 +#ifdef WIN32
 19.4839 +  {
 19.4840 +    CHUNK_SIZE_T  free, reserved, committed;
 19.4841 +    vminfo (&free, &reserved, &committed);
 19.4842 +    fprintf(stderr, "free bytes       = %10lu\n", 
 19.4843 +            free);
 19.4844 +    fprintf(stderr, "reserved bytes   = %10lu\n", 
 19.4845 +            reserved);
 19.4846 +    fprintf(stderr, "committed bytes  = %10lu\n", 
 19.4847 +            committed);
 19.4848 +  }
 19.4849 +#endif
 19.4850 +
 19.4851 +/* RN XXX  */
 19.4852 +  printf("max system bytes = %10lu\n",
 19.4853 +          (CHUNK_SIZE_T)(mi.usmblks));
 19.4854 +  printf("system bytes     = %10lu\n",
 19.4855 +          (CHUNK_SIZE_T)(mi.arena + mi.hblkhd));
 19.4856 +  printf("in use bytes     = %10lu\n",
 19.4857 +          (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
 19.4858 +
 19.4859 +#ifdef WIN32 
 19.4860 +  {
 19.4861 +    CHUNK_SIZE_T  kernel, user;
 19.4862 +    if (cpuinfo (TRUE, &kernel, &user)) {
 19.4863 +      fprintf(stderr, "kernel ms        = %10lu\n", 
 19.4864 +              kernel);
 19.4865 +      fprintf(stderr, "user ms          = %10lu\n", 
 19.4866 +              user);
 19.4867 +    }
 19.4868 +  }
 19.4869 +#endif
 19.4870 +}
 19.4871 +
 19.4872 +
 19.4873 +/*
 19.4874 +  ------------------------------ mallopt ------------------------------
 19.4875 +*/
 19.4876 +
 19.4877 +#if __STD_C
 19.4878 +int mALLOPt(int param_number, int value)
 19.4879 +#else
 19.4880 +int mALLOPt(param_number, value) int param_number; int value;
 19.4881 +#endif
 19.4882 +{
 19.4883 +  mstate av = get_malloc_state();
 19.4884 +  /* Ensure initialization/consolidation */
 19.4885 +  malloc_consolidate(av);
 19.4886 +
 19.4887 +  switch(param_number) {
 19.4888 +  case M_MXFAST:
 19.4889 +    if (value >= 0 && value <= MAX_FAST_SIZE) {
 19.4890 +      set_max_fast(av, value);
 19.4891 +      return 1;
 19.4892 +    }
 19.4893 +    else
 19.4894 +      return 0;
 19.4895 +
 19.4896 +  case M_TRIM_THRESHOLD:
 19.4897 +    av->trim_threshold = value;
 19.4898 +    return 1;
 19.4899 +
 19.4900 +  case M_TOP_PAD:
 19.4901 +    av->top_pad = value;
 19.4902 +    return 1;
 19.4903 +
 19.4904 +  case M_MMAP_THRESHOLD:
 19.4905 +    av->mmap_threshold = value;
 19.4906 +    return 1;
 19.4907 +
 19.4908 +  case M_MMAP_MAX:
 19.4909 +#if !HAVE_MMAP
 19.4910 +    if (value != 0)
 19.4911 +      return 0;
 19.4912 +#endif
 19.4913 +    av->n_mmaps_max = value;
 19.4914 +    return 1;
 19.4915 +
 19.4916 +  default:
 19.4917 +    return 0;
 19.4918 +  }
 19.4919 +}
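/*
  Usage sketch (illustrative; under the usual name mapping these calls are
  reached as mallopt()):

      mALLOPt(M_MXFAST, 64);                cap the fastbin-eligible size
      mALLOPt(M_TRIM_THRESHOLD, 128*1024);  trim top once 128KB is unused
      mALLOPt(M_MMAP_MAX, 0);               never satisfy requests via mmap

  Each call returns 1 on success and 0 for an unknown parameter or an
  out-of-range value.
*/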
 19.4920 +
 19.4921 +
 19.4922 +/* 
 19.4923 +  -------------------- Alternative MORECORE functions --------------------
 19.4924 +*/
 19.4925 +
 19.4926 +
 19.4927 +/*
 19.4928 +  General Requirements for MORECORE.
 19.4929 +
 19.4930 +  The MORECORE function must have the following properties:
 19.4931 +
 19.4932 +  If MORECORE_CONTIGUOUS is false:
 19.4933 +
 19.4934 +    * MORECORE must allocate in multiples of pagesize. It will
 19.4935 +      only be called with arguments that are multiples of pagesize.
 19.4936 +
 19.4937 +    * MORECORE(0) must return an address that is at least 
 19.4938 +      MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
 19.4939 +
 19.4940 +  else (i.e. If MORECORE_CONTIGUOUS is true):
 19.4941 +
 19.4942 +    * Consecutive calls to MORECORE with positive arguments
 19.4943 +      return increasing addresses, indicating that space has been
 19.4944 +      contiguously extended. 
 19.4945 +
 19.4946 +    * MORECORE need not allocate in multiples of pagesize.
 19.4947 +      Calls to MORECORE need not have args of multiples of pagesize.
 19.4948 +
 19.4949 +    * MORECORE need not page-align.
 19.4950 +
 19.4951 +  In either case:
 19.4952 +
 19.4953 +    * MORECORE may allocate more memory than requested. (Or even less,
 19.4954 +      but this will generally result in a malloc failure.)
 19.4955 +
 19.4956 +    * MORECORE must not allocate memory when given argument zero, but
 19.4957 +      instead return one past the end address of memory from previous
 19.4958 +      nonzero call. This malloc does NOT call MORECORE(0)
 19.4959 +      until at least one call with positive arguments is made, so
 19.4960 +      the initial value returned is not important.
 19.4961 +
 19.4962 +    * Even though consecutive calls to MORECORE need not return contiguous
 19.4963 +      addresses, it must be OK for malloc'ed chunks to span multiple
 19.4964 +      regions in those cases where they do happen to be contiguous.
 19.4965 +
 19.4966 +    * MORECORE need not handle negative arguments -- it may instead
 19.4967 +      just return MORECORE_FAILURE when given negative arguments.
 19.4968 +      Negative arguments are always multiples of pagesize. MORECORE
 19.4969 +      must not misinterpret negative args as large positive unsigned
 19.4970 +      args. You can suppress all such calls from even occurring by defining
 19.4971 +      MORECORE_CANNOT_TRIM.
 19.4972 +
 19.4973 +  There is some variation across systems about the type of the
 19.4974 +  argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
 19.4975 +  actually be size_t, because sbrk supports negative args, so it is
 19.4976 +  normally the signed type of the same width as size_t (sometimes
 19.4977 +  declared as "intptr_t", and sometimes "ptrdiff_t").  It doesn't much
 19.4978 +  matter though. Internally, we use "long" as arguments, which should
 19.4979 +  work across all reasonable possibilities.
 19.4980 +
 19.4981 +  Additionally, if MORECORE ever returns failure for a positive
 19.4982 +  request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
 19.4983 +  system allocator. This is a useful backup strategy for systems with
 19.4984 +  holes in address spaces -- in this case sbrk cannot contiguously
 19.4985 +  expand the heap, but mmap may be able to map noncontiguous space.
 19.4986 +
 19.4987 +  If you'd like mmap to ALWAYS be used, you can define MORECORE to be
 19.4988 +  a function that always returns MORECORE_FAILURE.
 19.4989 +
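  For example, a minimal sketch (failing_morecore is only an illustrative
  name, not something defined in this file):

      static void* failing_morecore(long increment) {
        (void) increment;       -- the requested size is simply ignored
        return (void*) MORECORE_FAILURE;
      }
      #define MORECORE failing_morecore
      #define MORECORE_CONTIGUOUS 0
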
 19.4990 +  Malloc only has limited ability to detect failures of MORECORE
 19.4991 +  to supply contiguous space when it says it can. In particular,
 19.4992 +  multithreaded programs that do not use locks may result in
 19.4993 +  race conditions across calls to MORECORE that result in gaps
 19.4994 +  that cannot be detected as such, and subsequent corruption.
 19.4995 +
 19.4996 +  If you are using this malloc with something other than sbrk (or its
 19.4997 +  emulation) to supply memory regions, you probably want to set
 19.4998 +  MORECORE_CONTIGUOUS as false.  As an example, here is a custom
 19.4999 +  allocator kindly contributed for pre-OSX macOS.  It uses virtually
 19.5000 +  but not necessarily physically contiguous non-paged memory (locked
 19.5001 +  in, present and won't get swapped out).  You can use it by
 19.5002 +  uncommenting this section, adding some #includes, and setting up the
 19.5003 +  appropriate defines above:
 19.5004 +
 19.5005 +      #define MORECORE osMoreCore
 19.5006 +      #define MORECORE_CONTIGUOUS 0
 19.5007 +
 19.5008 +  There is also a shutdown routine that should somehow be called for
 19.5009 +  cleanup upon program exit.
 19.5010 +
 19.5011 +  #define MAX_POOL_ENTRIES 100
 19.5012 +  #define MINIMUM_MORECORE_SIZE  (64 * 1024)
 19.5013 +  static int next_os_pool;
 19.5014 +  void *our_os_pools[MAX_POOL_ENTRIES];
 19.5015 +
 19.5016 +  void *osMoreCore(int size)
 19.5017 +  {
 19.5018 +    void *ptr = 0;
 19.5019 +    static void *sbrk_top = 0;
 19.5020 +
 19.5021 +    if (size > 0)
 19.5022 +    {
 19.5023 +      if (size < MINIMUM_MORECORE_SIZE)
 19.5024 +         size = MINIMUM_MORECORE_SIZE;
 19.5025 +      if (CurrentExecutionLevel() == kTaskLevel)
 19.5026 +         ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
 19.5027 +      if (ptr == 0)
 19.5028 +      {
 19.5029 +        return (void *) MORECORE_FAILURE;
 19.5030 +      }
 19.5031 +      // save ptrs so they can be freed during cleanup
 19.5032 +      our_os_pools[next_os_pool] = ptr;
 19.5033 +      next_os_pool++;
 19.5034 +      ptr = (void *) ((((CHUNK_SIZE_T) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
 19.5035 +      sbrk_top = (char *) ptr + size;
 19.5036 +      return ptr;
 19.5037 +    }
 19.5038 +    else if (size < 0)
 19.5039 +    {
 19.5040 +      // we don't currently support shrink behavior
 19.5041 +      return (void *) MORECORE_FAILURE;
 19.5042 +    }
 19.5043 +    else
 19.5044 +    {
 19.5045 +      return sbrk_top;
 19.5046 +    }
 19.5047 +  }
 19.5048 +
 19.5049 +  // cleanup any allocated memory pools
 19.5050 +  // called as last thing before shutting down driver
 19.5051 +
 19.5052 +  void osCleanupMem(void)
 19.5053 +  {
 19.5054 +    void **ptr;
 19.5055 +
 19.5056 +    for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
 19.5057 +      if (*ptr)
 19.5058 +      {
 19.5059 +         PoolDeallocate(*ptr);
 19.5060 +         *ptr = 0;
 19.5061 +      }
 19.5062 +  }
 19.5063 +
 19.5064 +*/
 19.5065 +
 19.5066 +
 19.5067 +/* 
 19.5068 +  -------------------------------------------------------------- 
 19.5069 +
 19.5070 +  Emulation of sbrk for win32. 
 19.5071 +  Donated by J. Walter <Walter@GeNeSys-e.de>.
 19.5072 +  For additional information about this code, and malloc on Win32, see 
 19.5073 +     http://www.genesys-e.de/jwalter/
 19.5074 +*/
 19.5075 +
 19.5076 +
 19.5077 +#ifdef WIN32
 19.5078 +
 19.5079 +#ifdef _DEBUG
 19.5080 +/* #define TRACE */
 19.5081 +#endif
 19.5082 +
 19.5083 +/* Support for USE_MALLOC_LOCK */
 19.5084 +#ifdef USE_MALLOC_LOCK
 19.5085 +
 19.5086 +/* Wait for spin lock */
 19.5087 +static int slwait (int *sl) {
 19.5088 +    while (InterlockedCompareExchange ((void **) sl, (void *) 1, (void *) 0) != 0) 
 19.5089 +	    Sleep (0);
 19.5090 +    return 0;
 19.5091 +}
 19.5092 +
 19.5093 +/* Release spin lock */
 19.5094 +static int slrelease (int *sl) {
 19.5095 +    InterlockedExchange (sl, 0);
 19.5096 +    return 0;
 19.5097 +}
 19.5098 +
 19.5099 +#ifdef NEEDED
 19.5100 +/* Spin lock for emulation code */
 19.5101 +static int g_sl;
 19.5102 +#endif
 19.5103 +
 19.5104 +#endif /* USE_MALLOC_LOCK */
 19.5105 +
 19.5106 +/* getpagesize for windows */
 19.5107 +static long getpagesize (void) {
 19.5108 +    static long g_pagesize = 0;
 19.5109 +    if (! g_pagesize) {
 19.5110 +        SYSTEM_INFO system_info;
 19.5111 +        GetSystemInfo (&system_info);
 19.5112 +        g_pagesize = system_info.dwPageSize;
 19.5113 +    }
 19.5114 +    return g_pagesize;
 19.5115 +}
 19.5116 +static long getregionsize (void) {
 19.5117 +    static long g_regionsize = 0;
 19.5118 +    if (! g_regionsize) {
 19.5119 +        SYSTEM_INFO system_info;
 19.5120 +        GetSystemInfo (&system_info);
 19.5121 +        g_regionsize = system_info.dwAllocationGranularity;
 19.5122 +    }
 19.5123 +    return g_regionsize;
 19.5124 +}
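/* Illustration only: on typical x86 Windows dwPageSize is 4096 and
   dwAllocationGranularity is 65536, so address space is reserved below in
   64KB units but committed page by page. */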
 19.5125 +
 19.5126 +/* A region list entry */
 19.5127 +typedef struct _region_list_entry {
 19.5128 +    void *top_allocated;
 19.5129 +    void *top_committed;
 19.5130 +    void *top_reserved;
 19.5131 +    long reserve_size;
 19.5132 +    struct _region_list_entry *previous;
 19.5133 +} region_list_entry;
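/* Invariant maintained by the emulated sbrk below (see its asserts):
   top_reserved - reserve_size <= top_allocated <= top_committed <= top_reserved,
   with top_committed page-aligned and top_reserved region-aligned. */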
 19.5134 +
 19.5135 +/* Allocate and link a region entry in the region list */
 19.5136 +static int region_list_append (region_list_entry **last, void *base_reserved, long reserve_size) {
 19.5137 +    region_list_entry *next = HeapAlloc (GetProcessHeap (), 0, sizeof (region_list_entry));
 19.5138 +    if (! next)
 19.5139 +        return FALSE;
 19.5140 +    next->top_allocated = (char *) base_reserved;
 19.5141 +    next->top_committed = (char *) base_reserved;
 19.5142 +    next->top_reserved = (char *) base_reserved + reserve_size;
 19.5143 +    next->reserve_size = reserve_size;
 19.5144 +    next->previous = *last;
 19.5145 +    *last = next;
 19.5146 +    return TRUE;
 19.5147 +}
 19.5148 +/* Free and unlink the last region entry from the region list */
 19.5149 +static int region_list_remove (region_list_entry **last) {
 19.5150 +    region_list_entry *previous = (*last)->previous;
 19.5151 +    if (! HeapFree (GetProcessHeap (), 0, *last))  /* second argument is dwFlags, not a size */
 19.5152 +        return FALSE;
 19.5153 +    *last = previous;
 19.5154 +    return TRUE;
 19.5155 +}
 19.5156 +
 19.5157 +#define CEIL(size,to)	(((size)+(to)-1)&~((to)-1))
 19.5158 +#define FLOOR(size,to)	((size)&~((to)-1))
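/* Worked examples (illustration only; both macros assume "to" is a power of two):
     CEIL (5000, 4096)  == 8192    round up to the next multiple
     CEIL (4096, 4096)  == 4096    already aligned, unchanged
     FLOOR (5000, 4096) == 4096    round down to the previous multiple */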
 19.5159 +
 19.5160 +#define SBRK_SCALE  0
 19.5161 +/* #define SBRK_SCALE  1 */
 19.5162 +/* #define SBRK_SCALE  2 */
 19.5163 +/* #define SBRK_SCALE  4  */
 19.5164 +
 19.5165 +/* sbrk for windows */
 19.5166 +static void *sbrk (long size) {
 19.5167 +    static long g_pagesize, g_my_pagesize;
 19.5168 +    static long g_regionsize, g_my_regionsize;
 19.5169 +    static region_list_entry *g_last;
 19.5170 +    void *result = (void *) MORECORE_FAILURE;
 19.5171 +#ifdef TRACE
 19.5172 +    printf ("sbrk %d\n", size);
 19.5173 +#endif
 19.5174 +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
 19.5175 +    /* Wait for spin lock */
 19.5176 +    slwait (&g_sl);
 19.5177 +#endif
 19.5178 +    /* First time initialization */
 19.5179 +    if (! g_pagesize) {
 19.5180 +        g_pagesize = getpagesize ();
 19.5181 +        g_my_pagesize = g_pagesize << SBRK_SCALE;
 19.5182 +    }
 19.5183 +    if (! g_regionsize) {
 19.5184 +        g_regionsize = getregionsize ();
 19.5185 +        g_my_regionsize = g_regionsize << SBRK_SCALE;
 19.5186 +    }
 19.5187 +    if (! g_last) {
 19.5188 +        if (! region_list_append (&g_last, 0, 0)) 
 19.5189 +           goto sbrk_exit;
 19.5190 +    }
 19.5191 +    /* Assert invariants */
 19.5192 +    assert (g_last);
 19.5193 +    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
 19.5194 +            g_last->top_allocated <= g_last->top_committed);
 19.5195 +    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
 19.5196 +            g_last->top_committed <= g_last->top_reserved &&
 19.5197 +            (unsigned) g_last->top_committed % g_pagesize == 0);
 19.5198 +    assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
 19.5199 +    assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
 19.5200 +    /* Allocation requested? */
 19.5201 +    if (size >= 0) {
 19.5202 +        /* Allocation size is the requested size */
 19.5203 +        long allocate_size = size;
 19.5204 +        /* Compute the size to commit */
 19.5205 +        long to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
 19.5206 +        /* Do we reach the commit limit? */
 19.5207 +        if (to_commit > 0) {
 19.5208 +            /* Round size to commit */
 19.5209 +            long commit_size = CEIL (to_commit, g_my_pagesize);
 19.5210 +            /* Compute the size to reserve */
 19.5211 +            long to_reserve = (char *) g_last->top_committed + commit_size - (char *) g_last->top_reserved;
 19.5212 +            /* Do we reach the reserve limit? */
 19.5213 +            if (to_reserve > 0) {
 19.5214 +                /* Compute the remaining size to commit in the current region */
 19.5215 +                long remaining_commit_size = (char *) g_last->top_reserved - (char *) g_last->top_committed;
 19.5216 +                if (remaining_commit_size > 0) {
 19.5217 +                    /* Assert preconditions */
 19.5218 +                    assert ((unsigned) g_last->top_committed % g_pagesize == 0);
 19.5219 +                    assert (0 < remaining_commit_size && remaining_commit_size % g_pagesize == 0); {
 19.5220 +                        /* Commit this */
 19.5221 +                        void *base_committed = VirtualAlloc (g_last->top_committed, remaining_commit_size,
 19.5222 +							                                 MEM_COMMIT, PAGE_READWRITE);
 19.5223 +                        /* Check returned pointer for consistency */
 19.5224 +                        if (base_committed != g_last->top_committed)
 19.5225 +                            goto sbrk_exit;
 19.5226 +                        /* Assert postconditions */
 19.5227 +                        assert ((unsigned) base_committed % g_pagesize == 0);
 19.5228 +#ifdef TRACE
 19.5229 +                        printf ("Commit %p %d\n", base_committed, remaining_commit_size);
 19.5230 +#endif
 19.5231 +                        /* Adjust the regions commit top */
 19.5232 +                        g_last->top_committed = (char *) base_committed + remaining_commit_size;
 19.5233 +                    }
 19.5234 +                } {
 19.5235 +                    /* Now we are going to search and reserve. */
 19.5236 +                    int contiguous = -1;
 19.5237 +                    int found = FALSE;
 19.5238 +                    MEMORY_BASIC_INFORMATION memory_info;
 19.5239 +                    void *base_reserved;
 19.5240 +                    long reserve_size;
 19.5241 +                    do {
 19.5242 +                        /* Assume contiguous memory */
 19.5243 +                        contiguous = TRUE;
 19.5244 +                        /* Round size to reserve */
 19.5245 +                        reserve_size = CEIL (to_reserve, g_my_regionsize);
 19.5246 +                        /* Start with the current region's top */
 19.5247 +                        memory_info.BaseAddress = g_last->top_reserved;
 19.5248 +                        /* Assert preconditions */
 19.5249 +                        assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
 19.5250 +                        assert (0 < reserve_size && reserve_size % g_regionsize == 0);
 19.5251 +                        while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
 19.5252 +                            /* Assert postconditions */
 19.5253 +                            assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
 19.5254 +#ifdef TRACE
 19.5255 +                            printf ("Query %p %d %s\n", memory_info.BaseAddress, memory_info.RegionSize, 
 19.5256 +                                    memory_info.State == MEM_FREE ? "FREE": 
 19.5257 +                                    (memory_info.State == MEM_RESERVE ? "RESERVED":
 19.5258 +                                     (memory_info.State == MEM_COMMIT ? "COMMITTED": "?")));
 19.5259 +#endif
 19.5260 +                            /* Region is free, well aligned and big enough: we are done */
 19.5261 +                            if (memory_info.State == MEM_FREE &&
 19.5262 +                                (unsigned) memory_info.BaseAddress % g_regionsize == 0 &&
 19.5263 +                                memory_info.RegionSize >= (unsigned) reserve_size) {
 19.5264 +                                found = TRUE;
 19.5265 +                                break;
 19.5266 +                            }
 19.5267 +                            /* From now on we can't get contiguous memory! */
 19.5268 +                            contiguous = FALSE;
 19.5269 +                            /* Recompute size to reserve */
 19.5270 +                            reserve_size = CEIL (allocate_size, g_my_regionsize);
 19.5271 +                            memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
 19.5272 +                            /* Assert preconditions */
 19.5273 +                            assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
 19.5274 +                            assert (0 < reserve_size && reserve_size % g_regionsize == 0);
 19.5275 +                        }
 19.5276 +                        /* Search failed? */
 19.5277 +                        if (! found) 
 19.5278 +                            goto sbrk_exit;
 19.5279 +                        /* Assert preconditions */
 19.5280 +                        assert ((unsigned) memory_info.BaseAddress % g_regionsize == 0);
 19.5281 +                        assert (0 < reserve_size && reserve_size % g_regionsize == 0);
 19.5282 +                        /* Try to reserve this */
 19.5283 +                        base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size, 
 19.5284 +					                                  MEM_RESERVE, PAGE_NOACCESS);
 19.5285 +                        if (! base_reserved) {
 19.5286 +                            int rc = GetLastError ();
 19.5287 +                            if (rc != ERROR_INVALID_ADDRESS) 
 19.5288 +                                goto sbrk_exit;
 19.5289 +                        }
 19.5290 +                        /* A null pointer signals (hopefully) a race condition with another thread. */
 19.5291 +                        /* In this case, we try again. */
 19.5292 +                    } while (! base_reserved);
 19.5293 +                    /* Check returned pointer for consistency */
 19.5294 +                    if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress)
 19.5295 +                        goto sbrk_exit;
 19.5296 +                    /* Assert postconditions */
 19.5297 +                    assert ((unsigned) base_reserved % g_regionsize == 0);
 19.5298 +#ifdef TRACE
 19.5299 +                    printf ("Reserve %p %d\n", base_reserved, reserve_size);
 19.5300 +#endif
 19.5301 +                    /* Did we get contiguous memory? */
 19.5302 +                    if (contiguous) {
 19.5303 +                        long start_size = (char *) g_last->top_committed - (char *) g_last->top_allocated;
 19.5304 +                        /* Adjust allocation size */
 19.5305 +                        allocate_size -= start_size;
 19.5306 +                        /* Adjust the regions allocation top */
 19.5307 +                        g_last->top_allocated = g_last->top_committed;
 19.5308 +                        /* Recompute the size to commit */
 19.5309 +                        to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
 19.5310 +                        /* Round size to commit */
 19.5311 +                        commit_size = CEIL (to_commit, g_my_pagesize);
 19.5312 +                    } 
 19.5313 +                    /* Append the new region to the list */
 19.5314 +                    if (! region_list_append (&g_last, base_reserved, reserve_size))
 19.5315 +                        goto sbrk_exit;
 19.5316 +                    /* Didn't we get contiguous memory? */
 19.5317 +                    if (! contiguous) {
 19.5318 +                        /* Recompute the size to commit */
 19.5319 +                        to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
 19.5320 +                        /* Round size to commit */
 19.5321 +                        commit_size = CEIL (to_commit, g_my_pagesize);
 19.5322 +                    }
 19.5323 +                }
 19.5324 +            } 
 19.5325 +            /* Assert preconditions */
 19.5326 +            assert ((unsigned) g_last->top_committed % g_pagesize == 0);
 19.5327 +            assert (0 < commit_size && commit_size % g_pagesize == 0); {
 19.5328 +                /* Commit this */
 19.5329 +                void *base_committed = VirtualAlloc (g_last->top_committed, commit_size, 
 19.5330 +				    			                     MEM_COMMIT, PAGE_READWRITE);
 19.5331 +                /* Check returned pointer for consistency */
 19.5332 +                if (base_committed != g_last->top_committed)
 19.5333 +                    goto sbrk_exit;
 19.5334 +                /* Assert postconditions */
 19.5335 +                assert ((unsigned) base_committed % g_pagesize == 0);
 19.5336 +#ifdef TRACE
 19.5337 +                printf ("Commit %p %d\n", base_committed, commit_size);
 19.5338 +#endif
 19.5339 +                /* Adjust the regions commit top */
 19.5340 +                g_last->top_committed = (char *) base_committed + commit_size;
 19.5341 +            }
 19.5342 +        } 
 19.5343 +        /* Adjust the regions allocation top */
 19.5344 +        g_last->top_allocated = (char *) g_last->top_allocated + allocate_size;
 19.5345 +        result = (char *) g_last->top_allocated - size;
 19.5346 +    /* Deallocation requested? */
 19.5347 +    } else if (size < 0) {
 19.5348 +        long deallocate_size = - size;
 19.5349 +        /* As long as we have a region to release */
 19.5350 +        while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) {
 19.5351 +            /* Get the size to release */
 19.5352 +            long release_size = g_last->reserve_size;
 19.5353 +            /* Get the base address */
 19.5354 +            void *base_reserved = (char *) g_last->top_reserved - release_size;
 19.5355 +            /* Assert preconditions */
 19.5356 +            assert ((unsigned) base_reserved % g_regionsize == 0); 
 19.5357 +            assert (0 < release_size && release_size % g_regionsize == 0); {
 19.5358 +                /* Release this */
 19.5359 +                int rc = VirtualFree (base_reserved, 0, 
 19.5360 +                                      MEM_RELEASE);
 19.5361 +                /* Check returned code for consistency */
 19.5362 +                if (! rc)
 19.5363 +                    goto sbrk_exit;
 19.5364 +#ifdef TRACE
 19.5365 +                printf ("Release %p %d\n", base_reserved, release_size);
 19.5366 +#endif
 19.5367 +            }
 19.5368 +            /* Adjust deallocation size */
 19.5369 +            deallocate_size -= (char *) g_last->top_allocated - (char *) base_reserved;
 19.5370 +            /* Remove the old region from the list */
 19.5371 +            if (! region_list_remove (&g_last))
 19.5372 +                goto sbrk_exit;
 19.5373 +        } {
 19.5374 +            /* Compute the size to decommit */
 19.5375 +            long to_decommit = (char *) g_last->top_committed - ((char *) g_last->top_allocated - deallocate_size);
 19.5376 +            if (to_decommit >= g_my_pagesize) {
 19.5377 +                /* Compute the size to decommit */
 19.5378 +                long decommit_size = FLOOR (to_decommit, g_my_pagesize);
 19.5379 +                /*  Compute the base address */
 19.5380 +                void *base_committed = (char *) g_last->top_committed - decommit_size;
 19.5381 +                /* Assert preconditions */
 19.5382 +                assert ((unsigned) base_committed % g_pagesize == 0);
 19.5383 +                assert (0 < decommit_size && decommit_size % g_pagesize == 0); {
 19.5384 +                    /* Decommit this */
 19.5385 +                    int rc = VirtualFree ((char *) base_committed, decommit_size, 
 19.5386 +                                          MEM_DECOMMIT);
 19.5387 +                    /* Check returned code for consistency */
 19.5388 +                    if (! rc)
 19.5389 +                        goto sbrk_exit;
 19.5390 +#ifdef TRACE
 19.5391 +                    printf ("Decommit %p %d\n", base_committed, decommit_size);
 19.5392 +#endif
 19.5393 +                }
 19.5394 +                /* Adjust deallocation size and regions commit and allocate top */
 19.5395 +                deallocate_size -= (char *) g_last->top_allocated - (char *) base_committed;
 19.5396 +                g_last->top_committed = base_committed;
 19.5397 +                g_last->top_allocated = base_committed;
 19.5398 +            }
 19.5399 +        }
 19.5400 +        /* Adjust regions allocate top */
 19.5401 +        g_last->top_allocated = (char *) g_last->top_allocated - deallocate_size;
 19.5402 +        /* Check for underflow */
 19.5403 +        if ((char *) g_last->top_reserved - g_last->reserve_size > (char *) g_last->top_allocated ||
 19.5404 +            g_last->top_allocated > g_last->top_committed) {
 19.5405 +            /* Adjust regions allocate top */
 19.5406 +            g_last->top_allocated = (char *) g_last->top_reserved - g_last->reserve_size;
 19.5407 +            goto sbrk_exit;
 19.5408 +        }
 19.5409 +        result = g_last->top_allocated;
 19.5410 +    }
 19.5411 +    /* Assert invariants */
 19.5412 +    assert (g_last);
 19.5413 +    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
 19.5414 +            g_last->top_allocated <= g_last->top_committed);
 19.5415 +    assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
 19.5416 +            g_last->top_committed <= g_last->top_reserved &&
 19.5417 +            (unsigned) g_last->top_committed % g_pagesize == 0);
 19.5418 +    assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
 19.5419 +    assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
 19.5420 +
 19.5421 +sbrk_exit:
 19.5422 +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
 19.5423 +    /* Release spin lock */
 19.5424 +    slrelease (&g_sl);
 19.5425 +#endif
 19.5426 +    return result;
 19.5427 +}
 19.5428 +
 19.5429 +/* mmap for windows */
 19.5430 +static void *mmap (void *ptr, long size, long prot, long type, long handle, long arg) {
 19.5431 +    static long g_pagesize;
 19.5432 +    static long g_regionsize;
 19.5433 +#ifdef TRACE
 19.5434 +    printf ("mmap %d\n", size);
 19.5435 +#endif
 19.5436 +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
 19.5437 +    /* Wait for spin lock */
 19.5438 +    slwait (&g_sl);
 19.5439 +#endif
 19.5440 +    /* First time initialization */
 19.5441 +    if (! g_pagesize) 
 19.5442 +        g_pagesize = getpagesize ();
 19.5443 +    if (! g_regionsize) 
 19.5444 +        g_regionsize = getregionsize ();
 19.5445 +    /* Assert preconditions */
 19.5446 +    assert ((unsigned) ptr % g_regionsize == 0);
 19.5447 +    assert (size % g_pagesize == 0);
 19.5448 +    /* Allocate this */
 19.5449 +    ptr = VirtualAlloc (ptr, size,
 19.5450 +					    MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, PAGE_READWRITE);
 19.5451 +    if (! ptr) {
 19.5452 +        ptr = (void *) MORECORE_FAILURE;
 19.5453 +        goto mmap_exit;
 19.5454 +    }
 19.5455 +    /* Assert postconditions */
 19.5456 +    assert ((unsigned) ptr % g_regionsize == 0);
 19.5457 +#ifdef TRACE
 19.5458 +    printf ("Commit %p %d\n", ptr, size);
 19.5459 +#endif
 19.5460 +mmap_exit:
 19.5461 +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
 19.5462 +    /* Release spin lock */
 19.5463 +    slrelease (&g_sl);
 19.5464 +#endif
 19.5465 +    return ptr;
 19.5466 +}
 19.5467 +
 19.5468 +/* munmap for windows */
 19.5469 +static long munmap (void *ptr, long size) {
 19.5470 +    static long g_pagesize;
 19.5471 +    static long g_regionsize;
 19.5472 +    int rc = MUNMAP_FAILURE;
 19.5473 +#ifdef TRACE
 19.5474 +    printf ("munmap %p %d\n", ptr, size);
 19.5475 +#endif
 19.5476 +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
 19.5477 +    /* Wait for spin lock */
 19.5478 +    slwait (&g_sl);
 19.5479 +#endif
 19.5480 +    /* First time initialization */
 19.5481 +    if (! g_pagesize) 
 19.5482 +        g_pagesize = getpagesize ();
 19.5483 +    if (! g_regionsize) 
 19.5484 +        g_regionsize = getregionsize ();
 19.5485 +    /* Assert preconditions */
 19.5486 +    assert ((unsigned) ptr % g_regionsize == 0);
 19.5487 +    assert (size % g_pagesize == 0);
 19.5488 +    /* Free this */
 19.5489 +    if (! VirtualFree (ptr, 0, 
 19.5490 +                       MEM_RELEASE))
 19.5491 +        goto munmap_exit;
 19.5492 +    rc = 0;
 19.5493 +#ifdef TRACE
 19.5494 +    printf ("Release %p %d\n", ptr, size);
 19.5495 +#endif
 19.5496 +munmap_exit:
 19.5497 +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
 19.5498 +    /* Release spin lock */
 19.5499 +    slrelease (&g_sl);
 19.5500 +#endif
 19.5501 +    return rc;
 19.5502 +}
 19.5503 +
 19.5504 +static void vminfo (CHUNK_SIZE_T  *free, CHUNK_SIZE_T  *reserved, CHUNK_SIZE_T  *committed) {
 19.5505 +    MEMORY_BASIC_INFORMATION memory_info;
 19.5506 +    memory_info.BaseAddress = 0;
 19.5507 +    *free = *reserved = *committed = 0;
 19.5508 +    while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
 19.5509 +        switch (memory_info.State) {
 19.5510 +        case MEM_FREE:
 19.5511 +            *free += memory_info.RegionSize;
 19.5512 +            break;
 19.5513 +        case MEM_RESERVE:
 19.5514 +            *reserved += memory_info.RegionSize;
 19.5515 +            break;
 19.5516 +        case MEM_COMMIT:
 19.5517 +            *committed += memory_info.RegionSize;
 19.5518 +            break;
 19.5519 +        }
 19.5520 +        memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
 19.5521 +    }
 19.5522 +}
 19.5523 +
 19.5524 +static int cpuinfo (int whole, CHUNK_SIZE_T  *kernel, CHUNK_SIZE_T  *user) {
 19.5525 +    if (whole) {
 19.5526 +        __int64 creation64, exit64, kernel64, user64;
 19.5527 +        int rc = GetProcessTimes (GetCurrentProcess (), 
 19.5528 +                                  (FILETIME *) &creation64,  
 19.5529 +                                  (FILETIME *) &exit64, 
 19.5530 +                                  (FILETIME *) &kernel64, 
 19.5531 +                                  (FILETIME *) &user64);
 19.5532 +        if (! rc) {
 19.5533 +            *kernel = 0;
 19.5534 +            *user = 0;
 19.5535 +            return FALSE;
 19.5536 +        } 
 19.5537 +        *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
 19.5538 +        *user = (CHUNK_SIZE_T) (user64 / 10000);
 19.5539 +        return TRUE;
 19.5540 +    } else {
 19.5541 +        __int64 creation64, exit64, kernel64, user64;
 19.5542 +        int rc = GetThreadTimes (GetCurrentThread (), 
 19.5543 +                                 (FILETIME *) &creation64,  
 19.5544 +                                 (FILETIME *) &exit64, 
 19.5545 +                                 (FILETIME *) &kernel64, 
 19.5546 +                                 (FILETIME *) &user64);
 19.5547 +        if (! rc) {
 19.5548 +            *kernel = 0;
 19.5549 +            *user = 0;
 19.5550 +            return FALSE;
 19.5551 +        } 
 19.5552 +        *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
 19.5553 +        *user = (CHUNK_SIZE_T) (user64 / 10000);
 19.5554 +        return TRUE;
 19.5555 +    }
 19.5556 +}
 19.5557 +
 19.5558 +#endif /* WIN32 */
 19.5559 +
 19.5560 +/* ------------------------------------------------------------
 19.5561 +History:
 19.5562 +    V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
 19.5563 +      * Fix malloc_state bitmap array misdeclaration
 19.5564 +
 19.5565 +    V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
 19.5566 +      * Allow tuning of FIRST_SORTED_BIN_SIZE
 19.5567 +      * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
 19.5568 +      * Better detection and support for non-contiguousness of MORECORE. 
 19.5569 +        Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
 19.5570 +      * Bypass most of malloc if no frees. Thanks to Emery Berger.
 19.5571 +      * Fix freeing of old top non-contiguous chunk in sysmalloc.
 19.5572 +      * Raised default trim and map thresholds to 256K.
 19.5573 +      * Fix mmap-related #defines. Thanks to Lubos Lunak.
 19.5574 +      * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
 19.5575 +      * Branch-free bin calculation
 19.5576 +      * Default trim and mmap thresholds now 256K.
 19.5577 +
 19.5578 +    V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
 19.5579 +      * Introduce independent_comalloc and independent_calloc.
 19.5580 +        Thanks to Michael Pachos for motivation and help.
 19.5581 +      * Make optional .h file available
 19.5582 +      * Allow > 2GB requests on 32bit systems.
 19.5583 +      * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
 19.5584 +        Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
 19.5585 +        and Anonymous.
 19.5586 +      * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for 
 19.5587 +        helping test this.)
 19.5588 +      * memalign: check alignment arg
 19.5589 +      * realloc: don't try to shift chunks backwards, since this
 19.5590 +        leads to  more fragmentation in some programs and doesn't
 19.5591 +        seem to help in any others.
 19.5592 +      * Collect all cases in malloc requiring system memory into sYSMALLOc
 19.5593 +      * Use mmap as backup to sbrk
 19.5594 +      * Place all internal state in malloc_state
 19.5595 +      * Introduce fastbins (although similar to 2.5.1)
 19.5596 +      * Many minor tunings and cosmetic improvements
 19.5597 +      * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK 
 19.5598 +      * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
 19.5599 +        Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
 19.5600 +      * Include errno.h to support default failure action.
 19.5601 +
 19.5602 +    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
 19.5603 +      * return null for negative arguments
 19.5604 +      * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
 19.5605 +         * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
 19.5606 +          (e.g. WIN32 platforms)
 19.5607 +         * Cleanup header file inclusion for WIN32 platforms
 19.5608 +         * Cleanup code to avoid Microsoft Visual C++ compiler complaints
 19.5609 +         * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
 19.5610 +           memory allocation routines
 19.5611 +         * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
 19.5612 +         * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
 19.5613 +           usage of 'assert' in non-WIN32 code
 19.5614 +         * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
 19.5615 +           avoid infinite loop
 19.5616 +      * Always call 'fREe()' rather than 'free()'
 19.5617 +
 19.5618 +    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
 19.5619 +      * Fixed ordering problem with boundary-stamping
 19.5620 +
 19.5621 +    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
 19.5622 +      * Added pvalloc, as recommended by H.J. Liu
 19.5623 +      * Added 64bit pointer support mainly from Wolfram Gloger
 19.5624 +      * Added anonymously donated WIN32 sbrk emulation
 19.5625 +      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
 19.5626 +      * malloc_extend_top: fix mask error that caused wastage after
 19.5627 +        foreign sbrks
 19.5628 +      * Add linux mremap support code from HJ Liu
 19.5629 +
 19.5630 +    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
 19.5631 +      * Integrated most documentation with the code.
 19.5632 +      * Add support for mmap, with help from
 19.5633 +        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
 19.5634 +      * Use last_remainder in more cases.
 19.5635 +      * Pack bins using idea from  colin@nyx10.cs.du.edu
 19.5636 +      * Use ordered bins instead of best-fit threshold
 19.5637 +      * Eliminate block-local decls to simplify tracing and debugging.
 19.5638 +      * Support another case of realloc via move into top
 19.5639 +      * Fix error occurring when initial sbrk_base not word-aligned.
 19.5640 +      * Rely on page size for units instead of SBRK_UNIT to
 19.5641 +        avoid surprises about sbrk alignment conventions.
 19.5642 +      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
 19.5643 +        (raymond@es.ele.tue.nl) for the suggestion.
 19.5644 +      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
 19.5645 +      * More precautions for cases where other routines call sbrk,
 19.5646 +        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
 19.5647 +      * Added macros etc., allowing use in linux libc from
 19.5648 +        H.J. Lu (hjl@gnu.ai.mit.edu)
 19.5649 +      * Inverted this history list
 19.5650 +
 19.5651 +    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
 19.5652 +      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
 19.5653 +      * Removed all preallocation code since under current scheme
 19.5654 +        the work required to undo bad preallocations exceeds
 19.5655 +        the work saved in good cases for most test programs.
 19.5656 +      * No longer use return list or unconsolidated bins since
 19.5657 +        no scheme using them consistently outperforms those that don't
 19.5658 +        given above changes.
 19.5659 +      * Use best fit for very large chunks to prevent some worst-cases.
 19.5660 +      * Added some support for debugging
 19.5661 +
 19.5662 +    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
 19.5663 +      * Removed footers when chunks are in use. Thanks to
 19.5664 +        Paul Wilson (wilson@cs.texas.edu) for the suggestion.
 19.5665 +
 19.5666 +    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
 19.5667 +      * Added malloc_trim, with help from Wolfram Gloger
 19.5668 +        (wmglo@Dent.MED.Uni-Muenchen.DE).
 19.5669 +
 19.5670 +    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
 19.5671 +
 19.5672 +    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
 19.5673 +      * realloc: try to expand in both directions
 19.5674 +      * malloc: swap order of clean-bin strategy;
 19.5675 +      * realloc: only conditionally expand backwards
 19.5676 +      * Try not to scavenge used bins
 19.5677 +      * Use bin counts as a guide to preallocation
 19.5678 +      * Occasionally bin return list chunks in first scan
 19.5679 +      * Add a few optimizations from colin@nyx10.cs.du.edu
 19.5680 +
 19.5681 +    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
 19.5682 +      * faster bin computation & slightly different binning
 19.5683 +      * merged all consolidations to one part of malloc proper
 19.5684 +         (eliminating old malloc_find_space & malloc_clean_bin)
 19.5685 +      * Scan 2 returns chunks (not just 1)
 19.5686 +      * Propagate failure in realloc if malloc returns 0
 19.5687 +      * Add stuff to allow compilation on non-ANSI compilers
 19.5688 +          from kpv@research.att.com
 19.5689 +
 19.5690 +    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
 19.5691 +      * removed potential for odd address access in prev_chunk
 19.5692 +      * removed dependency on getpagesize.h
 19.5693 +      * misc cosmetics and a bit more internal documentation
 19.5694 +      * anticosmetics: mangled names in macros to evade debugger strangeness
 19.5695 +      * tested on sparc, hp-700, dec-mips, rs6000
 19.5696 +          with gcc & native cc (hp, dec only) allowing
 19.5697 +          Detlefs & Zorn comparison study (in SIGPLAN Notices.)
 19.5698 +
 19.5699 +    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
 19.5700 +      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
 19.5701 +         structure of old version,  but most details differ.)
 19.5702 +
 19.5703 +*/
    20.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.2 +++ b/mini-os/lib/math.c	Mon Oct 06 11:26:01 2003 +0000
    20.3 @@ -0,0 +1,385 @@
    20.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
    20.5 + ****************************************************************************
    20.6 + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
    20.7 + ****************************************************************************
    20.8 + *
    20.9 + *        File: math.c
   20.10 + *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
   20.11 + *     Changes: 
   20.12 + *              
   20.13 + *        Date: Aug 2003
   20.14 + * 
   20.15 + * Environment: Xen Minimal OS
    20.16 + * Description:  Library functions for 64-bit arithmetic and other helpers
    20.17 + *               from FreeBSD, files in sys/libkern/ (qdivrem.c, etc.)
   20.18 + *
   20.19 + ****************************************************************************
   20.20 + * $Id: c-insert.c,v 1.7 2002/11/08 16:04:34 rn Exp $
   20.21 + ****************************************************************************
   20.22 + *-
   20.23 + * Copyright (c) 1992, 1993
   20.24 + *	The Regents of the University of California.  All rights reserved.
   20.25 + *
   20.26 + * This software was developed by the Computer Systems Engineering group
   20.27 + * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
   20.28 + * contributed to Berkeley.
   20.29 + *
   20.30 + * Redistribution and use in source and binary forms, with or without
   20.31 + * modification, are permitted provided that the following conditions
   20.32 + * are met:
   20.33 + * 1. Redistributions of source code must retain the above copyright
   20.34 + *    notice, this list of conditions and the following disclaimer.
   20.35 + * 2. Redistributions in binary form must reproduce the above copyright
   20.36 + *    notice, this list of conditions and the following disclaimer in the
   20.37 + *    documentation and/or other materials provided with the distribution.
   20.38 + * 3. All advertising materials mentioning features or use of this software
   20.39 + *    must display the following acknowledgement:
   20.40 + *	This product includes software developed by the University of
   20.41 + *	California, Berkeley and its contributors.
   20.42 + * 4. Neither the name of the University nor the names of its contributors
   20.43 + *    may be used to endorse or promote products derived from this software
   20.44 + *    without specific prior written permission.
   20.45 + *
   20.46 + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   20.47 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20.48 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20.49 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   20.50 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20.51 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20.52 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   20.53 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   20.54 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   20.55 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   20.56 + * SUCH DAMAGE.
   20.57 + *
   20.58 + * $FreeBSD: src/sys/libkern/divdi3.c,v 1.6 1999/08/28 00:46:31 peter Exp $
   20.59 +*/
   20.60 +
   20.61 +#include <types.h>
   20.62 +
   20.63 +/*
   20.64 + * Depending on the desired operation, we view a `long long' (aka quad_t) in
   20.65 + * one or more of the following formats.
   20.66 + */
   20.67 +union uu {
   20.68 +        s64            q;              /* as a (signed) quad */
    20.69 +        u64            uq;             /* as an unsigned quad */
   20.70 +        long           sl[2];          /* as two signed longs */
   20.71 +        unsigned long  ul[2];          /* as two unsigned longs */
   20.72 +};
    20.73 +/* XXX RN: Yuck hardcoded endianness :) */
   20.74 +#define _QUAD_HIGHWORD 1
   20.75 +#define _QUAD_LOWWORD 0
   20.76 +/*
   20.77 + * Define high and low longwords.
   20.78 + */
   20.79 +#define H               _QUAD_HIGHWORD
   20.80 +#define L               _QUAD_LOWWORD
   20.81 +
   20.82 +/*
   20.83 + * Total number of bits in a quad_t and in the pieces that make it up.
   20.84 + * These are used for shifting, and also below for halfword extraction
   20.85 + * and assembly.
   20.86 + */
   20.87 +#define CHAR_BIT        8               /* number of bits in a char */
   20.88 +#define QUAD_BITS       (sizeof(s64) * CHAR_BIT)
   20.89 +#define LONG_BITS       (sizeof(long) * CHAR_BIT)
   20.90 +#define HALF_BITS       (sizeof(long) * CHAR_BIT / 2)
   20.91 +
   20.92 +/*
   20.93 + * Extract high and low shortwords from longword, and move low shortword of
   20.94 + * longword to upper half of long, i.e., produce the upper longword of
   20.95 + * ((quad_t)(x) << (number_of_bits_in_long/2)).  (`x' must actually be u_long.)
   20.96 + *
   20.97 + * These are used in the multiply code, to split a longword into upper
   20.98 + * and lower halves, and to reassemble a product as a quad_t, shifted left
   20.99 + * (sizeof(long)*CHAR_BIT/2).
  20.100 + */
  20.101 +#define HHALF(x)        ((x) >> HALF_BITS)
  20.102 +#define LHALF(x)        ((x) & ((1 << HALF_BITS) - 1))
  20.103 +#define LHUP(x)         ((x) << HALF_BITS)
  20.104 +
  20.105 +/*
  20.106 + * Multiprecision divide.  This algorithm is from Knuth vol. 2 (2nd ed),
  20.107 + * section 4.3.1, pp. 257--259.
  20.108 + */
  20.109 +#define	B	(1 << HALF_BITS)	/* digit base */
  20.110 +
  20.111 +/* Combine two `digits' to make a single two-digit number. */
  20.112 +#define	COMBINE(a, b) (((u_long)(a) << HALF_BITS) | (b))
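/*
 * Worked example (illustration only; assumes 32-bit longs, so HALF_BITS == 16):
 *	HHALF(0x12345678)        == 0x1234
 *	LHALF(0x12345678)        == 0x5678
 *	COMBINE(0x1234, 0x5678)  == 0x12345678
 */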
  20.113 +
  20.114 +/* select a type for digits in base B: use unsigned short if they fit */
  20.115 +#if ULONG_MAX == 0xffffffff && USHRT_MAX >= 0xffff
  20.116 +typedef unsigned short digit;
  20.117 +#else
  20.118 +typedef u_long digit;
  20.119 +#endif
  20.120 +
  20.121 +
  20.122 +/*
  20.123 + * Shift p[0]..p[len] left `sh' bits, ignoring any bits that
  20.124 + * `fall out' the left (there never will be any such anyway).
  20.125 + * We may assume len >= 0.  NOTE THAT THIS WRITES len+1 DIGITS.
  20.126 + */
  20.127 +static void
  20.128 +shl(register digit *p, register int len, register int sh)
  20.129 +{
  20.130 +	register int i;
  20.131 +
  20.132 +	for (i = 0; i < len; i++)
  20.133 +		p[i] = LHALF(p[i] << sh) | (p[i + 1] >> (HALF_BITS - sh));
  20.134 +	p[i] = LHALF(p[i] << sh);
  20.135 +}
  20.136 +
  20.137 +/*
  20.138 + * __qdivrem(u, v, rem) returns u/v and, optionally, sets *rem to u%v.
  20.139 + *
  20.140 + * We do this in base 2-sup-HALF_BITS, so that all intermediate products
  20.141 + * fit within u_long.  As a consequence, the maximum length dividend and
  20.142 + * divisor are 4 `digits' in this base (they are shorter if they have
  20.143 + * leading zeros).
  20.144 + */
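/*
 * Illustration only: with HALF_BITS == 16 the dividend 0x0001000200030004
 * is split below into base-B digits u[1..4] = 0x0001, 0x0002, 0x0003, 0x0004,
 * and the divisor into v[1..4] the same way.  u[0] only receives the bits
 * shifted out of u[1] by the D1 normalization step.
 */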
  20.145 +u64
  20.146 +__qdivrem(uq, vq, arq)
  20.147 +	u64 uq, vq, *arq;
  20.148 +{
  20.149 +	union uu tmp;
  20.150 +	digit *u, *v, *q;
  20.151 +	register digit v1, v2;
  20.152 +	u_long qhat, rhat, t;
  20.153 +	int m, n, d, j, i;
  20.154 +	digit uspace[5], vspace[5], qspace[5];
  20.155 +
  20.156 +	/*
  20.157 +	 * Take care of special cases: divide by zero, and u < v.
  20.158 +	 */
  20.159 +	if (vq == 0) {
  20.160 +		/* divide by zero. */
  20.161 +		static volatile const unsigned int zero = 0;
  20.162 +
  20.163 +		tmp.ul[H] = tmp.ul[L] = 1 / zero;
  20.164 +		if (arq)
  20.165 +			*arq = uq;
  20.166 +		return (tmp.q);
  20.167 +	}
  20.168 +	if (uq < vq) {
  20.169 +		if (arq)
  20.170 +			*arq = uq;
  20.171 +		return (0);
  20.172 +	}
  20.173 +	u = &uspace[0];
  20.174 +	v = &vspace[0];
  20.175 +	q = &qspace[0];
  20.176 +
  20.177 +	/*
  20.178 +	 * Break dividend and divisor into digits in base B, then
  20.179 +	 * count leading zeros to determine m and n.  When done, we
  20.180 +	 * will have:
  20.181 +	 *	u = (u[1]u[2]...u[m+n]) sub B
  20.182 +	 *	v = (v[1]v[2]...v[n]) sub B
  20.183 +	 *	v[1] != 0
  20.184 +	 *	1 < n <= 4 (if n = 1, we use a different division algorithm)
  20.185 +	 *	m >= 0 (otherwise u < v, which we already checked)
  20.186 +	 *	m + n = 4
  20.187 +	 * and thus
  20.188 +	 *	m = 4 - n <= 2
  20.189 +	 */
  20.190 +	tmp.uq = uq;
  20.191 +	u[0] = 0;
  20.192 +	u[1] = HHALF(tmp.ul[H]);
  20.193 +	u[2] = LHALF(tmp.ul[H]);
  20.194 +	u[3] = HHALF(tmp.ul[L]);
  20.195 +	u[4] = LHALF(tmp.ul[L]);
  20.196 +	tmp.uq = vq;
  20.197 +	v[1] = HHALF(tmp.ul[H]);
  20.198 +	v[2] = LHALF(tmp.ul[H]);
  20.199 +	v[3] = HHALF(tmp.ul[L]);
  20.200 +	v[4] = LHALF(tmp.ul[L]);
  20.201 +	for (n = 4; v[1] == 0; v++) {
  20.202 +		if (--n == 1) {
  20.203 +			u_long rbj;	/* r*B+u[j] (not root boy jim) */
  20.204 +			digit q1, q2, q3, q4;
  20.205 +
  20.206 +			/*
  20.207 +			 * Change of plan, per exercise 16.
  20.208 +			 *	r = 0;
  20.209 +			 *	for j = 1..4:
  20.210 +			 *		q[j] = floor((r*B + u[j]) / v),
  20.211 +			 *		r = (r*B + u[j]) % v;
  20.212 +			 * We unroll this completely here.
  20.213 +			 */
  20.214 +			t = v[2];	/* nonzero, by definition */
  20.215 +			q1 = u[1] / t;
  20.216 +			rbj = COMBINE(u[1] % t, u[2]);
  20.217 +			q2 = rbj / t;
  20.218 +			rbj = COMBINE(rbj % t, u[3]);
  20.219 +			q3 = rbj / t;
  20.220 +			rbj = COMBINE(rbj % t, u[4]);
  20.221 +			q4 = rbj / t;
  20.222 +			if (arq)
  20.223 +				*arq = rbj % t;
  20.224 +			tmp.ul[H] = COMBINE(q1, q2);
  20.225 +			tmp.ul[L] = COMBINE(q3, q4);
  20.226 +			return (tmp.q);
  20.227 +		}
  20.228 +	}
  20.229 +
  20.230 +	/*
  20.231 +	 * By adjusting q once we determine m, we can guarantee that
  20.232 +	 * there is a complete four-digit quotient at &qspace[1] when
  20.233 +	 * we finally stop.
  20.234 +	 */
  20.235 +	for (m = 4 - n; u[1] == 0; u++)
  20.236 +		m--;
  20.237 +	for (i = 4 - m; --i >= 0;)
  20.238 +		q[i] = 0;
  20.239 +	q += 4 - m;
  20.240 +
  20.241 +	/*
  20.242 +	 * Here we run Program D, translated from MIX to C and acquiring
  20.243 +	 * a few minor changes.
  20.244 +	 *
  20.245 +	 * D1: choose multiplier 1 << d to ensure v[1] >= B/2.
  20.246 +	 */
  20.247 +	d = 0;
  20.248 +	for (t = v[1]; t < B / 2; t <<= 1)
  20.249 +		d++;
  20.250 +	if (d > 0) {
  20.251 +		shl(&u[0], m + n, d);		/* u <<= d */
  20.252 +		shl(&v[1], n - 1, d);		/* v <<= d */
  20.253 +	}
  20.254 +	/*
  20.255 +	 * D2: j = 0.
  20.256 +	 */
  20.257 +	j = 0;
  20.258 +	v1 = v[1];	/* for D3 -- note that v[1..n] are constant */
  20.259 +	v2 = v[2];	/* for D3 */
  20.260 +	do {
  20.261 +		register digit uj0, uj1, uj2;
  20.262 +
  20.263 +		/*
  20.264 +		 * D3: Calculate qhat (\^q, in TeX notation).
  20.265 +		 * Let qhat = min((u[j]*B + u[j+1])/v[1], B-1), and
  20.266 +		 * let rhat = (u[j]*B + u[j+1]) mod v[1].
  20.267 +		 * While rhat < B and v[2]*qhat > rhat*B+u[j+2],
  20.268 +		 * decrement qhat and increase rhat correspondingly.
  20.269 +		 * Note that if rhat >= B, v[2]*qhat < rhat*B.
  20.270 +		 */
  20.271 +		uj0 = u[j + 0];	/* for D3 only -- note that u[j+...] change */
  20.272 +		uj1 = u[j + 1];	/* for D3 only */
  20.273 +		uj2 = u[j + 2];	/* for D3 only */
  20.274 +		if (uj0 == v1) {
  20.275 +			qhat = B;
  20.276 +			rhat = uj1;
  20.277 +			goto qhat_too_big;
  20.278 +		} else {
  20.279 +			u_long nn = COMBINE(uj0, uj1);
  20.280 +			qhat = nn / v1;
  20.281 +			rhat = nn % v1;
  20.282 +		}
  20.283 +		while (v2 * qhat > COMBINE(rhat, uj2)) {
  20.284 +	qhat_too_big:
  20.285 +			qhat--;
  20.286 +			if ((rhat += v1) >= B)
  20.287 +				break;
  20.288 +		}
  20.289 +		/*
  20.290 +		 * D4: Multiply and subtract.
  20.291 +		 * The variable `t' holds any borrows across the loop.
  20.292 +		 * We split this up so that we do not require v[0] = 0,
  20.293 +		 * and to eliminate a final special case.
  20.294 +		 */
  20.295 +		for (t = 0, i = n; i > 0; i--) {
  20.296 +			t = u[i + j] - v[i] * qhat - t;
  20.297 +			u[i + j] = LHALF(t);
  20.298 +			t = (B - HHALF(t)) & (B - 1);
  20.299 +		}
  20.300 +		t = u[j] - t;
  20.301 +		u[j] = LHALF(t);
  20.302 +		/*
  20.303 +		 * D5: test remainder.
  20.304 +		 * There is a borrow if and only if HHALF(t) is nonzero;
  20.305 +		 * in that (rare) case, qhat was too large (by exactly 1).
  20.306 +		 * Fix it by adding v[1..n] to u[j..j+n].
  20.307 +		 */
  20.308 +		if (HHALF(t)) {
  20.309 +			qhat--;
  20.310 +			for (t = 0, i = n; i > 0; i--) { /* D6: add back. */
  20.311 +				t += u[i + j] + v[i];
  20.312 +				u[i + j] = LHALF(t);
  20.313 +				t = HHALF(t);
  20.314 +			}
  20.315 +			u[j] = LHALF(u[j] + t);
  20.316 +		}
  20.317 +		q[j] = qhat;
  20.318 +	} while (++j <= m);		/* D7: loop on j. */
  20.319 +
  20.320 +	/*
  20.321 +	 * If caller wants the remainder, we have to calculate it as
  20.322 +	 * u[m..m+n] >> d (this is at most n digits and thus fits in
  20.323 +	 * u[m+1..m+n], but we may need more source digits).
  20.324 +	 */
  20.325 +	if (arq) {
  20.326 +		if (d) {
  20.327 +			for (i = m + n; i > m; --i)
  20.328 +				u[i] = (u[i] >> d) |
  20.329 +				    LHALF(u[i - 1] << (HALF_BITS - d));
  20.330 +			u[i] = 0;
  20.331 +		}
  20.332 +		tmp.ul[H] = COMBINE(uspace[1], uspace[2]);
  20.333 +		tmp.ul[L] = COMBINE(uspace[3], uspace[4]);
  20.334 +		*arq = tmp.q;
  20.335 +	}
  20.336 +
  20.337 +	tmp.ul[H] = COMBINE(qspace[1], qspace[2]);
  20.338 +	tmp.ul[L] = COMBINE(qspace[3], qspace[4]);
  20.339 +	return (tmp.q);
  20.340 +}
  20.341 +
  20.342 +
  20.343 +/*
  20.344 + * Divide two signed quads.
  20.345 + * ??? if -1/2 should produce -1 on this machine, this code is wrong
  20.346 + */
  20.347 +s64
  20.348 +__divdi3(s64 a, s64 b)
  20.349 +{
  20.350 +	u64 ua, ub, uq;
  20.351 +	int neg;
  20.352 +
  20.353 +	if (a < 0)
  20.354 +		ua = -(u64)a, neg = 1;
  20.355 +	else
  20.356 +		ua = a, neg = 0;
  20.357 +	if (b < 0)
  20.358 +		ub = -(u64)b, neg ^= 1;
  20.359 +	else
  20.360 +		ub = b;
  20.361 +	uq = __qdivrem(ua, ub, (u64 *)0);
  20.362 +	return (neg ? -uq : uq);
  20.363 +}
  20.364 +
  20.365 +/*
  20.366 + * Divide two unsigned quads.
  20.367 + */
  20.368 +u64
  20.369 +__udivdi3(a, b)
  20.370 +        u64 a, b;
  20.371 +{
  20.372 +        return (__qdivrem(a, b, (u64 *)0));
  20.373 +}
  20.374 +
  20.375 +
  20.376 +/*
  20.377 + * Return remainder after dividing two unsigned quads.
  20.378 + */
  20.379 +u_quad_t
  20.380 +__umoddi3(a, b)
  20.381 +        u_quad_t a, b;
  20.382 +{
  20.383 +        u_quad_t r;
  20.384 +
  20.385 +        (void)__qdivrem(a, b, &r);
  20.386 +        return (r);
  20.387 +}
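/*
 * Usage note (illustration only, not part of the original FreeBSD code):
 * on a 32-bit x86 build gcc lowers 64-bit division with a non-constant
 * divisor into calls to the helpers above, which is presumably why this
 * minimal OS provides them rather than relying on libgcc.  With a, b
 * unsigned 64-bit values and sa, sb signed 64-bit values:
 *
 *	u64 q = a / b;		 becomes a call to __udivdi3(a, b)
 *	u64 r = a % b;		 becomes a call to __umoddi3(a, b)
 *	s64 d = sa / sb;	 signed divide goes through __divdi3(sa, sb)
 */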
  20.388 +
    21.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.2 +++ b/mini-os/lib/printf.c	Mon Oct 06 11:26:01 2003 +0000
    21.3 @@ -0,0 +1,470 @@
    21.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
    21.5 + ****************************************************************************
    21.6 + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
    21.7 + ****************************************************************************
    21.8 + *
    21.9 + *        File: printf.c
   21.10 + *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
   21.11 + *     Changes: 
   21.12 + *              
   21.13 + *        Date: Aug 2003
   21.14 + * 
   21.15 + * Environment: Xen Minimal OS
   21.16 + * Description: Library functions for printing
    21.17 + *              (FreeBSD port, mainly sys/subr_prf.c)
   21.18 + *
   21.19 + ****************************************************************************
   21.20 + * $Id: c-insert.c,v 1.7 2002/11/08 16:04:34 rn Exp $
   21.21 + ****************************************************************************
   21.22 + *
   21.23 + *-
   21.24 + * Copyright (c) 1992, 1993
   21.25 + *	The Regents of the University of California.  All rights reserved.
   21.26 + *
   21.27 + * This software was developed by the Computer Systems Engineering group
   21.28 + * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
   21.29 + * contributed to Berkeley.
   21.30 + *
   21.31 + * Redistribution and use in source and binary forms, with or without
   21.32 + * modification, are permitted provided that the following conditions
   21.33 + * are met:
   21.34 + * 1. Redistributions of source code must retain the above copyright
   21.35 + *    notice, this list of conditions and the following disclaimer.
   21.36 + * 2. Redistributions in binary form must reproduce the above copyright
   21.37 + *    notice, this list of conditions and the following disclaimer in the
   21.38 + *    documentation and/or other materials provided with the distribution.
   21.39 + * 3. All advertising materials mentioning features or use of this software
   21.40 + *    must display the following acknowledgement:
   21.41 + *	This product includes software developed by the University of
   21.42 + *	California, Berkeley and its contributors.
   21.43 + * 4. Neither the name of the University nor the names of its contributors
   21.44 + *    may be used to endorse or promote products derived from this software
   21.45 + *    without specific prior written permission.
   21.46 + *
   21.47 + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21.48 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21.49 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21.50 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   21.51 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21.52 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21.53 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21.54 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   21.55 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   21.56 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   21.57 + * SUCH DAMAGE.
   21.58 + *
   21.59 + * $FreeBSD: src/sys/libkern/divdi3.c,v 1.6 1999/08/28 00:46:31 peter Exp $
   21.60 + */
   21.61 +
   21.62 +#include <os.h>
   21.63 +#include <types.h>
   21.64 +#include <hypervisor.h>
   21.65 +#include <lib.h>
   21.66 +
   21.67 +/****************************************************************************
   21.68 + * RN: printf family of routines
   21.69 + * taken mainly from sys/subr_prf.c
   21.70 + ****************************************************************************/
   21.71 +char const hex2ascii_data[] = "0123456789abcdefghijklmnopqrstuvwxyz";
   21.72 +#define hex2ascii(hex)  (hex2ascii_data[hex])
   21.73 +#define NBBY    8               /* number of bits in a byte */
   21.74 +#define MAXNBUF    (sizeof(quad_t) * NBBY + 1)
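/* MAXNBUF is 65 bytes: room for a 64-bit quad_t printed in base 2 (64 digits)
   plus the NUL that ksprintn/ksprintqn write first. */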
   21.75 +
   21.76 +static int kvprintf(char const *fmt, void *arg, int radix, va_list ap);
   21.77 +
   21.78 +
   21.79 +int
   21.80 +printf(const char *fmt, ...)
   21.81 +{
   21.82 +	va_list ap;
   21.83 +	int retval;
   21.84 +    static char printk_buf[1024];
   21.85 +
   21.86 +	va_start(ap, fmt);
   21.87 +	retval = kvprintf(fmt, printk_buf, 10, ap);
   21.88 +    printk_buf[retval] = '\0';
   21.89 +	va_end(ap);
   21.90 +    (void)HYPERVISOR_console_write(printk_buf, strlen(printk_buf));
   21.91 +	return retval;
   21.92 +}
   21.93 +
   21.94 +int
   21.95 +vprintf(const char *fmt, va_list ap)
   21.96 +{
   21.97 +	int retval;
   21.98 +    static char printk_buf[1024];
   21.99 +	retval = kvprintf(fmt, printk_buf, 10, ap);
  21.100 +    printk_buf[retval] = '\0';
  21.101 +    (void)HYPERVISOR_console_write(printk_buf, strlen(printk_buf));
  21.102 +	return retval;
  21.103 +}
  21.104 +
  21.105 +int
  21.106 +sprintf(char *buf, const char *cfmt, ...)
  21.107 +{
  21.108 +	int retval;
  21.109 +	va_list ap;
  21.110 +
  21.111 +	va_start(ap, cfmt);
  21.112 +	retval = kvprintf(cfmt, (void *)buf, 10, ap);
  21.113 +	buf[retval] = '\0';
  21.114 +	va_end(ap);
  21.115 +	return retval;
  21.116 +}
  21.117 +
  21.118 +int
  21.119 +vsprintf(char *buf, const char *cfmt, va_list ap)
  21.120 +{
  21.121 +	int retval;
  21.122 +
  21.123 +	retval = kvprintf(cfmt, (void *)buf, 10, ap);
  21.124 +	buf[retval] = '\0';
  21.125 +	return retval;
  21.126 +}
  21.127 +
  21.128 +
  21.129 +/*
  21.130 + * Put a NUL-terminated ASCII number (base <= 36) in a buffer in reverse
  21.131 + * order; return an optional length and a pointer to the last character
  21.132 + * written in the buffer (i.e., the first character of the string).
  21.133 + * The buffer pointed to by `nbuf' must have length >= MAXNBUF.
  21.134 + */
  21.135 +static char *
  21.136 +ksprintn(nbuf, ul, base, lenp)
  21.137 +	char *nbuf;
  21.138 +	u_long ul;
  21.139 +	int base, *lenp;
  21.140 +{
  21.141 +	char *p;
  21.142 +
  21.143 +	p = nbuf;
  21.144 +	*p = '\0';
  21.145 +	do {
  21.146 +		*++p = hex2ascii(ul % base);
  21.147 +	} while (ul /= base);
  21.148 +	if (lenp)
  21.149 +		*lenp = p - nbuf;
  21.150 +	return (p);
  21.151 +}
  21.152 +/* ksprintn, but for a quad_t. */
  21.153 +static char *
  21.154 +ksprintqn(nbuf, uq, base, lenp)
  21.155 +	char *nbuf;
  21.156 +	u_quad_t uq;
  21.157 +	int base, *lenp;
  21.158 +{
  21.159 +	char *p;
  21.160 +
  21.161 +	p = nbuf;
  21.162 +	*p = '\0';
  21.163 +	do {
  21.164 +		*++p = hex2ascii(uq % base);
  21.165 +	} while (uq /= base);
  21.166 +	if (lenp)
  21.167 +		*lenp = p - nbuf;
  21.168 +	return (p);
  21.169 +}
  21.170 +
  21.171 +/*
  21.172 + * Scaled down version of printf(3).
  21.173 + *
  21.174 + * Two additional formats:
  21.175 + *
  21.176 + * The format %b is supported to decode error registers.
  21.177 + * Its usage is:
  21.178 + *
  21.179 + *	printf("reg=%b\n", regval, "<base><arg>*");
  21.180 + *
  21.181 + * where <base> is the output base expressed as a control character, e.g.
  21.182 + * \10 gives octal; \20 gives hex.  Each arg is a sequence of characters,
  21.183 + * the first of which gives the bit number to be inspected (origin 1), and
  21.184 + * the next characters (up to a control character, i.e. a character <= 32),
  21.185 + * give the name of the register.  Thus:
  21.186 + *
  21.187 + *	kvprintf("reg=%b\n", 3, "\10\2BITTWO\1BITONE\n");
  21.188 + *
  21.189 + * would produce output:
  21.190 + *
  21.191 + *	reg=3<BITTWO,BITONE>
  21.192 + *
  21.193 + * XXX:  %D  -- Hexdump, takes pointer and separator string:
  21.194 + *		("%6D", ptr, ":")   -> XX:XX:XX:XX:XX:XX
   21.195 + *		("%*D", len, ptr, " ") -> XX XX XX XX ...
  21.196 + */
  21.197 +
   21.198 +/* RN: This normally takes a function for output; here we always print
   21.199 + * to a string and then use a hypercall to write it to the console. */
  21.200 +static int
  21.201 +kvprintf(char const *fmt, void *arg, int radix, va_list ap)
  21.202 +{
  21.203 +
  21.204 +#define PCHAR(c) {int cc=(c); *d++ = cc; retval++; }
  21.205 +
  21.206 +	char nbuf[MAXNBUF];
  21.207 +	char *p, *q, *d;
  21.208 +	u_char *up;
  21.209 +	int ch, n;
  21.210 +	u_long ul;
  21.211 +	u_quad_t uq;
  21.212 +	int base, lflag, qflag, tmp, width, ladjust, sharpflag, neg, sign, dot;
  21.213 +	int dwidth;
  21.214 +	char padc;
  21.215 +	int retval = 0;
  21.216 +
  21.217 +	ul = 0;
  21.218 +	uq = 0;
   21.219 +	d = (char *) arg;
  21.220 +
  21.221 +	if (fmt == NULL)
  21.222 +		fmt = "(fmt null)\n";
  21.223 +
  21.224 +	if (radix < 2 || radix > 36)
  21.225 +		radix = 10;
  21.226 +
  21.227 +	for (;;) {
  21.228 +		padc = ' ';
  21.229 +		width = 0;
  21.230 +		while ((ch = (u_char)*fmt++) != '%') {
  21.231 +			if (ch == '\0') 
  21.232 +				return retval;
  21.233 +			PCHAR(ch);
  21.234 +		}
  21.235 +		qflag = 0; lflag = 0; ladjust = 0; sharpflag = 0; neg = 0;
  21.236 +		sign = 0; dot = 0; dwidth = 0;
  21.237 +reswitch:	switch (ch = (u_char)*fmt++) {
  21.238 +		case '.':
  21.239 +			dot = 1;
  21.240 +			goto reswitch;
  21.241 +		case '#':
  21.242 +			sharpflag = 1;
  21.243 +			goto reswitch;
  21.244 +		case '+':
  21.245 +			sign = 1;
  21.246 +			goto reswitch;
  21.247 +		case '-':
  21.248 +			ladjust = 1;
  21.249 +			goto reswitch;
  21.250 +		case '%':
  21.251 +			PCHAR(ch);
  21.252 +			break;
  21.253 +		case '*':
  21.254 +			if (!dot) {
  21.255 +				width = va_arg(ap, int);
  21.256 +				if (width < 0) {
  21.257 +					ladjust = !ladjust;
  21.258 +					width = -width;
  21.259 +				}
  21.260 +			} else {
  21.261 +				dwidth = va_arg(ap, int);
  21.262 +			}
  21.263 +			goto reswitch;
  21.264 +		case '0':
  21.265 +			if (!dot) {
  21.266 +				padc = '0';
  21.267 +				goto reswitch;
  21.268 +			}
  21.269 +		case '1': case '2': case '3': case '4':
  21.270 +		case '5': case '6': case '7': case '8': case '9':
  21.271 +				for (n = 0;; ++fmt) {
  21.272 +					n = n * 10 + ch - '0';
  21.273 +					ch = *fmt;
  21.274 +					if (ch < '0' || ch > '9')
  21.275 +						break;
  21.276 +				}
  21.277 +			if (dot)
  21.278 +				dwidth = n;
  21.279 +			else
  21.280 +				width = n;
  21.281 +			goto reswitch;
  21.282 +		case 'b':
  21.283 +			ul = va_arg(ap, int);
  21.284 +			p = va_arg(ap, char *);
  21.285 +			for (q = ksprintn(nbuf, ul, *p++, NULL); *q;)
  21.286 +				PCHAR(*q--);
  21.287 +
  21.288 +			if (!ul)
  21.289 +				break;
  21.290 +
  21.291 +			for (tmp = 0; *p;) {
  21.292 +				n = *p++;
  21.293 +				if (ul & (1 << (n - 1))) {
  21.294 +					PCHAR(tmp ? ',' : '<');
  21.295 +					for (; (n = *p) > ' '; ++p)
  21.296 +						PCHAR(n);
  21.297 +					tmp = 1;
  21.298 +				} else
  21.299 +					for (; *p > ' '; ++p)
  21.300 +						continue;
  21.301 +			}
  21.302 +			if (tmp)
  21.303 +				PCHAR('>');
  21.304 +			break;
  21.305 +		case 'c':
  21.306 +			PCHAR(va_arg(ap, int));
  21.307 +			break;
  21.308 +		case 'D':
  21.309 +			up = va_arg(ap, u_char *);
  21.310 +			p = va_arg(ap, char *);
  21.311 +			if (!width)
  21.312 +				width = 16;
  21.313 +			while(width--) {
  21.314 +				PCHAR(hex2ascii(*up >> 4));
  21.315 +				PCHAR(hex2ascii(*up & 0x0f));
  21.316 +				up++;
  21.317 +				if (width)
  21.318 +					for (q=p;*q;q++)
  21.319 +						PCHAR(*q);
  21.320 +			}
  21.321 +			break;
  21.322 +		case 'd':
  21.323 +			if (qflag)
  21.324 +				uq = va_arg(ap, quad_t);
  21.325 +			else if (lflag)
  21.326 +				ul = va_arg(ap, long);
  21.327 +			else
  21.328 +				ul = va_arg(ap, int);
  21.329 +			sign = 1;
  21.330 +			base = 10;
  21.331 +			goto number;
  21.332 +		case 'l':
  21.333 +			if (lflag) {
  21.334 +				lflag = 0;
  21.335 +				qflag = 1;
  21.336 +			} else
  21.337 +				lflag = 1;
  21.338 +			goto reswitch;
  21.339 +		case 'o':
  21.340 +			if (qflag)
  21.341 +				uq = va_arg(ap, u_quad_t);
  21.342 +			else if (lflag)
  21.343 +				ul = va_arg(ap, u_long);
  21.344 +			else
  21.345 +				ul = va_arg(ap, u_int);
  21.346 +			base = 8;
  21.347 +			goto nosign;
  21.348 +		case 'p':
  21.349 +			ul = (uintptr_t)va_arg(ap, void *);
  21.350 +			base = 16;
  21.351 +			sharpflag = (width == 0);
  21.352 +			goto nosign;
  21.353 +		case 'q':
  21.354 +			qflag = 1;
  21.355 +			goto reswitch;
  21.356 +		case 'n':
  21.357 +		case 'r':
  21.358 +			if (qflag)
  21.359 +				uq = va_arg(ap, u_quad_t);
  21.360 +			else if (lflag)
  21.361 +				ul = va_arg(ap, u_long);
  21.362 +			else
  21.363 +				ul = sign ?
  21.364 +				    (u_long)va_arg(ap, int) : va_arg(ap, u_int);
  21.365 +			base = radix;
  21.366 +			goto number;
  21.367 +		case 's':
  21.368 +			p = va_arg(ap, char *);
  21.369 +			if (p == NULL)
  21.370 +				p = "(null)";
  21.371 +			if (!dot)
  21.372 +				n = strlen (p);
  21.373 +			else
  21.374 +				for (n = 0; n < dwidth && p[n]; n++)
  21.375 +					continue;
  21.376 +
  21.377 +			width -= n;
  21.378 +
  21.379 +			if (!ladjust && width > 0)
  21.380 +				while (width--)
  21.381 +					PCHAR(padc);
  21.382 +			while (n--)
  21.383 +				PCHAR(*p++);
  21.384 +			if (ladjust && width > 0)
  21.385 +				while (width--)
  21.386 +					PCHAR(padc);
  21.387 +			break;
  21.388 +		case 'u':
  21.389 +			if (qflag)
  21.390 +				uq = va_arg(ap, u_quad_t);
  21.391 +			else if (lflag)
  21.392 +				ul = va_arg(ap, u_long);
  21.393 +			else
  21.394 +				ul = va_arg(ap, u_int);
  21.395 +			base = 10;
  21.396 +			goto nosign;
  21.397 +		case 'x':
  21.398 +		case 'X':
  21.399 +			if (qflag)
  21.400 +				uq = va_arg(ap, u_quad_t);
  21.401 +			else if (lflag)
  21.402 +				ul = va_arg(ap, u_long);
  21.403 +			else
  21.404 +				ul = va_arg(ap, u_int);
  21.405 +			base = 16;
  21.406 +			goto nosign;
  21.407 +		case 'z':
  21.408 +			if (qflag)
  21.409 +				uq = va_arg(ap, u_quad_t);
  21.410 +			else if (lflag)
  21.411 +				ul = va_arg(ap, u_long);
  21.412 +			else
  21.413 +				ul = sign ?
  21.414 +				    (u_long)va_arg(ap, int) : va_arg(ap, u_int);
  21.415 +			base = 16;
  21.416 +			goto number;
  21.417 +nosign:			sign = 0;
  21.418 +number:			
  21.419 +			if (qflag) {
  21.420 +				if (sign && (quad_t)uq < 0) {
  21.421 +					neg = 1;
  21.422 +					uq = -(quad_t)uq;
  21.423 +				}
  21.424 +				p = ksprintqn(nbuf, uq, base, &tmp);
  21.425 +			} else {
  21.426 +				if (sign && (long)ul < 0) {
  21.427 +					neg = 1;
  21.428 +					ul = -(long)ul;
  21.429 +				}
  21.430 +				p = ksprintn(nbuf, ul, base, &tmp);
  21.431 +			}
  21.432 +			if (sharpflag && (qflag ? uq != 0 : ul != 0)) {
  21.433 +				if (base == 8)
  21.434 +					tmp++;
  21.435 +				else if (base == 16)
  21.436 +					tmp += 2;
  21.437 +			}
  21.438 +			if (neg)
  21.439 +				tmp++;
  21.440 +
  21.441 +			if (!ladjust && width && (width -= tmp) > 0)
  21.442 +				while (width--)
  21.443 +					PCHAR(padc);
  21.444 +			if (neg)
  21.445 +				PCHAR('-');
  21.446 +			if (sharpflag && (qflag ? uq != 0 : ul != 0)) {
  21.447 +				if (base == 8) {
  21.448 +					PCHAR('0');
  21.449 +				} else if (base == 16) {
  21.450 +					PCHAR('0');
  21.451 +					PCHAR('x');
  21.452 +				}
  21.453 +			}
  21.454 +
  21.455 +			while (*p)
  21.456 +				PCHAR(*p--);
  21.457 +
  21.458 +			if (ladjust && width && (width -= tmp) > 0)
  21.459 +				while (width--)
  21.460 +					PCHAR(padc);
  21.461 +
  21.462 +			break;
  21.463 +		default:
  21.464 +			PCHAR('%');
  21.465 +			if (lflag)
  21.466 +				PCHAR('l');
  21.467 +			PCHAR(ch);
  21.468 +			break;
  21.469 +		}
  21.470 +	}
  21.471 +#undef PCHAR
  21.472 +}
  21.473 +
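
For readers unfamiliar with the %b format documented in the kvprintf comment above, the following is a minimal standalone sketch of the decoding it performs. This is illustration only, not part of the changeset: it builds against the host's <stdio.h> rather than mini-os's console printf, and decode_bits is a hypothetical helper name.

#include <stdio.h>

/* Hypothetical helper (illustration only): decode a value against a
 * %b-style bit-name string.  The first character of the string is the
 * output base; each following group is a bit number (origin 1) followed
 * by that bit's name, which runs up to the next character <= ' '. */
static void decode_bits(unsigned long val, const char *bits)
{
    int n, printed_any = 0;
    int base = *bits++;

    if (base == 8)
        printf("%lo", val);
    else if (base == 16)
        printf("%lx", val);
    else
        printf("%lu", val);

    while ((n = *bits++) != '\0') {
        if (val & (1UL << (n - 1))) {
            putchar(printed_any ? ',' : '<');
            while (*bits > ' ')
                putchar(*bits++);
            printed_any = 1;
        } else {
            while (*bits > ' ')
                bits++;
        }
    }
    if (printed_any)
        putchar('>');
    putchar('\n');
}

int main(void)
{
    /* Mirrors the example in the comment: prints "reg=3<BITTWO,BITONE>". */
    printf("reg=");
    decode_bits(3, "\10\2BITTWO\1BITONE");
    return 0;
}

Running it prints reg=3<BITTWO,BITONE>, matching the example given in the kvprintf comment.
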
    22.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.2 +++ b/mini-os/lib/string.c	Mon Oct 06 11:26:01 2003 +0000
    22.3 @@ -0,0 +1,142 @@
    22.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
    22.5 + ****************************************************************************
    22.6 + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
    22.7 + ****************************************************************************
    22.8 + *
    22.9 + *        File: string.c
   22.10 + *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
   22.11 + *     Changes: 
   22.12 + *              
   22.13 + *        Date: Aug 2003
   22.14 + * 
   22.15 + * Environment: Xen Minimal OS
    22.16 + * Description: Library functions for string and memory manipulation
   22.17 + *              Origin unknown
   22.18 + *
   22.19 + ****************************************************************************
   22.20 + * $Id: c-insert.c,v 1.7 2002/11/08 16:04:34 rn Exp $
   22.21 + ****************************************************************************
   22.22 + */
   22.23 +
   22.24 +#include <os.h>
   22.25 +#include <types.h>
   22.26 +#include <lib.h>
   22.27 +
   22.28 +int memcmp(const void * cs,const void * ct,size_t count)
   22.29 +{
   22.30 +	const unsigned char *su1, *su2;
   22.31 +	signed char res = 0;
   22.32 +
   22.33 +	for( su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
   22.34 +		if ((res = *su1 - *su2) != 0)
   22.35 +			break;
   22.36 +	return res;
   22.37 +}
   22.38 +
   22.39 +void * memcpy(void * dest,const void *src,size_t count)
   22.40 +{
   22.41 +	char *tmp = (char *) dest, *s = (char *) src;
   22.42 +
   22.43 +	while (count--)
   22.44 +		*tmp++ = *s++;
   22.45 +
   22.46 +	return dest;
   22.47 +}
   22.48 +
   22.49 +int strncmp(const char * cs,const char * ct,size_t count)
   22.50 +{
   22.51 +	register signed char __res = 0;
   22.52 +
   22.53 +	while (count) {
   22.54 +		if ((__res = *cs - *ct++) != 0 || !*cs++)
   22.55 +			break;
   22.56 +		count--;
   22.57 +	}
   22.58 +
   22.59 +	return __res;
   22.60 +}
   22.61 +
   22.62 +int strcmp(const char * cs,const char * ct)
   22.63 +{
   22.64 +        register signed char __res;
   22.65 +
   22.66 +        while (1) {
   22.67 +                if ((__res = *cs - *ct++) != 0 || !*cs++)
   22.68 +                        break;
   22.69 +        }
   22.70 +
   22.71 +        return __res;
   22.72 +}
   22.73 +
   22.74 +char * strcpy(char * dest,const char *src)
   22.75 +{
   22.76 +        char *tmp = dest;
   22.77 +
   22.78 +        while ((*dest++ = *src++) != '\0')
   22.79 +                /* nothing */;
   22.80 +        return tmp;
   22.81 +}
   22.82 +
   22.83 +char * strncpy(char * dest,const char *src,size_t count)
   22.84 +{
   22.85 +        char *tmp = dest;
   22.86 +
   22.87 +        while (count-- && (*dest++ = *src++) != '\0')
   22.88 +                /* nothing */;
   22.89 +
   22.90 +        return tmp;
   22.91 +}
   22.92 +
   22.93 +void * memset(void * s,int c,size_t count)
   22.94 +{
   22.95 +        char *xs = (char *) s;
   22.96 +
   22.97 +        while (count--)
   22.98 +                *xs++ = c;
   22.99 +
  22.100 +        return s;
  22.101 +}
  22.102 +
  22.103 +size_t strnlen(const char * s, size_t count)
  22.104 +{
  22.105 +        const char *sc;
  22.106 +
  22.107 +        for (sc = s; count-- && *sc != '\0'; ++sc)
  22.108 +                /* nothing */;
  22.109 +        return sc - s;
  22.110 +}
  22.111 +
  22.112 +size_t strlen(const char * s)
  22.113 +{
  22.114 +	const char *sc;
  22.115 +
  22.116 +	for (sc = s; *sc != '\0'; ++sc)
  22.117 +		/* nothing */;
  22.118 +	return sc - s;
  22.119 +}
  22.120 +
  22.121 +char * strchr(const char * s, int c)
  22.122 +{
  22.123 +        for(; *s != (char) c; ++s)
  22.124 +                if (*s == '\0')
  22.125 +                        return NULL;
  22.126 +        return (char *) s;
  22.127 +}
  22.128 +
  22.129 +char * strstr(const char * s1,const char * s2)
  22.130 +{
  22.131 +        int l1, l2;
  22.132 +
  22.133 +        l2 = strlen(s2);
  22.134 +        if (!l2)
  22.135 +                return (char *) s1;
  22.136 +        l1 = strlen(s1);
  22.137 +        while (l1 >= l2) {
  22.138 +                l1--;
  22.139 +                if (!memcmp(s1,s2,l2))
  22.140 +                        return (char *) s1;
  22.141 +                s1++;
  22.142 +        }
  22.143 +        return NULL;
  22.144 +}
  22.145 +
    23.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.2 +++ b/mini-os/mm.c	Mon Oct 06 11:26:01 2003 +0000
    23.3 @@ -0,0 +1,375 @@
    23.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
    23.5 + ****************************************************************************
    23.6 + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
    23.7 + ****************************************************************************
    23.8 + *
    23.9 + *        File: mm.c
   23.10 + *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
   23.11 + *     Changes: 
   23.12 + *              
   23.13 + *        Date: Aug 2003
   23.14 + * 
   23.15 + * Environment: Xen Minimal OS
   23.16 + * Description: memory management related functions
   23.17 + *              contains buddy page allocator from Xen.
   23.18 + *
   23.19 + ****************************************************************************
   23.20 + * $Id: c-insert.c,v 1.7 2002/11/08 16:04:34 rn Exp $
   23.21 + ****************************************************************************
   23.22 + */
   23.23 +
   23.24 +
   23.25 +#include <os.h>
   23.26 +#include <hypervisor.h>
   23.27 +#include <mm.h>
   23.28 +#include <types.h>
   23.29 +#include <lib.h>
   23.30 +
   23.31 +unsigned long *phys_to_machine_mapping;
   23.32 +extern char *stack;
   23.33 +extern char _text, _etext, _edata, _end;
   23.34 +
   23.35 +static void init_page_allocator(unsigned long min, unsigned long max);
   23.36 +
   23.37 +void init_mm(void)
   23.38 +{
   23.39 +
   23.40 +    unsigned long start_pfn, max_pfn, max_free_pfn;
   23.41 +
   23.42 +    unsigned long *pgd = (unsigned long *)start_info.pt_base;
   23.43 +
   23.44 +    printk("MM: Init\n");
   23.45 +
   23.46 +    printk("  _text:        %p\n", &_text);
   23.47 +    printk("  _etext:       %p\n", &_etext);
   23.48 +    printk("  _edata:       %p\n", &_edata);
   23.49 +    printk("  stack start:  %p\n", &stack);
   23.50 +    printk("  _end:         %p\n", &_end);
   23.51 +
    23.52 +    /* set up minimal memory info */
   23.53 +    start_pfn = PFN_UP(__pa(&_end));
   23.54 +    max_pfn = start_info.nr_pages;
   23.55 +
   23.56 +    printk("  start_pfn:    %lx\n", start_pfn);
   23.57 +    printk("  max_pfn:      %lx\n", max_pfn);
   23.58 +
   23.59 +    /*
    23.60 +     * We know where free pages start (start_pfn) and how many we
    23.61 +     * have (max_pfn).
    23.62 +     * 
    23.63 +     * Currently the hypervisor stores the page tables it provides in
    23.64 +     * the high region of this memory range.
    23.65 +     * 
    23.66 +     * Next we work out how far down this goes (max_free_pfn).
    23.67 +     * 
    23.68 +     * XXX this assumes the hypervisor-provided page tables are in
   23.69 +     * the upper region of our initial memory. I don't know if this 
   23.70 +     * is always true.
   23.71 +     */
   23.72 +
   23.73 +    max_free_pfn = PFN_DOWN(__pa(pgd));
   23.74 +    {
   23.75 +        unsigned long *pgd = (unsigned long *)start_info.pt_base;
   23.76 +        unsigned long  pte;
   23.77 +        int i;
   23.78 +        printk("  pgd(pa(pgd)): %lx(%lx)", (u_long)pgd, __pa(pgd));
   23.79 +
   23.80 +        for ( i = 0; i < (HYPERVISOR_VIRT_START>>22); i++ )
   23.81 +        {
   23.82 +            unsigned long pgde = *pgd++;
   23.83 +            if ( !(pgde & 1) ) continue;
   23.84 +            pte = machine_to_phys(pgde & PAGE_MASK);
   23.85 +            printk("  PT(%x): %lx(%lx)", i, (u_long)__va(pte), pte);
   23.86 +            if (PFN_DOWN(pte) <= max_free_pfn) 
   23.87 +                max_free_pfn = PFN_DOWN(pte);
   23.88 +        }
   23.89 +    }
   23.90 +    max_free_pfn--;
   23.91 +    printk("  max_free_pfn: %lx\n", max_free_pfn);
   23.92 +
   23.93 +    /*
   23.94 +     * now we can initialise the page allocator
   23.95 +     */
   23.96 +    printk("MM: Initialise page allocator for %lx(%lx)-%lx(%lx)\n",
   23.97 +           (u_long)__va(PFN_PHYS(start_pfn)), PFN_PHYS(start_pfn), 
   23.98 +           (u_long)__va(PFN_PHYS(max_free_pfn)), PFN_PHYS(max_free_pfn));
   23.99 +    init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_free_pfn));   
  23.100 +
  23.101 +
  23.102 +    /* Now initialise the physical->machine mapping table. */
  23.103 +
  23.104 +
  23.105 +    printk("MM: done\n");
  23.106 +
  23.107 +    
  23.108 +}
  23.109 +
  23.110 +/*********************
  23.111 + * ALLOCATION BITMAP
  23.112 + *  One bit per page of memory. Bit set => page is allocated.
  23.113 + */
  23.114 +
  23.115 +static unsigned long *alloc_bitmap;
  23.116 +#define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)
  23.117 +
  23.118 +#define allocated_in_map(_pn) \
  23.119 +(alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] & (1<<((_pn)&(PAGES_PER_MAPWORD-1))))
  23.120 +
  23.121 +
  23.122 +/*
  23.123 + * Hint regarding bitwise arithmetic in map_{alloc,free}:
  23.124 + *  -(1<<n)  sets all bits >= n. 
  23.125 + *  (1<<n)-1 sets all bits <  n.
  23.126 + * Variable names in map_{alloc,free}:
  23.127 + *  *_idx == Index into `alloc_bitmap' array.
  23.128 + *  *_off == Bit offset within an element of the `alloc_bitmap' array.
  23.129 + */
  23.130 +
  23.131 +static void map_alloc(unsigned long first_page, unsigned long nr_pages)
  23.132 +{
  23.133 +    unsigned long start_off, end_off, curr_idx, end_idx;
  23.134 +
  23.135 +    curr_idx  = first_page / PAGES_PER_MAPWORD;
  23.136 +    start_off = first_page & (PAGES_PER_MAPWORD-1);
  23.137 +    end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
  23.138 +    end_off   = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);
  23.139 +
  23.140 +    if ( curr_idx == end_idx )
  23.141 +    {
  23.142 +        alloc_bitmap[curr_idx] |= ((1<<end_off)-1) & -(1<<start_off);
  23.143 +    }
  23.144 +    else 
  23.145 +    {
  23.146 +        alloc_bitmap[curr_idx] |= -(1<<start_off);
  23.147 +        while ( ++curr_idx < end_idx ) alloc_bitmap[curr_idx] = ~0L;
  23.148 +        alloc_bitmap[curr_idx] |= (1<<end_off)-1;
  23.149 +    }
  23.150 +}
  23.151 +
  23.152 +
  23.153 +static void map_free(unsigned long first_page, unsigned long nr_pages)
  23.154 +{
  23.155 +    unsigned long start_off, end_off, curr_idx, end_idx;
  23.156 +
  23.157 +    curr_idx = first_page / PAGES_PER_MAPWORD;
  23.158 +    start_off = first_page & (PAGES_PER_MAPWORD-1);
  23.159 +    end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
  23.160 +    end_off   = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);
  23.161 +
  23.162 +    if ( curr_idx == end_idx )
  23.163 +    {
  23.164 +        alloc_bitmap[curr_idx] &= -(1<<end_off) | ((1<<start_off)-1);
  23.165 +    }
  23.166 +    else 
  23.167 +    {
  23.168 +        alloc_bitmap[curr_idx] &= (1<<start_off)-1;
  23.169 +        while ( ++curr_idx != end_idx ) alloc_bitmap[curr_idx] = 0;
  23.170 +        alloc_bitmap[curr_idx] &= -(1<<end_off);
  23.171 +    }
  23.172 +}
  23.173 +
  23.174 +
  23.175 +
  23.176 +/*************************
  23.177 + * BINARY BUDDY ALLOCATOR
  23.178 + */
  23.179 +
  23.180 +typedef struct chunk_head_st chunk_head_t;
  23.181 +typedef struct chunk_tail_st chunk_tail_t;
  23.182 +
  23.183 +struct chunk_head_st {
  23.184 +    chunk_head_t  *next;
  23.185 +    chunk_head_t **pprev;
  23.186 +    int            level;
  23.187 +};
  23.188 +
  23.189 +struct chunk_tail_st {
  23.190 +    int level;
  23.191 +};
  23.192 +
  23.193 +/* Linked lists of free chunks of different powers-of-two in size. */
  23.194 +#define FREELIST_SIZE ((sizeof(void*)<<3)-PAGE_SHIFT)
  23.195 +static chunk_head_t *free_head[FREELIST_SIZE];
  23.196 +static chunk_head_t  free_tail[FREELIST_SIZE];
  23.197 +#define FREELIST_EMPTY(_l) ((_l)->next == NULL)
  23.198 +
  23.199 +#define round_pgdown(_p)  ((_p)&PAGE_MASK)
  23.200 +#define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
  23.201 +
  23.202 +
  23.203 +/*
  23.204 + * Initialise allocator, placing addresses [@min,@max] in free pool.
  23.205 + * @min and @max are PHYSICAL addresses.
  23.206 + */
  23.207 +static void init_page_allocator(unsigned long min, unsigned long max)
  23.208 +{
  23.209 +    int i;
  23.210 +    unsigned long range, bitmap_size;
  23.211 +    chunk_head_t *ch;
  23.212 +    chunk_tail_t *ct;
  23.213 +
  23.214 +    for ( i = 0; i < FREELIST_SIZE; i++ )
  23.215 +    {
  23.216 +        free_head[i]       = &free_tail[i];
  23.217 +        free_tail[i].pprev = &free_head[i];
  23.218 +        free_tail[i].next  = NULL;
  23.219 +    }
  23.220 +
  23.221 +    min = round_pgup  (min);
  23.222 +    max = round_pgdown(max);
  23.223 +
  23.224 +    /* Allocate space for the allocation bitmap. */
  23.225 +    bitmap_size  = (max+1) >> (PAGE_SHIFT+3);
  23.226 +    bitmap_size  = round_pgup(bitmap_size);
  23.227 +    alloc_bitmap = (unsigned long *)__va(min);
  23.228 +    min         += bitmap_size;
  23.229 +    range        = max - min;
  23.230 +
  23.231 +    /* All allocated by default. */
  23.232 +    memset(alloc_bitmap, ~0, bitmap_size);
  23.233 +    /* Free up the memory we've been given to play with. */
  23.234 +    map_free(min>>PAGE_SHIFT, range>>PAGE_SHIFT);
  23.235 +
  23.236 +    /* The buddy lists are addressed in high memory. */
  23.237 +    min += PAGE_OFFSET;
  23.238 +    max += PAGE_OFFSET;
  23.239 +
  23.240 +    while ( range != 0 )
  23.241 +    {
  23.242 +        /*
  23.243 +         * Next chunk is limited by alignment of min, but also
  23.244 +         * must not be bigger than remaining range.
  23.245 +         */
  23.246 +        for ( i = PAGE_SHIFT; (1<<(i+1)) <= range; i++ )
  23.247 +            if ( min & (1<<i) ) break;
  23.248 +
  23.249 +
  23.250 +        ch = (chunk_head_t *)min;
  23.251 +        min   += (1<<i);
  23.252 +        range -= (1<<i);
  23.253 +        ct = (chunk_tail_t *)min-1;
  23.254 +        i -= PAGE_SHIFT;
  23.255 +        ch->level       = i;
  23.256 +        ch->next        = free_head[i];
  23.257 +        ch->pprev       = &free_head[i];
  23.258 +        ch->next->pprev = &ch->next;
  23.259 +        free_head[i]    = ch;
  23.260 +        ct->level       = i;
  23.261 +    }
  23.262 +}
  23.263 +
  23.264 +
  23.265 +/* Release a PHYSICAL address range to the allocator. */
  23.266 +void release_bytes_to_allocator(unsigned long min, unsigned long max)
  23.267 +{
  23.268 +    min = round_pgup  (min) + PAGE_OFFSET;
  23.269 +    max = round_pgdown(max) + PAGE_OFFSET;
  23.270 +
  23.271 +    while ( min < max )
  23.272 +    {
  23.273 +        __free_pages(min, 0);
  23.274 +        min += PAGE_SIZE;
  23.275 +    }
  23.276 +}
  23.277 +
  23.278 +
  23.279 +/* Allocate 2^@order contiguous pages. Returns a VIRTUAL address. */
  23.280 +unsigned long __get_free_pages(int order)
  23.281 +{
  23.282 +    int i;
  23.283 +    chunk_head_t *alloc_ch, *spare_ch;
  23.284 +    chunk_tail_t            *spare_ct;
  23.285 +
  23.286 +
  23.287 +    /* Find smallest order which can satisfy the request. */
  23.288 +    for ( i = order; i < FREELIST_SIZE; i++ ) {
  23.289 +	if ( !FREELIST_EMPTY(free_head[i]) ) 
  23.290 +	    break;
  23.291 +    }
  23.292 +
  23.293 +    if ( i == FREELIST_SIZE ) goto no_memory;
  23.294 + 
  23.295 +    /* Unlink a chunk. */
  23.296 +    alloc_ch = free_head[i];
  23.297 +    free_head[i] = alloc_ch->next;
  23.298 +    alloc_ch->next->pprev = alloc_ch->pprev;
  23.299 +
  23.300 +    /* We may have to break the chunk a number of times. */
  23.301 +    while ( i != order )
  23.302 +    {
  23.303 +        /* Split into two equal parts. */
  23.304 +        i--;
  23.305 +        spare_ch = (chunk_head_t *)((char *)alloc_ch + (1<<(i+PAGE_SHIFT)));
  23.306 +        spare_ct = (chunk_tail_t *)((char *)spare_ch + (1<<(i+PAGE_SHIFT)))-1;
  23.307 +
  23.308 +        /* Create new header for spare chunk. */
  23.309 +        spare_ch->level = i;
  23.310 +        spare_ch->next  = free_head[i];
  23.311 +        spare_ch->pprev = &free_head[i];
  23.312 +        spare_ct->level = i;
  23.313 +
  23.314 +        /* Link in the spare chunk. */
  23.315 +        spare_ch->next->pprev = &spare_ch->next;
  23.316 +        free_head[i] = spare_ch;
  23.317 +    }
  23.318 +    
  23.319 +    map_alloc(__pa(alloc_ch)>>PAGE_SHIFT, 1<<order);
  23.320 +
  23.321 +    return((unsigned long)alloc_ch);
  23.322 +
  23.323 + no_memory:
  23.324 +
  23.325 +    printk("Cannot handle page request order %d!\n", order);
  23.326 +
  23.327 +    return 0;
  23.328 +}
  23.329 +
  23.330 +
  23.331 +/* Free 2^@order pages at VIRTUAL address @p. */
  23.332 +void __free_pages(unsigned long p, int order)
  23.333 +{
  23.334 +    unsigned long size = 1 << (order + PAGE_SHIFT);
  23.335 +    chunk_head_t *ch;
  23.336 +    chunk_tail_t *ct;
  23.337 +    unsigned long pagenr = __pa(p) >> PAGE_SHIFT;
  23.338 +
  23.339 +    map_free(pagenr, 1<<order);
  23.340 +    
  23.341 +    /* Merge chunks as far as possible. */
  23.342 +    for ( ; ; )
  23.343 +    {
  23.344 +        if ( (p & size) )
  23.345 +        {
  23.346 +            /* Merge with predecessor block? */
  23.347 +            if ( allocated_in_map(pagenr-1) ) break;
  23.348 +            ct = (chunk_tail_t *)p - 1;
  23.349 +            if ( ct->level != order ) break;
  23.350 +            ch = (chunk_head_t *)(p - size);
  23.351 +            p -= size;
  23.352 +        }
  23.353 +        else
  23.354 +        {
  23.355 +            /* Merge with successor block? */
  23.356 +            if ( allocated_in_map(pagenr+(1<<order)) ) break;
  23.357 +            ch = (chunk_head_t *)(p + size);
  23.358 +            if ( ch->level != order ) break;
  23.359 +        }
  23.360 +        
  23.361 +        /* Okay, unlink the neighbour. */
  23.362 +        *ch->pprev = ch->next;
  23.363 +        ch->next->pprev = ch->pprev;
  23.364 +
  23.365 +        order++;
  23.366 +        size <<= 1;
  23.367 +    }
  23.368 +
  23.369 +    /* Okay, add the final chunk to the appropriate free list. */
  23.370 +    ch = (chunk_head_t *)p;
  23.371 +    ct = (chunk_tail_t *)(p+size)-1;
  23.372 +    ct->level = order;
  23.373 +    ch->level = order;
  23.374 +    ch->pprev = &free_head[order];
  23.375 +    ch->next  = free_head[order];
  23.376 +    ch->next->pprev = &ch->next;
  23.377 +    free_head[order] = ch;
  23.378 +}
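
The bit-mask hint above map_alloc()/map_free() is easy to verify in isolation. Below is a small host-side sketch (mark_range is a hypothetical helper, not mini-os code) of the mask ((1<<end_off)-1) & -(1<<start_off) used when a page range falls entirely within one word of the allocation bitmap.

#include <assert.h>
#include <stdio.h>

/* Hypothetical helper (illustration only, host build): set bits
 * [start, end) in a map word, using the same arithmetic as map_alloc()
 * when the range lies within a single word:
 *   -(1UL << start)   has all bits >= start set,
 *   (1UL << end) - 1  has all bits <  end set,
 * so their AND covers exactly the bits start..end-1. */
static unsigned long mark_range(unsigned long word, int start, int end)
{
    return word | (((1UL << end) - 1) & -(1UL << start));
}

int main(void)
{
    /* Allocate pages 3..6 (first_page = 3, nr_pages = 4) in an empty word. */
    unsigned long w = mark_range(0, 3, 7);

    assert(w == 0x78);          /* bits 3, 4, 5 and 6 are now set */
    printf("bitmap word = %#lx\n", w);
    return 0;
}
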
    24.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.2 +++ b/mini-os/time.c	Mon Oct 06 11:26:01 2003 +0000
    24.3 @@ -0,0 +1,149 @@
    24.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
    24.5 + ****************************************************************************
    24.6 + * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
    24.7 + ****************************************************************************
    24.8 + *
    24.9 + *        File: time.c
   24.10 + *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
   24.11 + *     Changes: 
   24.12 + *              
   24.13 + *        Date: Jul 2003
   24.14 + * 
   24.15 + * Environment: Xen Minimal OS
   24.16 + * Description: Simple time and timer functions
   24.17 + *
   24.18 + ****************************************************************************
   24.19 + * $Id: c-insert.c,v 1.7 2002/11/08 16:04:34 rn Exp $
   24.20 + ****************************************************************************
   24.21 + */
   24.22 +
   24.23 +
   24.24 +#include <os.h>
   24.25 +#include <types.h>
   24.26 +#include <hypervisor.h>
   24.27 +#include <events.h>
   24.28 +#include <time.h>
   24.29 +#include <lib.h>
   24.30 +
   24.31 +/************************************************************************
   24.32 + * Time functions
   24.33 + *************************************************************************/
   24.34 +
   24.35 +static unsigned int rdtsc_bitshift;
   24.36 +static u32      st_scale_f;
   24.37 +static u32      st_scale_i;
   24.38 +static u32      shadow_st_pcc;
   24.39 +static s_time_t shadow_st;
   24.40 +static u32      shadow_wc_version=0;
   24.41 +static long     shadow_tv_sec;
   24.42 +static long     shadow_tv_usec;
   24.43 +static s_time_t shadow_wc_timestamp;
   24.44 +
   24.45 +/*
   24.46 + * System time.
   24.47 + * We need to read the values from the shared info page "atomically" 
   24.48 + * and use the cycle counter value as the "version" number. Clashes
   24.49 + * should be very rare.
   24.50 + */
   24.51 +inline s_time_t get_s_time(void)
   24.52 +{
   24.53 +    s32 delta_tsc;
   24.54 +    u32 low;
   24.55 +    u64 delta, tsc;
   24.56 +    u32	version;
   24.57 +    u64 cpu_freq, scale;
   24.58 +
   24.59 +    /* check if our values are still up-to-date */
   24.60 +    while ( (version = HYPERVISOR_shared_info->wc_version) != 
   24.61 +            shadow_wc_version )
   24.62 +    {
   24.63 +        barrier();
   24.64 +
   24.65 +        shadow_wc_version   = version;
   24.66 +        shadow_tv_sec       = HYPERVISOR_shared_info->tv_sec;
   24.67 +        shadow_tv_usec      = HYPERVISOR_shared_info->tv_usec;
   24.68 +        shadow_wc_timestamp = HYPERVISOR_shared_info->wc_timestamp;
   24.69 +        shadow_st_pcc       = HYPERVISOR_shared_info->st_timestamp;
   24.70 +        shadow_st           = HYPERVISOR_shared_info->system_time;
   24.71 +
   24.72 +        rdtsc_bitshift      = HYPERVISOR_shared_info->rdtsc_bitshift;
   24.73 +        cpu_freq            = HYPERVISOR_shared_info->cpu_freq;
   24.74 +
   24.75 +        /* XXX cpu_freq as u32 limits it to 4.29 GHz. Get a better do_div! */
   24.76 +        scale = 1000000000LL << (32 + rdtsc_bitshift);
   24.77 +        scale /= cpu_freq;
   24.78 +        st_scale_f = scale & 0xffffffff;
   24.79 +        st_scale_i = scale >> 32;
   24.80 +
   24.81 +        barrier();
   24.82 +	}
   24.83 +
   24.84 +    rdtscll(tsc);
   24.85 +    low = (u32)(tsc >> rdtsc_bitshift);
   24.86 +    delta_tsc = (s32)(low - shadow_st_pcc);
   24.87 +    if ( unlikely(delta_tsc < 0) ) delta_tsc = 0;
   24.88 +    delta = ((u64)delta_tsc * st_scale_f);
   24.89 +    delta >>= 32;
   24.90 +    delta += ((u64)delta_tsc * st_scale_i);
   24.91 +
   24.92 +    return shadow_st + delta;
   24.93 +}
   24.94 +
   24.95 +
   24.96 +/*
   24.97 + * Wallclock time.
   24.98 + * Based on what the hypervisor tells us, extrapolated using system time.
    24.99 + * Again we need to read a number of values from the shared page
   24.100 + * "atomically", this time using a version number.
  24.101 + */
  24.102 +void gettimeofday(struct timeval *tv)
  24.103 +{
  24.104 +    long          usec, sec;
  24.105 +    u64           now;
  24.106 +
  24.107 +    now   = get_s_time();
  24.108 +    usec  = ((unsigned long)(now-shadow_wc_timestamp))/1000;
  24.109 +    sec   = shadow_tv_sec;
  24.110 +    usec += shadow_tv_usec;
  24.111 +
  24.112 +    while ( usec >= 1000000 ) 
  24.113 +    {
  24.114 +        usec -= 1000000;
  24.115 +        sec++;
  24.116 +    }
  24.117 +
  24.118 +    tv->tv_sec = sec;
  24.119 +    tv->tv_usec = usec;
  24.120 +}
  24.121 +
  24.122 +
  24.123 +static void timer_handler(int ev, struct pt_regs *regs)
  24.124 +{
  24.125 +    static int i;
  24.126 +    s_time_t now;
  24.127 +
  24.128 +    i++;
  24.129 +    if (i >= 1000) {
  24.130 +        now = get_s_time();
  24.131 +        printf("T(%lld)\n", now);
  24.132 +        i = 0;
  24.133 +    }
  24.134 +}
  24.135 +
  24.136 +
  24.137 +void init_time(void)
  24.138 +{
  24.139 +    u64         __cpu_khz;
  24.140 +    unsigned long cpu_khz;
  24.141 +
  24.142 +    __cpu_khz = HYPERVISOR_shared_info->cpu_freq;
  24.143 +    cpu_khz = (u32) (__cpu_khz/1000);
  24.144 +
  24.145 +    printk("Xen reported: %lu.%03lu MHz processor.\n", 
  24.146 +           cpu_khz / 1000, cpu_khz % 1000);
  24.147 +
  24.148 +    add_ev_action(EV_TIMER, &timer_handler);
  24.149 +    enable_ev_action(EV_TIMER);
  24.150 +    enable_hypervisor_event(EV_TIMER);
  24.151 +
  24.152 +}
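
The scaling in get_s_time() above is 32.32 fixed-point arithmetic: the factor (10^9 << (32 + rdtsc_bitshift)) / cpu_freq is split into an integer part (st_scale_i) and a fractional part (st_scale_f), and a shifted TSC delta is multiplied by each half. A small host-side sketch with made-up values (illustration only, not mini-os code):

#include <stdint.h>
#include <stdio.h>

/* Illustration only (host build, example numbers): the 32.32 fixed-point
 * scaling used by get_s_time().  The nanoseconds-per-tick factor is
 * (10^9 << (32 + rdtsc_bitshift)) / cpu_freq, split into an integer part
 * (st_scale_i) and a fractional part (st_scale_f). */
int main(void)
{
    uint64_t cpu_freq = 2400000000ULL;      /* 2.4 GHz, for illustration   */
    unsigned rdtsc_bitshift = 0;            /* 0 keeps the intermediate
                                               shift within 64 bits here   */

    uint64_t scale = (1000000000ULL << (32 + rdtsc_bitshift)) / cpu_freq;
    uint32_t st_scale_f = (uint32_t)scale;
    uint32_t st_scale_i = (uint32_t)(scale >> 32);

    uint32_t delta_tsc = 2400000;           /* ~1 ms worth of cycles       */
    uint64_t delta_ns = (((uint64_t)delta_tsc * st_scale_f) >> 32)
                      + ((uint64_t)delta_tsc * st_scale_i);

    printf("%u cycles at %llu Hz ~= %llu ns\n", delta_tsc,
           (unsigned long long)cpu_freq, (unsigned long long)delta_ns);
    return 0;
}

With these example values the program reports roughly 999999 ns for one millisecond's worth of cycles, which is the behaviour the shadowed scale factors are meant to give.
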
    25.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.2 +++ b/mini-os/traps.c	Mon Oct 06 11:26:01 2003 +0000
    25.3 @@ -0,0 +1,150 @@
    25.4 +
    25.5 +#include <os.h>
    25.6 +#include <hypervisor.h>
    25.7 +#include <lib.h>
    25.8 +
    25.9 +/*
   25.10 + * These are assembler stubs in entry.S.
   25.11 + * They are the actual entry points for virtual exceptions.
   25.12 + */
   25.13 +void divide_error(void);
   25.14 +void debug(void);
   25.15 +void int3(void);
   25.16 +void overflow(void);
   25.17 +void bounds(void);
   25.18 +void invalid_op(void);
   25.19 +void device_not_available(void);
   25.20 +void double_fault(void);
   25.21 +void coprocessor_segment_overrun(void);
   25.22 +void invalid_TSS(void);
   25.23 +void segment_not_present(void);
   25.24 +void stack_segment(void);
   25.25 +void general_protection(void);
   25.26 +void page_fault(void);
   25.27 +void coprocessor_error(void);
   25.28 +void simd_coprocessor_error(void);
   25.29 +void alignment_check(void);
   25.30 +void spurious_interrupt_bug(void);
   25.31 +void machine_check(void);
   25.32 +
   25.33 +/*
   25.34 + * C handlers here have their parameter-list constructed by the
   25.35 + * assembler stubs above. Each one gets a pointer to a list
   25.36 + * of register values (to be restored at end of exception).
   25.37 + * Some will also receive an error code -- this is the code that
   25.38 + * was generated by the processor for the underlying real exception. 
   25.39 + * 
   25.40 + * Note that the page-fault exception is special. It also receives
   25.41 + * the faulting linear address. Normally this would be found in
   25.42 + * register CR2, but that is not accessible in a virtualised OS.
   25.43 + */
   25.44 +
   25.45 +static void inline do_trap(int trapnr, char *str,
   25.46 +			   struct pt_regs * regs, long error_code)
   25.47 +{
   25.48 +    printk("Trap\n");
   25.49 +}
   25.50 +
   25.51 +#define DO_ERROR(trapnr, str, name) \
   25.52 +void do_##name(struct pt_regs * regs, long error_code) \
   25.53 +{ \
   25.54 +	do_trap(trapnr, str, regs, error_code); \
   25.55 +}
   25.56 +
   25.57 +#define DO_ERROR_INFO(trapnr, str, name, sicode, siaddr) \
   25.58 +void do_##name(struct pt_regs * regs, long error_code) \
   25.59 +{ \
   25.60 +	do_trap(trapnr, str, regs, error_code); \
   25.61 +}
   25.62 +
   25.63 +DO_ERROR_INFO( 0, "divide error", divide_error, FPE_INTDIV, regs->eip)
   25.64 +DO_ERROR( 3, "int3", int3)
   25.65 +DO_ERROR( 4, "overflow", overflow)
   25.66 +DO_ERROR( 5, "bounds", bounds)
   25.67 +DO_ERROR_INFO( 6, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
   25.68 +DO_ERROR( 7, "device not available", device_not_available)
   25.69 +DO_ERROR( 8, "double fault", double_fault)
   25.70 +DO_ERROR( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
   25.71 +DO_ERROR(10, "invalid TSS", invalid_TSS)
   25.72 +DO_ERROR(11, "segment not present", segment_not_present)
   25.73 +DO_ERROR(12, "stack segment", stack_segment)
   25.74 +DO_ERROR_INFO(17, "alignment check", alignment_check, BUS_ADRALN, 0)
   25.75 +DO_ERROR(18, "machine check", machine_check)
   25.76 +
   25.77 +void do_page_fault(struct pt_regs * regs, long error_code,
   25.78 +                   unsigned long address)
   25.79 +{
   25.80 +    printk("Page fault\n");
   25.81 +}
   25.82 +
   25.83 +void do_general_protection(struct pt_regs * regs, long error_code)
   25.84 +{
   25.85 +    printk("GPF\n");
   25.86 +}
   25.87 +
   25.88 +
   25.89 +void do_debug(struct pt_regs * regs, long error_code)
   25.90 +{
   25.91 +    printk("Debug exception\n");
   25.92 +#define TF_MASK 0x100
   25.93 +    regs->eflags &= ~TF_MASK;
   25.94 +}
   25.95 +
   25.96 +
   25.97 +
   25.98 +void do_coprocessor_error(struct pt_regs * regs, long error_code)
   25.99 +{
  25.100 +    printk("Copro error\n");
  25.101 +}
  25.102 +
  25.103 +void simd_math_error(void *eip)
  25.104 +{
  25.105 +    printk("SIMD error\n");
  25.106 +}
  25.107 +
  25.108 +void do_simd_coprocessor_error(struct pt_regs * regs,
  25.109 +					  long error_code)
  25.110 +{
  25.111 +    printk("SIMD copro error\n");
  25.112 +}
  25.113 +
  25.114 +void do_spurious_interrupt_bug(struct pt_regs * regs,
  25.115 +					  long error_code)
  25.116 +{
  25.117 +}
  25.118 +
  25.119 +/*
   25.120 + * Submit a virtual IDT to the hypervisor. This consists of tuples
  25.121 + * (interrupt vector, privilege ring, CS:EIP of handler).
  25.122 + * The 'privilege ring' field specifies the least-privileged ring that
  25.123 + * can trap to that vector using a software-interrupt instruction (INT).
  25.124 + */
  25.125 +static trap_info_t trap_table[] = {
  25.126 +    {  0, 0, __KERNEL_CS, (unsigned long)divide_error                },
  25.127 +    {  1, 0, __KERNEL_CS, (unsigned long)debug                       },
  25.128 +    {  3, 3, __KERNEL_CS, (unsigned long)int3                        },
  25.129 +    {  4, 3, __KERNEL_CS, (unsigned long)overflow                    },
  25.130 +    {  5, 3, __KERNEL_CS, (unsigned long)bounds                      },
  25.131 +    {  6, 0, __KERNEL_CS, (unsigned long)invalid_op                  },
  25.132 +    {  7, 0, __KERNEL_CS, (unsigned long)device_not_available        },
  25.133 +    {  8, 0, __KERNEL_CS, (unsigned long)double_fault                },
  25.134 +    {  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
  25.135 +    { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS                 },
  25.136 +    { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present         },
  25.137 +    { 12, 0, __KERNEL_CS, (unsigned long)stack_segment               },
  25.138 +    { 13, 0, __KERNEL_CS, (unsigned long)general_protection          },
  25.139 +    { 14, 0, __KERNEL_CS, (unsigned long)page_fault                  },
  25.140 +    { 15, 0, __KERNEL_CS, (unsigned long)spurious_interrupt_bug      },
  25.141 +    { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error           },
  25.142 +    { 17, 0, __KERNEL_CS, (unsigned long)alignment_check             },
  25.143 +    { 18, 0, __KERNEL_CS, (unsigned long)machine_check               },
  25.144 +    { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error      },
  25.145 +    {  0, 0,           0, 0                           }
  25.146 +};
  25.147 +    
  25.148 +
  25.149 +
  25.150 +void trap_init(void)
  25.151 +{
  25.152 +    HYPERVISOR_set_trap_table(trap_table);    
  25.153 +}
    26.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.2 +++ b/mini-os/vmlinux.lds	Mon Oct 06 11:26:01 2003 +0000
    26.3 @@ -0,0 +1,82 @@
    26.4 +/* ld script to make i386 Linux kernel
    26.5 + * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
    26.6 + */
    26.7 +OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
    26.8 +OUTPUT_ARCH(i386)
    26.9 +ENTRY(_start)
   26.10 +SECTIONS
   26.11 +{
   26.12 +  . = 0xC0000000 + 0x000000;
   26.13 +  _text = .;			/* Text and read-only data */
   26.14 +  .text : {
   26.15 +	*(.text)
   26.16 +	*(.fixup)
   26.17 +	*(.gnu.warning)
   26.18 +	} = 0x9090
   26.19 +
   26.20 +  _etext = .;			/* End of text section */
   26.21 +
   26.22 +  .rodata : { *(.rodata) *(.rodata.*) }
   26.23 +  .kstrtab : { *(.kstrtab) }
   26.24 +
   26.25 +  . = ALIGN(16);		/* Exception table */
   26.26 +  __start___ex_table = .;
   26.27 +  __ex_table : { *(__ex_table) }
   26.28 +  __stop___ex_table = .;
   26.29 +
   26.30 +  __start___ksymtab = .;	/* Kernel symbol table */
   26.31 +  __ksymtab : { *(__ksymtab) }
   26.32 +  __stop___ksymtab = .;
   26.33 +
   26.34 +  .data : {			/* Data */
   26.35 +	*(.data)
   26.36 +	CONSTRUCTORS
   26.37 +	}
   26.38 +
   26.39 +  _edata = .;			/* End of data section */
   26.40 +
   26.41 +  . = ALIGN(8192);		/* init_task */
   26.42 +  .data.init_task : { *(.data.init_task) }
   26.43 +
   26.44 +  . = ALIGN(4096);		/* Init code and data */
   26.45 +  __init_begin = .;
   26.46 +  .text.init : { *(.text.init) }
   26.47 +  .data.init : { *(.data.init) }
   26.48 +  . = ALIGN(16);
   26.49 +  __setup_start = .;
   26.50 +  .setup.init : { *(.setup.init) }
   26.51 +  __setup_end = .;
   26.52 +  __initcall_start = .;
   26.53 +  .initcall.init : { *(.initcall.init) }
   26.54 +  __initcall_end = .;
   26.55 +  . = ALIGN(4096);
   26.56 +  __init_end = .;
   26.57 +
   26.58 +  . = ALIGN(4096);
   26.59 +  .data.page_aligned : { *(.data.idt) }
   26.60 +
   26.61 +  . = ALIGN(32);
   26.62 +  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
   26.63 +
   26.64 +  __bss_start = .;		/* BSS */
   26.65 +  .bss : {
   26.66 +	*(.bss)
   26.67 +	}
   26.68 +  _end = . ;
   26.69 +
   26.70 +  /* Sections to be discarded */
   26.71 +  /DISCARD/ : {
   26.72 +	*(.text.exit)
   26.73 +	*(.data.exit)
   26.74 +	*(.exitcall.exit)
   26.75 +	}
   26.76 +
   26.77 +  /* Stabs debugging sections.  */
   26.78 +  .stab 0 : { *(.stab) }
   26.79 +  .stabstr 0 : { *(.stabstr) }
   26.80 +  .stab.excl 0 : { *(.stab.excl) }
   26.81 +  .stab.exclstr 0 : { *(.stab.exclstr) }
   26.82 +  .stab.index 0 : { *(.stab.index) }
   26.83 +  .stab.indexstr 0 : { *(.stab.indexstr) }
   26.84 +  .comment 0 : { *(.comment) }
   26.85 +}