/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Author: Bertrand Serlet, August 1999 */

#import "scalable_malloc.h"

#import <pthread_internals.h>

#include <mach/vm_statistics.h>

/*********************	DEFINITIONS	************************/

#define DEBUG_MALLOC	0 // set to one to debug malloc itself
#define DEBUG_CLIENT	0 // set to one to help debug a nasty memory smasher
#if DEBUG_MALLOC
#warning DEBUG_MALLOC ENABLED
#define INLINE
#define CHECK_LOCKED(szone, fun) { \
    if (__is_threaded && TRY_LOCK(szone->lock)) { \
        malloc_printf("*** lock was not set %p in %s\n", szone->lock, fun); \
    } \
}
#else
#define INLINE	__inline__
#define CHECK_LOCKED(szone, fun) {}
#endif
#define PAGE_SIZE_FIXED	1 // flip if the page size becomes variable, one day

#if PAGE_SIZE_FIXED
#define vm_page_shift	12
#else
static unsigned	vm_page_shift = 0; // guaranteed to be initialized by zone creation
#endif
typedef unsigned short	msize_t; // a size in multiples of SHIFT_SMALL_QUANTUM or SHIFT_TINY_QUANTUM

typedef struct {
    unsigned	checksum;
    void	*previous;
    void	*next;
} free_list_t;

typedef struct {
    unsigned	address_and_num_pages;
    // this type represents both an address and a number of pages
    // the low bits are the number of pages; the high bits are the address
    // note that the exact number of bits used depends on the page size
    // also, this cannot represent pointers larger than 1 << (vm_page_shift * 2)
} compact_range_t;
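// For example, with 4 KB pages (vm_page_shift == 12) an entry describing 3 pages
// starting at address 0x54000 is stored as 0x54000 | 3 == 0x54003: the high bits
// carry the page-aligned address, the low 12 bits carry the page count.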
typedef unsigned char	grain_t;

#define CHECK_REGIONS		(1 << 31)

#define CHECKSUM_MAGIC		0x357B

#define MAX_RECORDER_BUFFER	256
/*********************	DEFINITIONS for tiny	************************/

#define SHIFT_TINY_QUANTUM	4	// Required for AltiVec
#define TINY_QUANTUM		(1 << SHIFT_TINY_QUANTUM)

#define FOLLOWING_TINY_PTR(ptr,msize)	(((char *)(ptr)) + ((msize) << SHIFT_TINY_QUANTUM))

#define NUM_TINY_SLOTS		32	// number of slots for free-lists

#define SHIFT_NUM_TINY_BLOCKS	16
#define NUM_TINY_BLOCKS		(1 << SHIFT_NUM_TINY_BLOCKS)
#define TINY_BLOCKS_ALIGN	(SHIFT_NUM_TINY_BLOCKS + SHIFT_TINY_QUANTUM)
#define TINY_REGION_SIZE	((NUM_TINY_BLOCKS * TINY_QUANTUM + (NUM_TINY_BLOCKS >> 2) + 8 + (1 << vm_page_shift) - 1) & ~ ((1 << vm_page_shift) - 1)) // enough room for the data, followed by the bit arrays (2-bits per block) plus 2 words of padding as our bitmap operators overflow, plus rounding to the nearest page
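// Concretely: NUM_TINY_BLOCKS * TINY_QUANTUM = 65536 * 16 = 0x100000 (1 MB) of data,
// NUM_TINY_BLOCKS >> 2 = 0x4000 (16 KB) for the two bit arrays, plus 8 bytes of padding;
// with 4 KB pages that rounds up to 0x105000 bytes per tiny region.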
#define TINY_FREE_SIZE(ptr)		(((msize_t *)(ptr))[6])
// At the end of free blocks, we stick the size (for enabling coalescing)
#define TINY_PREVIOUS_MSIZE(ptr)	((msize_t *)(ptr))[-1]

#define TINY_REGION_ADDRESS(region)	((region) << TINY_BLOCKS_ALIGN)
#define TINY_REGION_END(region)		(TINY_REGION_ADDRESS(region)+(1 << TINY_BLOCKS_ALIGN))

typedef unsigned short	tiny_region_t;

#define INITIAL_NUM_TINY_REGIONS	24	// must be even for szone to be aligned

#define TINY_CACHE	1	// This governs a last-free cache of 1 that bypasses the free-list

#if ! TINY_CACHE
#warning TINY_CACHE turned off
#endif
/*********************	DEFINITIONS for small	************************/

/* We store the meta bits on the side in two bytes, as follows:
- the high order bit SMALL_IS_FREE is set when the block is free (and the block starts here)
- the other 15 bits give the block size, expressed in SMALL_QUANTUM units
- a value of 0 signifies this block is in the middle of another block
*/
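// For example, a free 5-quantum block has meta value (SMALL_IS_FREE | 5) == 0x8005,
// the same block in use reads 0x0005, and an entry of 0 marks a quantum that sits
// in the middle of some other block.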
#define SMALL_IS_FREE		(1 << 15)

#define SHIFT_SMALL_QUANTUM	(SHIFT_TINY_QUANTUM + 5)	// 9
#define SMALL_QUANTUM		(1 << SHIFT_SMALL_QUANTUM)	// 512 bytes

#define FOLLOWING_SMALL_PTR(ptr,msize)	(((char *)(ptr)) + ((msize) << SHIFT_SMALL_QUANTUM))

#define NUM_SMALL_SLOTS		32	// number of slots for free-lists

#define SHIFT_NUM_SMALL_BLOCKS	14	// we can only represent up to 1<<15 for msize; but we chose to stay even below that to avoid the convention msize=0 => msize = (1<<15)
#define NUM_SMALL_BLOCKS	(1 << SHIFT_NUM_SMALL_BLOCKS)
#define SMALL_BLOCKS_ALIGN	(SHIFT_NUM_SMALL_BLOCKS + SHIFT_SMALL_QUANTUM)	// 23
#define SMALL_REGION_SIZE	(NUM_SMALL_BLOCKS * SMALL_QUANTUM + NUM_SMALL_BLOCKS * 2)	// data + meta data
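// That is 16384 * 512 bytes == 8 MB of data followed by 16384 * 2 == 32 KB of msize_t
// meta entries, for a region of 0x808000 bytes (already a whole number of pages).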
#define SMALL_PREVIOUS_MSIZE(ptr)	((msize_t *)(ptr))[-1]

#define SMALL_REGION_ADDRESS(region)	(((unsigned)region) << SMALL_BLOCKS_ALIGN)
#define SMALL_REGION_END(region)	(SMALL_REGION_ADDRESS(region)+(1 << SMALL_BLOCKS_ALIGN))

typedef unsigned short	small_region_t;

#define INITIAL_NUM_SMALL_REGIONS	6	// must be even for szone to be aligned

#define PROTECT_SMALL	0	// Should be 0: 1 is too slow for normal use

#define SMALL_CACHE	1

#if ! SMALL_CACHE
#warning SMALL_CACHE turned off
#endif
/*********************	DEFINITIONS for large	************************/

#define LARGE_THRESHOLD		(15 * 1024)	// at or above this use "large"

#if (LARGE_THRESHOLD > NUM_SMALL_SLOTS * SMALL_QUANTUM)
#error LARGE_THRESHOLD should always be less than NUM_SMALL_SLOTS * SMALL_QUANTUM
#endif

#define VM_COPY_THRESHOLD	(40 * 1024)
	// When all memory is touched after a copy, vm_copy() is always a loss
	// But if the memory is only read, vm_copy() wins over memmove() at 3 or 4 pages (on a G3/300MHz)
	// This must be larger than LARGE_THRESHOLD
#define LARGE_ENTRY_ADDRESS(entry) \
    (((entry).address_and_num_pages >> vm_page_shift) << vm_page_shift)
#define LARGE_ENTRY_NUM_PAGES(entry) \
    ((entry).address_and_num_pages & ((1 << vm_page_shift) - 1))
#define LARGE_ENTRY_SIZE(entry) \
    (LARGE_ENTRY_NUM_PAGES(entry) << vm_page_shift)
#define LARGE_ENTRY_MATCHES(entry,ptr) \
    (!(((entry).address_and_num_pages - (unsigned)(ptr)) >> vm_page_shift))
#define LARGE_ENTRY_IS_EMPTY(entry)	(!((entry).address_and_num_pages))
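// LARGE_ENTRY_MATCHES works without decoding: since the page count occupies less than
// one page worth of low bits, (address_and_num_pages - ptr) >> vm_page_shift is zero
// exactly when the page-aligned ptr equals the entry's address.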
typedef compact_range_t	large_entry_t;

/*********************	DEFINITIONS for huge	************************/

typedef vm_range_t	huge_entry_t;
/*********************	zone itself	************************/

typedef struct {
    malloc_zone_t	basic_zone;
    pthread_lock_t	lock;
    unsigned		debug_flags;
    void		*log_address;

    /* Regions for tiny objects */
    unsigned		num_tiny_regions;
    tiny_region_t	*tiny_regions;
    void		*last_tiny_free; // the low SHIFT_TINY_QUANTUM bits encode the msize
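    // Because tiny blocks are TINY_QUANTUM (16 byte) aligned, the low 4 bits of the
    // pointer are free to carry the size; e.g. a cached value of 0x54320 | 3
    // (illustrative address) records a last-freed block at 0x54320 of 3 quanta (48 bytes).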
    unsigned		tiny_bitmap; // cache of the 32 free lists
    free_list_t		*tiny_free_list[NUM_TINY_SLOTS]; // 31 free lists for 1*TINY_QUANTUM to 31*TINY_QUANTUM plus 1 for larger than 31*TINY_QUANTUM
    size_t		tiny_bytes_free_at_end; // the last free region in the last block is treated as a big block in use that is not accounted for
    unsigned		num_tiny_objects;
    unsigned		num_bytes_in_tiny_objects;

    /* Regions for small objects */
    unsigned		num_small_regions;
    small_region_t	*small_regions;
    void		*last_small_free; // the low SHIFT_SMALL_QUANTUM bits encode the msize
    unsigned		small_bitmap; // cache of the free list
    free_list_t		*small_free_list[NUM_SMALL_SLOTS];
    size_t		small_bytes_free_at_end; // the last free region in the last block is treated as a big block in use that is not accounted for
    unsigned		num_small_objects;
    unsigned		num_bytes_in_small_objects;

    /* large objects: vm_page_shift <= log2(size) < 2 * vm_page_shift */
    unsigned		num_large_objects_in_use;
    unsigned		num_large_entries;
    large_entry_t	*large_entries; // hashed by location; null entries don't count
    unsigned		num_bytes_in_large_objects;

    /* huge objects: log2(size) >= 2 * vm_page_shift */
    unsigned char	num_huge_entries;
    huge_entry_t	*huge_entries;
    unsigned		num_bytes_in_huge_objects;

    /* Initial region list */
    tiny_region_t	initial_tiny_regions[INITIAL_NUM_TINY_REGIONS];
    small_region_t	initial_small_regions[INITIAL_NUM_SMALL_REGIONS];
} szone_t;
static void *szone_malloc(szone_t *szone, size_t size);
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested);
static void szone_free(szone_t *szone, void *ptr);
static boolean_t szone_check_all(szone_t *szone, const char *function);
static void szone_print(szone_t *szone, boolean_t verbose);
static void *small_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
#if DEBUG_MALLOC
#define LOG(szone,ptr) \
    (szone->log_address && (((unsigned)szone->log_address == -1) || (szone->log_address == (void *)(ptr))))
#else
#define LOG(szone,ptr)	0
#endif

#define SZONE_LOCK(szone) { \
    LOCK(szone->lock); \
}

#define SZONE_UNLOCK(szone) { \
    UNLOCK(szone->lock); \
}

#define LOCK_AND_NOTE_LOCKED(szone,locked) { \
    CHECK(szone, __PRETTY_FUNCTION__); \
    locked = 1; SZONE_LOCK(szone); \
}

#if DEBUG_MALLOC || DEBUG_CLIENT
#define CHECK(szone,fun) \
    if ((szone)->debug_flags & CHECK_REGIONS) szone_check_all(szone, fun)
#else
#define CHECK(szone,fun)	{}
#endif
/*********************	VERY LOW LEVEL UTILITIES	************************/

#if DEBUG_MALLOC || DEBUG_CLIENT
static void
szone_sleep(void) {
    if (getenv("MallocErrorSleep")) {
        malloc_printf("*** Sleeping to help debug\n");
        sleep(3600); // to help debug
    }
}
#endif

static void
szone_error(szone_t *szone, const char *msg, const void *ptr) {
    if (szone) SZONE_UNLOCK(szone);
    if (ptr) {
        malloc_printf("*** malloc[%d]: error for object %p: %s\n", getpid(), ptr, msg);
    } else {
        malloc_printf("*** malloc[%d]: error: %s\n", getpid(), msg);
    }
#if DEBUG_MALLOC
    szone_print(szone, 1);
    szone_sleep();
#endif
#if DEBUG_CLIENT
    szone_sleep();
#endif
}
static void
protect(szone_t *szone, vm_address_t address, vm_size_t size, unsigned protection, unsigned debug_flags) {
    kern_return_t	err;
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) {
        err = vm_protect(mach_task_self(), address - (1 << vm_page_shift), 1 << vm_page_shift, 0, protection);
        if (err) {
            malloc_printf("*** malloc[%d]: Can't protect(%p) region for "
              "prelude guard page at %p\n", getpid(), protection, address - (1 << vm_page_shift));
        }
    }
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) {
        err = vm_protect(mach_task_self(), (vm_address_t)(address + size), 1 << vm_page_shift, 0, protection);
        if (err) {
            malloc_printf("*** malloc[%d]: Can't protect(%p) region for "
              "postlude guard page at %p\n", getpid(), protection, address + size);
        }
    }
}
static vm_address_t
allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label) {
    // align specifies a desired alignment (as a log) or 0 if no alignment requested
    kern_return_t	err;
    vm_address_t	addr;
    boolean_t		add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    size_t		allocation_size = round_page(size);
    if (align) add_guard_pages = 0; // too cumbersome to deal with that
    if (!allocation_size) allocation_size = 1 << vm_page_shift;
    if (add_guard_pages) allocation_size += 2 * (1 << vm_page_shift);
    if (align) allocation_size += 1 << align;
    err = vm_allocate(mach_task_self(), &addr, allocation_size, vm_page_label | 1);
    if (err) {
        malloc_printf("*** malloc: vm_allocate(size=%d) failed (error code=%d)\n", size, err);
        szone_error(szone, "Can't allocate region", NULL);
        return NULL;
    }
    // malloc_printf("In allocate_pages(size=%d(%p), align=%d) -> %p\n", size, size, align, addr);
    if (align) {
        vm_address_t	aligned_address = (addr + (1 << align) - 1) & ~ ((1 << align) - 1);
        if (aligned_address != addr) {
            size_t	delta = aligned_address - addr;
            err = vm_deallocate(mach_task_self(), addr, delta);
            if (err) malloc_printf("*** malloc: freeing unaligned header failed with %d\n", err);
            // malloc_printf("deallocated unaligned header %p length=%d(%p)\n", addr, delta, delta);
            addr = aligned_address;
            allocation_size -= delta;
        }
        if (allocation_size > size) {
            err = vm_deallocate(mach_task_self(), addr+size, allocation_size - size);
            if (err) malloc_printf("*** malloc: freeing unaligned footer failed with %d\n", err);
        }
    }
    if (add_guard_pages) {
        addr += 1 << vm_page_shift;
        protect(szone, addr, size, 0, debug_flags);
    }
    return addr;
}
static void
deallocate_pages(szone_t *szone, vm_address_t addr, size_t size, unsigned debug_flags) {
    kern_return_t	err;
    boolean_t		add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    if (add_guard_pages) {
        addr -= 1 << vm_page_shift;
        size += 2 * (1 << vm_page_shift);
    }
    err = vm_deallocate(mach_task_self(), addr, size);
    if (err) {
        szone_error(szone, "Can't deallocate_pages region", (void *)addr);
    }
}
static kern_return_t
_szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) {
    *ptr = (void *)address;
    return 0;
}
static INLINE void
free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg) {
    // We always checksum, as testing whether to do it (based on szone->debug_flags) is as fast as doing it
    if (ptr->checksum != (((unsigned)ptr->previous) ^ ((unsigned)ptr->next) ^ CHECKSUM_MAGIC)) {
#if DEBUG_MALLOC
        malloc_printf("*** Incorrect checksum: %s\n", msg);
#endif
        szone_error(szone, "Incorrect checksum for freed object - object was probably modified after being freed; break at szone_error", ptr);
    }
}
static INLINE void
free_list_set_checksum(szone_t *szone, free_list_t *ptr) {
    // We always set checksum, as testing whether to do it (based on
    // szone->debug_flags) is slower than just doing it
    ptr->checksum = ((unsigned)ptr->previous) ^ ((unsigned)ptr->next) ^ CHECKSUM_MAGIC;
}
static unsigned
free_list_count(const free_list_t *ptr) {
    unsigned	count = 0;
    while (ptr) {
        count++;
        // malloc_printf("%p ", ptr);
        ptr = ptr->next;
    }
    return count;
}
#define BITMAP32_SET(bitmap,bit)	(bitmap |= 1 << (bit))
#define BITMAP32_CLR(bitmap,bit)	(bitmap &= ~ (1 << (bit)))
#define BITMAP32_BIT(bitmap,bit)	((bitmap >> (bit)) & 1)

#define BITMAP32_FFS(bitmap)		(ffs(bitmap))
	// returns bit # of first bit that's one, starting at 1 (returns 0 if !bitmap)
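	// e.g. BITMAP32_FFS(0x50) == 5, because the lowest set bit is bit 4;
	// callers subtract 1 to turn that into a free-list slot index.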
/*********************	TINY FREE LIST UTILITIES	************************/

// We encode the meta-headers as follows:
// Each quantum has an associated set of 2 bits:
// block_header when 1 says this block is the beginning of a block
// in_use when 1 says this block is in use
// so a block in use of size 3 is 1-1 0-X 0-X
// for a free block TINY_FREE_SIZE(ptr) carries the size and the bits are 1-0 X-X X-X
// for a block middle the bits are 0-0
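// These two bit arrays live at the end of each tiny region: block_header starts
// (1 << TINY_BLOCKS_ALIGN) past the region base and spans NUM_TINY_BLOCKS >> 3 bytes,
// followed by a 4-byte pad (set to all ones at region creation), then the in_use array.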
// Attention double evaluation for these
#define BITARRAY_SET(bits,index)	(bits[index>>3] |= (1 << (index & 7)))
#define BITARRAY_CLR(bits,index)	(bits[index>>3] &= ~(1 << (index & 7)))
#define BITARRAY_BIT(bits,index)	(((bits[index>>3]) >> (index & 7)) & 1)
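// "Double evaluation" because index expands twice in each macro: BITARRAY_SET(bits, i++)
// would bump i twice, so callers only pass side-effect-free expressions.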
// Following is for start<8 and end<=start+32
#define BITARRAY_MCLR_LESS_32(bits,start,end) { \
    unsigned char	*_bits = (bits); \
    unsigned	_end = (end); \
    switch (_end >> 3) { \
        case 4: _bits[4] &= ~ ((1 << (_end - 32)) - 1); _end = 32; \
        case 3: _bits[3] &= ~ ((1 << (_end - 24)) - 1); _end = 24; \
        case 2: _bits[2] &= ~ ((1 << (_end - 16)) - 1); _end = 16; \
        case 1: _bits[1] &= ~ ((1 << (_end - 8)) - 1); _end = 8; \
        case 0: _bits[0] &= ~ ((1 << _end) - (1 << (start))); \
    } \
}
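// Each case clears the tail of one byte and then deliberately falls through (no break)
// to the lower bytes, so a run spanning up to five bytes is cleared without a loop.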
#if 0 // Simple but slow version
#warning Slow version in effect
#define BITARRAY_MCLR(bits,index,num) { \
    unsigned	_ctr = (num); \
    unsigned	_cur = (index); \
    while (_ctr--) {BITARRAY_CLR(bits,_cur); _cur++; } \
}
#else

// Following is for num <= 32
#define BITARRAY_MCLR(bits,index,num) { \
    unsigned	_index = (index); \
    unsigned char	*_rebased = (bits) + (_index >> 3); \
    _index &= 7; \
    BITARRAY_MCLR_LESS_32(_rebased, _index, _index + (num)); \
}
#endif
static INLINE msize_t
get_tiny_meta_header(const void *ptr, boolean_t *is_free) {
    // returns msize and is_free
    // may return 0 for the msize component (meaning 65536)
    unsigned short	shifted_base = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    unsigned		headers_start = (shifted_base + 1) << TINY_BLOCKS_ALIGN;
    unsigned char	*block_header = (unsigned char *)headers_start;
    msize_t		index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
    unsigned		byte_index = index >> 3;
    block_header += byte_index;
    index &= 7;
    *is_free = 0;
    if (!BITMAP32_BIT(*block_header, index)) return 0;
    unsigned char	*in_use = block_header + (NUM_TINY_BLOCKS >> 3) + 4;
    if (!BITMAP32_BIT(*in_use, index)) {
        *is_free = 1;
        return TINY_FREE_SIZE(ptr);
    }
#if defined(__BIG_ENDIAN__)
    unsigned	*addr = (void *)((unsigned)block_header & ~3);
    unsigned	word0 = OSReadSwapInt32(addr, 0);
    unsigned	word1 = OSReadSwapInt32(addr, 4);
    unsigned	bits = index + (((unsigned)block_header & 3) * 8);
    unsigned	word = (word0 >> bits) | (word1 << (32 - bits));
    unsigned	result = ffs(word >> 1);
#if DEBUG_MALLOC
    if (result >= 32) {
        malloc_printf("*** get_tiny_meta_header() invariant broken %p %d\n", ptr, result);
    }
#endif
    return result;
#else
    unsigned	cur = index + 1;
    while (!BITARRAY_BIT(block_header, cur)) cur++; // assumes padding at the zone end
#if DEBUG_MALLOC
    if (cur - index >= 32) {
        malloc_printf("*** get_tiny_meta_header() invariant broken %p %d %d\n", ptr, index, cur);
    }
#endif
    return cur - index;
#endif
}
static INLINE void
set_tiny_meta_header_in_use(const void *ptr, msize_t msize) {
    unsigned short	shifted_base = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    unsigned		headers_start = (shifted_base + 1) << TINY_BLOCKS_ALIGN;
    unsigned char	*block_header = (unsigned char *)headers_start;
    msize_t		index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
#if DEBUG_MALLOC
    if (msize >= 32) malloc_printf("*** set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize);
    if ((unsigned)index + (unsigned)msize > 0x10000) malloc_printf("*** set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize);
#endif
    unsigned	byte_index = index >> 3;
    block_header += byte_index;
    index &= 7;
    BITMAP32_SET(*block_header, index);
    unsigned char	*in_use = block_header + (NUM_TINY_BLOCKS >> 3) + 4;
    BITMAP32_SET(*in_use, index);
    index++;
    msize_t	clr_msize = msize-1;
    if (clr_msize) {
        byte_index = index >> 3;
        block_header += byte_index; in_use += byte_index;
        index &= 7;
        unsigned	end_bit = index + clr_msize;
        BITARRAY_MCLR_LESS_32(block_header, index, end_bit);
        BITARRAY_MCLR_LESS_32(in_use, index, end_bit);
    }
    BITARRAY_SET(block_header, index+clr_msize); // we set the block_header bit for the following block to reaffirm next block is a block
#if DEBUG_MALLOC
    boolean_t	ff;
    msize_t	mf = get_tiny_meta_header(ptr, &ff);
    if (msize != mf) {
        malloc_printf("*** setting header for tiny in_use %p : %d\n", ptr, msize);
        malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
    }
#endif
}
static INLINE void
set_tiny_meta_header_middle(const void *ptr) {
    // indicates this block is in the middle of an in use block
    unsigned short	shifted_base = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    unsigned		headers_start = (shifted_base + 1) << TINY_BLOCKS_ALIGN;
    unsigned char	*block_header = (unsigned char *)headers_start;
    unsigned char	*in_use = (unsigned char *)(headers_start + (NUM_TINY_BLOCKS >> 3) + 4);
    msize_t		index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
    BITARRAY_CLR(block_header, index); BITARRAY_CLR(in_use, index);
    TINY_FREE_SIZE(ptr) = 0;
}
static INLINE void
set_tiny_meta_header_free(const void *ptr, msize_t msize) {
    // !msize is acceptable and means 65536
    unsigned short	shifted_base = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    unsigned		headers_start = (shifted_base + 1) << TINY_BLOCKS_ALIGN;
    unsigned char	*block_header = (unsigned char *)headers_start;
    unsigned char	*in_use = (unsigned char *)(headers_start + (NUM_TINY_BLOCKS >> 3) + 4);
    msize_t		index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
#if DEBUG_MALLOC
    if ((unsigned)index + (unsigned)msize > 0x10000) {
        malloc_printf("*** setting header for tiny free %p msize too large: %d\n", ptr, msize);
    }
#endif
    BITARRAY_SET(block_header, index); BITARRAY_CLR(in_use, index);
    TINY_FREE_SIZE(ptr) = msize;
    // mark the end of this block
    if (msize) { // msize==0 => the whole region is free
        void	*follower = FOLLOWING_TINY_PTR(ptr, msize);
        TINY_PREVIOUS_MSIZE(follower) = msize;
    }
#if DEBUG_MALLOC
    boolean_t	ff;
    msize_t	mf = get_tiny_meta_header(ptr, &ff);
    if ((msize != mf) || !ff) {
        malloc_printf("*** setting header for tiny free %p : %d\n", ptr, msize);
        malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
    }
#endif
}
static INLINE boolean_t
tiny_meta_header_is_free(const void *ptr) {
    // returns whether the block pointed to by ptr is free
    unsigned short	shifted_base = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    unsigned		headers_start = (shifted_base + 1) << TINY_BLOCKS_ALIGN;
    unsigned char	*block_header = (unsigned char *)headers_start;
    unsigned char	*in_use = (unsigned char *)(headers_start + (NUM_TINY_BLOCKS >> 3) + 4);
    msize_t		index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
    if (!BITARRAY_BIT(block_header, index)) return 0;
    return !BITARRAY_BIT(in_use, index);
}
static void *
tiny_previous_preceding_free(void *ptr, msize_t *prev_msize) {
    // returns the previous block, assuming and verifying it's free
    unsigned short	shifted_base = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    unsigned		headers_start = (shifted_base + 1) << TINY_BLOCKS_ALIGN;
    unsigned char	*block_header = (unsigned char *)headers_start;
    unsigned char	*in_use = (unsigned char *)(headers_start + (NUM_TINY_BLOCKS >> 3) + 4);
    msize_t		index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
    if (!index) return NULL;
    msize_t	previous_msize = TINY_PREVIOUS_MSIZE(ptr);
    if (previous_msize > index) return NULL;
    msize_t	previous_index = index - previous_msize;
    void	*previous_ptr = (void *)((shifted_base << TINY_BLOCKS_ALIGN) + (previous_index << SHIFT_TINY_QUANTUM));
    if (TINY_FREE_SIZE(previous_ptr) != previous_msize) return NULL;
    if (!BITARRAY_BIT(block_header, previous_index)) return NULL;
    if (BITARRAY_BIT(in_use, previous_index)) return NULL;
    // conservative check did match true check
    *prev_msize = previous_msize;
    // malloc_printf("tiny_previous_preceding_free(%p) -> %p,%d\n", ptr, previous_ptr, previous_msize);
    return previous_ptr;
}
static void
tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Adds an item to the proper free list
    // Also marks the meta-header of the block properly
    // Assumes szone has been locked
    grain_t	slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
    free_list_t	*free_ptr = ptr;
    free_list_t	*free_head = szone->tiny_free_list[slot];
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("In tiny_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (((unsigned)ptr) & (TINY_QUANTUM - 1)) {
        szone_error(szone, "tiny_free_list_add_ptr: Unaligned ptr", ptr);
    }
#endif
    set_tiny_meta_header_free(ptr, msize);
    if (free_head) {
        free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
#if DEBUG_MALLOC
        if (free_head->previous) {
            malloc_printf("ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, free_head, free_head->previous);
            szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
        }
        if (! tiny_meta_header_is_free(free_head)) {
            malloc_printf("ptr=%p slot=%d free_head=%p\n", ptr, slot, free_head);
            szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
        }
#endif
        free_head->previous = free_ptr;
        free_list_set_checksum(szone, free_head);
    } else {
        BITMAP32_SET(szone->tiny_bitmap, slot);
    }
    free_ptr->previous = NULL;
    free_ptr->next = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->tiny_free_list[slot] = free_ptr;
    // malloc_printf("Setting head of free list for slot=%d to %p\n", slot, free_ptr);
}
static void
tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Removes item in the proper free list
    // msize could be read, but all callers have it so we pass it in
    // Assumes szone has been locked
    grain_t	slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
    free_list_t	*free_ptr = ptr;
    free_list_t	*next = free_ptr->next;
    free_list_t	*previous = free_ptr->previous;
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("In tiny_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    }
#endif
    free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
    if (!previous) {
        // The block to remove is the head of the free list
#if DEBUG_MALLOC
        if (szone->tiny_free_list[slot] != ptr) {
            malloc_printf("ptr=%p slot=%d msize=%d szone->tiny_free_list[slot]=%p\n", ptr, slot, msize, szone->tiny_free_list[slot]);
            szone_error(szone, "tiny_free_list_remove_ptr: Internal invariant broken (szone->tiny_free_list[slot])", ptr);
            return;
        }
#endif
        szone->tiny_free_list[slot] = next;
        if (!next) BITMAP32_CLR(szone->tiny_bitmap, slot);
    } else {
        previous->next = next;
        free_list_set_checksum(szone, previous);
    }
    if (next) {
        next->previous = previous;
        free_list_set_checksum(szone, next);
    }
}
static INLINE tiny_region_t *
tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr) {
    tiny_region_t	*region = szone->tiny_regions;
    unsigned		num_regions = szone->num_tiny_regions;
    unsigned		ptr_shifted = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    while (num_regions--) {
        tiny_region_t	this = *region;
        if (ptr_shifted == this) return region;
        region++;
    }
    return NULL;
}
static INLINE void
tiny_free_no_lock(szone_t *szone, tiny_region_t *region, void *ptr, msize_t msize) {
    size_t	original_size = msize << SHIFT_TINY_QUANTUM;
    void	*next_block = ((char *)ptr + original_size);
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("In tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (! msize) {
        malloc_printf("In tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
        szone_error(szone, "Trying to free tiny block that is too small", ptr);
    }
#endif
    // We try to coalesce this block with the preceding one
    msize_t	previous_msize;
    void	*previous = tiny_previous_preceding_free(ptr, &previous_msize);
    if (previous) {
#if DEBUG_MALLOC
        if (LOG(szone, ptr) || LOG(szone,previous)) {
            malloc_printf("In tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
        }
#endif
        tiny_free_list_remove_ptr(szone, previous, previous_msize);
        ptr = previous;
        msize += previous_msize;
    }
    // We try to coalesce with the next block
    if (((vm_address_t)next_block < TINY_REGION_END(*region)) && tiny_meta_header_is_free(next_block)) {
        // The next block is free, we coalesce
        msize_t	next_msize = TINY_FREE_SIZE(next_block);
#if DEBUG_MALLOC
        if (LOG(szone, ptr) || LOG(szone, next_block)) {
            malloc_printf("In tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n", ptr, msize, next_block, next_msize);
        }
#endif
        if (next_msize >= NUM_TINY_SLOTS) {
            // we take a short cut here to avoid removing next_block from the slot 31 freelist and then adding ptr back to slot 31
            // malloc_printf("Replacing %p(msize=%d) with %p(msize=%d) in freelist\n", next_block, next_msize, ptr, msize+next_msize);
            msize += next_msize;
            free_list_t	*big_free_block = (free_list_t *)next_block;
            free_list_t	*after_next_block = big_free_block->next;
            free_list_t	*before_next_block = big_free_block->previous;
            free_list_checksum(szone, big_free_block, __PRETTY_FUNCTION__);
            if (!before_next_block) {
                szone->tiny_free_list[NUM_TINY_SLOTS-1] = ptr;
            } else {
                before_next_block->next = ptr;
                free_list_set_checksum(szone, before_next_block);
            }
            if (after_next_block) {
                after_next_block->previous = ptr;
                free_list_set_checksum(szone, after_next_block);
            }
            ((free_list_t *)ptr)->previous = before_next_block;
            ((free_list_t *)ptr)->next = after_next_block;
            free_list_set_checksum(szone, ptr);
            set_tiny_meta_header_free(ptr, msize);
            set_tiny_meta_header_middle(big_free_block); // clear the meta_header to enable coalescing backwards
            goto tiny_free_ending;
        }
        tiny_free_list_remove_ptr(szone, next_block, next_msize);
        set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
        msize += next_msize;
    }
    if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize) {
        memset(ptr, 0x55, msize << SHIFT_TINY_QUANTUM);
    }
    tiny_free_list_add_ptr(szone, ptr, msize);
tiny_free_ending:
    // When in proper debug mode we write on the memory to help debug memory smashers
    szone->num_tiny_objects--;
    szone->num_bytes_in_tiny_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
}
static void *
tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize) {
    // Allocates from the last region or a freshly allocated region
    // Before anything we transform the tiny_bytes_free_at_end - if any - to a regular free block
    if (szone->tiny_bytes_free_at_end) {
        tiny_region_t	last_region = szone->tiny_regions[szone->num_tiny_regions-1];
        void	*last_block = (void *)(TINY_REGION_END(last_region) - szone->tiny_bytes_free_at_end);
        tiny_free_list_add_ptr(szone, last_block, szone->tiny_bytes_free_at_end >> SHIFT_TINY_QUANTUM);
        szone->tiny_bytes_free_at_end = 0;
    }
    void	*ptr;
    // time to create a new region
    vm_address_t	aligned_address = allocate_pages(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_TINY));
    if (! aligned_address) {
        return NULL;
    }
    // malloc_printf("Allocated tiny region #%d: %p [%y]\n", szone->num_tiny_regions, aligned_address, TINY_REGION_SIZE);
    // We set the padding after block_header to be all 1
    ((unsigned *)(aligned_address + (1 << TINY_BLOCKS_ALIGN) + (NUM_TINY_BLOCKS >> 3)))[0] = ~0;
    if (szone->num_tiny_regions == INITIAL_NUM_TINY_REGIONS) {
        tiny_region_t	*new_regions;
        // malloc_printf("=== Growing tiny_regions (%d regions)\n", szone->num_tiny_regions);
        new_regions = small_malloc_from_region_no_lock(szone, 16); // 16 * 512 bytes is plenty of tiny regions (more than 4,000)
        if (!new_regions) return NULL;
        memcpy(new_regions, szone->tiny_regions, INITIAL_NUM_TINY_REGIONS * sizeof(tiny_region_t));
        szone->tiny_regions = new_regions; // we set the pointer after it's all ready to enable enumeration from another thread without locking
    }
    szone->tiny_regions[szone->num_tiny_regions] = aligned_address >> TINY_BLOCKS_ALIGN;
    szone->num_tiny_regions++; // we set the number after the pointer is all ready to enable enumeration from another thread without taking the lock
    ptr = (void *)aligned_address;
    set_tiny_meta_header_in_use(ptr, msize);
    szone->num_tiny_objects++;
    szone->num_bytes_in_tiny_objects += msize << SHIFT_TINY_QUANTUM;
    // We put a header on the last block so that it appears in use (for coalescing, etc...)
    set_tiny_meta_header_in_use(ptr + (msize << SHIFT_TINY_QUANTUM), 1);
    szone->tiny_bytes_free_at_end = (NUM_TINY_BLOCKS - msize) << SHIFT_TINY_QUANTUM;
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("In tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
#endif
    return ptr;
}
static INLINE boolean_t
try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) {
    // returns 1 on success
    msize_t	index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
    msize_t	old_msize = old_size >> SHIFT_TINY_QUANTUM;
    unsigned	next_index = index + old_msize;
    // malloc_printf("try_realloc_tiny_in_place %p %d %d\n", ptr, old_size, new_size);
    if (next_index >= NUM_TINY_BLOCKS) {
        // malloc_printf("try_realloc_tiny_in_place can't take place at end %p %d %d %d\n", ptr, old_size, new_size, next_index);
        return 0;
    }
    void	*next_block = (char *)ptr + old_size;
    SZONE_LOCK(szone);
    boolean_t	is_free = tiny_meta_header_is_free(next_block);
    if (!is_free) {
        SZONE_UNLOCK(szone);
        return 0; // next_block is in use;
    }
    msize_t	next_msize = TINY_FREE_SIZE(next_block);
    if (old_size + (next_msize << SHIFT_TINY_QUANTUM) < new_size) {
        // malloc_printf("try_realloc_tiny_in_place can't %p too small %d\n", next_block, next_msize);
        SZONE_UNLOCK(szone);
        return 0; // even with next block, not enough
    }
    tiny_free_list_remove_ptr(szone, next_block, next_msize);
    set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
    msize_t	coalesced_msize = (new_size - old_size + TINY_QUANTUM - 1) >> SHIFT_TINY_QUANTUM;
    msize_t	leftover_msize = next_msize - coalesced_msize;
    // malloc_printf("Realloc in place for %p; current size=%d next_msize=%d wanted=%d\n", ptr, old_size, next_msize, new_size);
    if (leftover_msize) {
        void	*leftover = next_block + (coalesced_msize << SHIFT_TINY_QUANTUM);
        // malloc_printf("Leftover in realloc in place %p leftover_msize=%d\n", leftover, leftover_msize);
        tiny_free_list_add_ptr(szone, leftover, leftover_msize);
    }
    set_tiny_meta_header_in_use(ptr, old_msize + coalesced_msize);
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("In try_realloc_tiny_in_place(), ptr=%p, msize=%d\n", ptr, old_msize + coalesced_msize);
    }
#endif
    szone->num_bytes_in_tiny_objects += coalesced_msize << SHIFT_TINY_QUANTUM;
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    // malloc_printf("Extended ptr %p for realloc old=%d desired=%d new=%d leftover=%d\n", ptr, (unsigned)old_size, (unsigned)new_size, (unsigned)szone_size(szone, ptr), leftover_msize << SHIFT_TINY_QUANTUM);
    return 1;
}
static boolean_t
szone_check_tiny_region(szone_t *szone, tiny_region_t *region) {
    vm_address_t	start = TINY_REGION_ADDRESS(*region);
    void		*ptr = (void *)start;
    vm_address_t	region_end = TINY_REGION_END(*region);
    boolean_t		prev_free = 0;
    if (region == szone->tiny_regions + szone->num_tiny_regions - 1) region_end -= szone->tiny_bytes_free_at_end;
    // malloc_printf("szone_check_tiny_region: szone=%p region=%p start=%p ptr=%p region_end=%p\n", szone, region, start, ptr, region_end);
    while ((vm_address_t)ptr < region_end) {
        boolean_t	is_free;
        msize_t		msize = get_tiny_meta_header(ptr, &is_free);
        if (is_free && !msize && (ptr == (void *)start)) {
            // the entire region is free
            return 1;
        }
        // malloc_printf("tiny %p [%d %d]\n", ptr, msize, is_free);
        if (! msize) {
            malloc_printf("*** malloc[%d]: invariant broken for tiny block %p this msize=%d - size is too small\n", getpid(), ptr, msize);
            return 0;
        }
        if (! is_free) {
            // this block is in use
            prev_free = 0;
            if (msize > 31*TINY_QUANTUM) {
                malloc_printf("*** malloc[%d]: invariant broken for %p this tiny msize=%d[%p] - size is too large\n", getpid(), ptr, msize, msize);
                return 0;
            }
            ptr += msize << SHIFT_TINY_QUANTUM;
        } else {
            // free pointer
            if (prev_free) {
                malloc_printf("*** malloc[%d]: invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n", getpid(), ptr, msize);
                return 0;
            }
            prev_free = 1;
            free_list_t	*free_head = ptr;
            free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
            if (free_head->previous && !tiny_meta_header_is_free(free_head->previous)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (previous %p is not a free pointer)\n", getpid(), ptr, free_head->previous);
                return 0;
            }
            if (free_head->next && !tiny_meta_header_is_free(free_head->next)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (next in free list %p is not a free pointer)\n", getpid(), ptr, free_head->next);
                return 0;
            }
            void	*follower = FOLLOWING_TINY_PTR(ptr, msize);
            if ((follower != (void *)region_end) && (TINY_PREVIOUS_MSIZE(follower) != msize)) {
                malloc_printf("*** malloc[%d]: invariant broken for tiny free %p followed by %p in region [%p-%p] (end marker incorrect) should be %d; in fact %d\n", getpid(), ptr, follower, TINY_REGION_ADDRESS(*region), region_end, msize, TINY_PREVIOUS_MSIZE(follower));
                return 0;
            }
            ptr = follower;
        }
    }
    if (ptr != (void *)region_end) {
        malloc_printf("*** malloc[%d]: invariant broken for region end %p - %p\n", getpid(), ptr, region_end);
        return 0;
    }
    if (region == szone->tiny_regions + szone->num_tiny_regions - 1) {
        if (szone->tiny_bytes_free_at_end) {
            boolean_t	is_free;
            msize_t	msize = get_tiny_meta_header(ptr, &is_free);
            if (is_free || (msize != 1)) {
                malloc_printf("*** malloc[%d]: invariant broken for blocker block %p - %d %d\n", getpid(), ptr, msize, is_free);
            }
        }
    }
    return 1;
}
static kern_return_t
tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t tiny_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder) {
    tiny_region_t	*regions;
    unsigned		index = 0;
    vm_range_t		buffer[MAX_RECORDER_BUFFER];
    unsigned		count = 0;
    kern_return_t	err;
    err = reader(task, region_address, sizeof(tiny_region_t) * num_regions, (void **)&regions);
    if (err) return err;
    while (index < num_regions) {
        // unsigned	num_in_use = 0;
        // unsigned	num_free = 0;
        tiny_region_t	region = regions[index];
        vm_range_t	range = {TINY_REGION_ADDRESS(region), TINY_REGION_SIZE};
        // malloc_printf("Enumerating tiny ptrs for tiny region starting at %p\n", range.address);
        if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
            vm_range_t	admin_range = {range.address + (1 << TINY_BLOCKS_ALIGN), range.size - (1 << TINY_BLOCKS_ALIGN)};
            recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
        }
        if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
            vm_range_t	ptr_range = {range.address, 1 << TINY_BLOCKS_ALIGN};
            recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
        }
        if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
            unsigned char	*mapped_region;
            err = reader(task, range.address, range.size, (void **)&mapped_region);
            if (err) return err;
            unsigned char	*block_header = (unsigned char *)(mapped_region + (1 << TINY_BLOCKS_ALIGN));
            unsigned char	*in_use = block_header + (NUM_TINY_BLOCKS >> 3) + 4;
            unsigned		block_index = 0;
            unsigned		block_limit = NUM_TINY_BLOCKS;
            if (index == num_regions - 1)
                block_limit -= (tiny_bytes_free_at_end >> SHIFT_TINY_QUANTUM);
            while (block_index < block_limit) {
                boolean_t	is_free = ! BITARRAY_BIT(in_use, block_index);
                msize_t		msize;
                if (is_free) {
                    void	*mapped_ptr = mapped_region + (block_index << SHIFT_TINY_QUANTUM);
                    msize = TINY_FREE_SIZE(mapped_ptr);
                    // printf("free: index=%x mapped=%p true_addr=%p msize=%d\n", block_index, mapped_ptr, (void *)range.address + (block_index << SHIFT_TINY_QUANTUM), msize);
                    if (!msize) break;
                } else {
                    msize = 1;
                    unsigned	bit = block_index + 1;
                    while (! BITARRAY_BIT(block_header, bit)) { bit++; msize++; }
                    // printf("in_use: index=%x true_addr=%p msize=%d\n", block_index, (void *)range.address + (block_index << SHIFT_TINY_QUANTUM), msize);
                    buffer[count].address = range.address + (block_index << SHIFT_TINY_QUANTUM);
                    buffer[count].size = msize << SHIFT_TINY_QUANTUM;
                    count++;
                    if (count >= MAX_RECORDER_BUFFER) {
                        recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
                        count = 0;
                    }
                }
                block_index += msize;
            }
        }
        // malloc_printf("Found in tiny region %d in_use and %d free\n", num_in_use, num_free);
        index++;
    }
    if (count) {
        recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
    }
    return 0;
}
static INLINE void *
tiny_malloc_from_free_list(szone_t *szone, msize_t msize) {
    // Assumes we've locked the region
    void	*ptr;
    msize_t	this_msize;
    grain_t	slot = msize-1;
    free_list_t	**free_list = szone->tiny_free_list;
    free_list_t	**the_slot = free_list + slot;
    ptr = *the_slot;
    if (ptr) {
        free_list_t	*next;
        next = ((free_list_t *)ptr)->next;
        if (next) {
            next->previous = NULL;
            free_list_set_checksum(szone, next);
        }
        *the_slot = next;
        this_msize = msize;
#if DEBUG_MALLOC
        if (LOG(szone,ptr)) {
            malloc_printf("In tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize);
        }
#endif
        goto return_tiny_alloc;
    }
    // adjust slot based on bitmap
    unsigned	bitmap = szone->tiny_bitmap & ~ ((1 << slot) - 1);
    if (! bitmap) goto try_tiny_malloc_from_end;
    slot = BITMAP32_FFS(bitmap) - 1;
    free_list_t	**limit = free_list + NUM_TINY_SLOTS - 1;
    free_list += slot;
    while (free_list < limit) {
        // try bigger grains
        ptr = *free_list;
        if (ptr) {
            free_list_t	*next;
            next = ((free_list_t *)ptr)->next;
            if (next) {
                next->previous = NULL;
                free_list_set_checksum(szone, next);
            }
            *free_list = next;
            this_msize = TINY_FREE_SIZE(ptr);
#if DEBUG_MALLOC
            if (LOG(szone,ptr)) {
                malloc_printf("In tiny_malloc_from_free_list(), bigger grain ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
            }
#endif
            goto add_leftover_and_proceed;
        }
        free_list++;
    }
    // we are now looking at the last slot (31)
    ptr = *limit;
    if (ptr) {
        free_list_t	*next;
        this_msize = TINY_FREE_SIZE(ptr);
        next = ((free_list_t *)ptr)->next;
        if (this_msize - msize >= NUM_TINY_SLOTS) {
            // the leftover will go back to the free list, so we optimize by modifying the free list rather than removing the head and then adding back
            // malloc_printf("Allocation from largest tiny slot %p optimized\n", ptr);
            msize_t	leftover_msize = this_msize - msize;
            void	*leftover_ptr = ptr + (msize << SHIFT_TINY_QUANTUM);
            *limit = leftover_ptr;
            if (next) {
                next->previous = leftover_ptr;
                free_list_set_checksum(szone, next);
            }
            ((free_list_t *)leftover_ptr)->next = next;
            ((free_list_t *)leftover_ptr)->previous = NULL;
            free_list_set_checksum(szone, leftover_ptr);
            set_tiny_meta_header_free(leftover_ptr, leftover_msize);
#if DEBUG_MALLOC
            if (LOG(szone,ptr)) {
                malloc_printf("In tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
            }
#endif
            this_msize = msize;
            goto return_tiny_alloc;
        }
        if (next) {
            next->previous = NULL;
            free_list_set_checksum(szone, next);
        }
        *limit = next;
        goto add_leftover_and_proceed;
    }
try_tiny_malloc_from_end:
    // Let's see if we can use szone->tiny_bytes_free_at_end
    if (szone->tiny_bytes_free_at_end >= (msize << SHIFT_TINY_QUANTUM)) {
        ptr = (void *)(TINY_REGION_END(szone->tiny_regions[szone->num_tiny_regions-1]) - szone->tiny_bytes_free_at_end);
        szone->tiny_bytes_free_at_end -= msize << SHIFT_TINY_QUANTUM;
        if (szone->tiny_bytes_free_at_end) {
            // let's add an in use block after ptr to serve as boundary
            set_tiny_meta_header_in_use(ptr + (msize << SHIFT_TINY_QUANTUM), 1);
        }
        this_msize = msize;
#if DEBUG_MALLOC
        if (LOG(szone,ptr)) {
            malloc_printf("In tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize);
        }
#endif
        goto return_tiny_alloc;
    }
    return NULL;
add_leftover_and_proceed:
    // malloc_printf("For msize=%d found tiny in free_list (slot=%d) this_msize=%d\n", msize, free_list - szone->tiny_free_list, this_msize);
    if (!this_msize || (this_msize > msize)) {
        msize_t	leftover_msize = this_msize - msize;
        void	*leftover_ptr = ptr + (msize << SHIFT_TINY_QUANTUM);
#if DEBUG_MALLOC
        if (LOG(szone,ptr)) {
            malloc_printf("In tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
        }
#endif
        tiny_free_list_add_ptr(szone, leftover_ptr, leftover_msize);
        this_msize = msize;
    }
return_tiny_alloc:
    szone->num_tiny_objects++;
    szone->num_bytes_in_tiny_objects += this_msize << SHIFT_TINY_QUANTUM;
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("In tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
    }
#endif
    set_tiny_meta_header_in_use(ptr, this_msize);
    return ptr;
}
static INLINE void *
tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) {
    boolean_t	locked = 0;
    void	*ptr;
#if DEBUG_MALLOC
    if (! msize) {
        szone_error(szone, "Invariant broken (!msize) in allocation (region)", NULL);
    }
#endif
#if TINY_CACHE
    ptr = (void *)szone->last_tiny_free;
    if ((((unsigned)ptr) & (TINY_QUANTUM - 1)) == msize) {
        // we have a candidate - let's lock to make sure
        LOCK_AND_NOTE_LOCKED(szone, locked);
        if (ptr == (void *)szone->last_tiny_free) {
            szone->last_tiny_free = NULL;
            // malloc_printf("using last_tiny_free\n");
            SZONE_UNLOCK(szone);
            CHECK(szone, __PRETTY_FUNCTION__);
            ptr = (void *)((unsigned)ptr & ~ (TINY_QUANTUM - 1));
            if (cleared_requested) {
                memset(ptr, 0, msize << SHIFT_TINY_QUANTUM);
            }
#if DEBUG_MALLOC
            if (LOG(szone,ptr)) {
                malloc_printf("In tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize);
            }
#endif
            return ptr;
        }
        // malloc_printf("optimistic locking for last_tiny_free failed\n");
    }
#endif
    // Except in rare occasions where we need to add a new region, we are going to end up locking, so we might as well lock right away to avoid doing unnecessary optimistic probes
    if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked);
    ptr = tiny_malloc_from_free_list(szone, msize);
    // malloc_printf("tiny_malloc_from_free_list(%d) returned %p\n", msize, ptr);
    if (ptr) {
        SZONE_UNLOCK(szone);
        CHECK(szone, __PRETTY_FUNCTION__);
        if (cleared_requested) {
            memset(ptr, 0, msize << SHIFT_TINY_QUANTUM);
        }
        return ptr;
    }
    ptr = tiny_malloc_from_region_no_lock(szone, msize);
    // malloc_printf("tiny_malloc_from_region_no_lock returned %p for msize=%d\n", ptr, msize);
    // we don't clear because this freshly allocated space is pristine
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    return ptr;
}
static void
free_tiny(szone_t *szone, void *ptr, tiny_region_t *tiny_region) {
    // ptr is known to be in tiny_region
    SZONE_LOCK(szone);
#if TINY_CACHE
    void	*ptr2 = szone->last_tiny_free;
    if (ptr == (void *)((unsigned)ptr2 & ~ (TINY_QUANTUM - 1))) {
        szone_error(szone, "Double free", ptr);
        return;
    }
#endif /* TINY_CACHE */
    boolean_t	is_free;
    msize_t	msize = get_tiny_meta_header(ptr, &is_free);
    if (is_free) {
        szone_error(szone, "Double free", ptr);
        return;
    }
    // malloc_printf("%p[%x]\n", ptr, msize);
#if DEBUG_MALLOC
    if (!msize) {
        malloc_printf("*** szone_free() block in use is too large: %p\n", ptr);
    }
#endif
#if TINY_CACHE
    if (msize < TINY_QUANTUM) { // to see if the bits fit in the last 4 bits
        szone->last_tiny_free = (void *)(((unsigned)ptr) | msize);
        if (!ptr2) {
            // malloc_printf("stuffing last_tiny_free\n");
            SZONE_UNLOCK(szone);
            CHECK(szone, __PRETTY_FUNCTION__);
            return;
        }
        // malloc_printf("replacing previous last_tiny_free %p with %p\n", ptr2, szone->last_tiny_free);
        msize = (unsigned)ptr2 & (TINY_QUANTUM - 1);
        ptr = (void *)(((unsigned)ptr2) & ~ (TINY_QUANTUM - 1));
        tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
        if (!tiny_region) {
            szone_error(szone, "Double free (tiny cache)", ptr);
        }
    }
#endif
    tiny_free_no_lock(szone, tiny_region, ptr, msize);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
}
static void
print_tiny_free_list(szone_t *szone) {
    grain_t	slot = 0;
    malloc_printf("Tiny free sizes: ");
    while (slot < NUM_TINY_SLOTS) {
        free_list_t	*ptr = szone->tiny_free_list[slot];
        if (ptr) {
            malloc_printf("%s%y[%d]; ", (slot == NUM_TINY_SLOTS-1) ? ">=" : "", (slot+1)*TINY_QUANTUM, free_list_count(ptr));
        }
        slot++;
    }
    malloc_printf("\n");
}
static void
print_tiny_region(boolean_t verbose, tiny_region_t region, size_t bytes_at_end) {
    unsigned		counts[1024];
    unsigned		in_use = 0;
    vm_address_t	start = TINY_REGION_ADDRESS(region);
    vm_address_t	current = start;
    vm_address_t	limit = TINY_REGION_END(region) - bytes_at_end;
    memset(counts, 0, 1024 * sizeof(unsigned));
    while (current < limit) {
        boolean_t	is_free;
        msize_t		msize = get_tiny_meta_header((void *)current, &is_free);
        // malloc_printf("%p [%d %d]; ", current, msize, is_free);
        if (is_free && !msize && (current == start)) {
            // first block is all free
            break;
        }
        if (!msize) {
            malloc_printf("*** Error with %p: msize=%d\n", current, msize);
            break;
        }
        if (! is_free) {
            // block in use
            if (msize > 32) malloc_printf("*** Error at %p msize for in_use is %d\n", current, msize);
            if (msize < 1024) counts[msize]++;
            in_use++;
        }
        current += msize << SHIFT_TINY_QUANTUM;
    }
    malloc_printf("Tiny region [%p-%p, %y]\t", start, TINY_REGION_END(region), (int)TINY_REGION_SIZE);
    malloc_printf("In_use=%d ", in_use);
    if (bytes_at_end) malloc_printf("Untouched=%y ", bytes_at_end);
    if (verbose && in_use) {
        unsigned	ci = 0;
        malloc_printf("\n\tSizes in use: ");
        while (ci < 1024) {
            if (counts[ci]) {
                malloc_printf("%d[%d] ", ci << SHIFT_TINY_QUANTUM, counts[ci]);
            }
            ci++;
        }
    }
    malloc_printf("\n");
}
static boolean_t
tiny_free_list_check(szone_t *szone, grain_t slot) {
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    unsigned	count = 0;
    free_list_t	*ptr = szone->tiny_free_list[slot];
    free_list_t	*previous = NULL;
    while (ptr) {
        free_list_checksum(szone, ptr, __PRETTY_FUNCTION__);
        boolean_t	is_free = tiny_meta_header_is_free(ptr);
        if (! is_free) {
            malloc_printf("*** malloc[%d]: In-use ptr in free list slot=%d count=%d ptr=%p\n", getpid(), slot, count, ptr);
            return 0;
        }
        if (((unsigned)ptr) & (TINY_QUANTUM - 1)) {
            malloc_printf("*** malloc[%d]: Unaligned ptr in free list slot=%d count=%d ptr=%p\n", getpid(), slot, count, ptr);
            return 0;
        }
        if (!tiny_region_for_ptr_no_lock(szone, ptr)) {
            malloc_printf("*** malloc[%d]: Ptr not in szone slot=%d count=%d ptr=%p\n", getpid(), slot, count, ptr);
            return 0;
        }
        if (ptr->previous != previous) {
            malloc_printf("*** malloc[%d]: Previous incorrectly set slot=%d count=%d ptr=%p\n", getpid(), slot, count, ptr);
            return 0;
        }
        previous = ptr;
        ptr = ptr->next;
        count++;
    }
    // malloc_printf("tiny_free_list_check passed\n");
    return 1;
}
/*********************	SMALL FREE LIST UTILITIES	************************/

static INLINE msize_t *
small_meta_headers(const void *ptr) {
    // returns address of meta info
    unsigned short	shifted_base = ((unsigned)ptr) >> SMALL_BLOCKS_ALIGN;
    unsigned		headers_start = (shifted_base + 1) << SMALL_BLOCKS_ALIGN;
    return (msize_t *)headers_start;
}

static INLINE msize_t
small_meta_index(const void *ptr) {
    // returns index of meta info
    return (((unsigned)ptr) >> SHIFT_SMALL_QUANTUM) & (NUM_SMALL_BLOCKS - 1);
}

static INLINE msize_t *
small_meta_header(const void *ptr) {
    // returns address of meta info
    msize_t	*meta_headers = small_meta_headers(ptr);
    msize_t	index = small_meta_index(ptr);
    return meta_headers + index;
}

static INLINE void
small_meta_header_set_is_free(msize_t *meta_headers, msize_t index, msize_t msize) {
    // Indicates that the meta_header for index says 'is free'
    meta_headers[index] = msize | SMALL_IS_FREE;
}

static INLINE void
small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize) {
    // Indicates that the meta_header for index says 'in use'
    meta_headers[index] = msize;
}

static INLINE void
small_meta_header_set_middle(msize_t *meta_headers, msize_t index) {
    // Indicates that the meta_header for index says 'in the middle of a block'
    meta_headers[index] = 0;
}
static void
small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Adds an item to the proper free list
    // Also marks the header of the block properly
    // Assumes szone has been locked
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    grain_t	grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
    free_list_t	*free_ptr = ptr;
    free_list_t	*free_head = szone->small_free_list[grain];
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("In small_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (((unsigned)ptr) & (SMALL_QUANTUM - 1)) {
        szone_error(szone, "small_free_list_add_ptr: Unaligned ptr", ptr);
    }
#endif
    if (free_head) {
        free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
#if DEBUG_MALLOC
        if (free_head->previous) {
            malloc_printf("ptr=%p grain=%d free_head=%p previous=%p\n", ptr, grain, free_head, free_head->previous);
            szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
        }
        if (!(small_meta_header(free_head)[0] & SMALL_IS_FREE)) {
            malloc_printf("ptr=%p grain=%d free_head=%p\n", ptr, grain, free_head);
            szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
        }
#endif
        free_head->previous = free_ptr;
        free_list_set_checksum(szone, free_head);
    } else {
        BITMAP32_SET(szone->small_bitmap, grain);
    }
    free_ptr->previous = NULL;
    free_ptr->next = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->small_free_list[grain] = free_ptr;
    void	*follower = ptr + (msize << SHIFT_SMALL_QUANTUM);
    SMALL_PREVIOUS_MSIZE(follower) = msize;
}
static void
small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Removes item in the proper free list
    // msize could be read, but all callers have it so we pass it in
    // Assumes szone has been locked
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    grain_t	grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
    free_list_t	*free_ptr = ptr;
    free_list_t	*next = free_ptr->next;
    free_list_t	*previous = free_ptr->previous;
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("In small_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    }
#endif
    free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
    if (!previous) {
#if DEBUG_MALLOC
        if (szone->small_free_list[grain] != ptr) {
            malloc_printf("ptr=%p grain=%d msize=%d szone->small_free_list[grain]=%p\n", ptr, grain, msize, szone->small_free_list[grain]);
            szone_error(szone, "small_free_list_remove_ptr: Internal invariant broken (szone->small_free_list[grain])", ptr);
            return;
        }
#endif
        szone->small_free_list[grain] = next;
        if (!next) BITMAP32_CLR(szone->small_bitmap, grain);
    } else {
        previous->next = next;
        free_list_set_checksum(szone, previous);
    }
    if (next) {
        next->previous = previous;
        free_list_set_checksum(szone, next);
    }
}
static INLINE small_region_t *
small_region_for_ptr_no_lock(szone_t *szone, const void *ptr) {
    small_region_t	*region = szone->small_regions;
    unsigned		num_regions = szone->num_small_regions;
    unsigned		ptr_shifted = ((unsigned)ptr) >> SMALL_BLOCKS_ALIGN;
    while (num_regions--) {
        small_region_t	this = *region;
        if (ptr_shifted == this) return region;
        region++;
    }
    return NULL;
}
static void
small_free_no_lock(szone_t *szone, small_region_t *region, void *ptr, msize_t msize) {
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    msize_t	*meta_headers = small_meta_headers(ptr);
    msize_t	index = small_meta_index(ptr);
    size_t	original_size = msize << SHIFT_SMALL_QUANTUM;
    void	*next_block = ((char *)ptr + original_size);
    msize_t	next_index = index + msize;
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("In small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (! msize) {
        malloc_printf("In small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
        szone_error(szone, "Trying to free small block that is too small", ptr);
    }
#endif
    // printf("In small_free_no_lock %p - msize=%d\n", ptr, msize);
    // We try to coalesce this block with the preceding one
    if (index && (SMALL_PREVIOUS_MSIZE(ptr) <= index)) {
        msize_t	previous_msize = SMALL_PREVIOUS_MSIZE(ptr);
        if (meta_headers[index - previous_msize] == (previous_msize | SMALL_IS_FREE)) {
            void	*previous = ptr - (previous_msize << SHIFT_SMALL_QUANTUM);
            // previous is really to be coalesced
#if DEBUG_MALLOC
            if (LOG(szone, ptr) || LOG(szone,previous)) {
                malloc_printf("In small_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
            }
#endif
            // malloc_printf("In small_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
            small_free_list_remove_ptr(szone, previous, previous_msize);
            small_meta_header_set_middle(meta_headers, index);
            ptr = previous;
            msize += previous_msize;
            index -= previous_msize;
        }
    }
    // We try to coalesce with the next block
    if (((vm_address_t)next_block < SMALL_REGION_END(*region)) && (meta_headers[next_index] & SMALL_IS_FREE)) {
        // next block is free, we coalesce
        msize_t	next_msize = meta_headers[next_index] & ~ SMALL_IS_FREE;
#if DEBUG_MALLOC
        if (LOG(szone,ptr)) malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", ptr, msize, next_block, next_msize);
#endif
        // malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", ptr, msize, next_block, next_msize);
        small_free_list_remove_ptr(szone, next_block, next_msize);
        small_meta_header_set_middle(meta_headers, next_index);
        msize += next_msize;
    }
    if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) {
        if (!msize) {
            szone_error(szone, "Incorrect size information - block header was damaged", ptr);
        } else {
            memset(ptr, 0x55, (msize << SHIFT_SMALL_QUANTUM));
        }
    }
    small_free_list_add_ptr(szone, ptr, msize);
    small_meta_header_set_is_free(meta_headers, index, msize);
    szone->num_small_objects--;
    szone->num_bytes_in_small_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
}
small_malloc_from_region_no_lock(szone_t *szone, msize_t msize) {
    // Allocates from the last region or a freshly allocated region
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    // Before anything we transform the small_bytes_free_at_end - if any - to a regular free block
    if (szone->small_bytes_free_at_end) {
    small_region_t last_region = szone->small_regions[szone->num_small_regions - 1];
    void *last_block = (void *)(SMALL_REGION_END(last_region) - szone->small_bytes_free_at_end);
    small_free_list_add_ptr(szone, last_block, szone->small_bytes_free_at_end >> SHIFT_SMALL_QUANTUM);
    small_meta_header(last_block)[0] = (szone->small_bytes_free_at_end >> SHIFT_SMALL_QUANTUM) | SMALL_IS_FREE;
    szone->small_bytes_free_at_end = 0;
    // time to create a new region
    vm_address_t new_address = allocate_pages(szone, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_SMALL));
    ptr = (void *)new_address;
    msize_t *meta_headers = small_meta_headers(ptr);
    // malloc_printf("Allocated small region #%d: %p [%y]\n", szone->num_small_regions, new_address, SMALL_REGION_SIZE);
    if (szone->num_small_regions == INITIAL_NUM_SMALL_REGIONS) {
    // time to grow the number of regions
    unsigned region_capacity = (1 << (32 - SMALL_BLOCKS_ALIGN)) - 20; // that is for sure the maximum number of small regions we can have
    msize_t new_msize = (region_capacity * sizeof(small_region_t) + SMALL_QUANTUM - 1) / SMALL_QUANTUM;
    small_region_t *new_regions = ptr;
    // malloc_printf("Now %d small_regions growing regions %p to %d msize=%d\n", szone->num_small_regions + 1, szone->small_regions, region_capacity, new_msize);
    small_meta_header_set_in_use(meta_headers, index, new_msize);
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += new_msize << SHIFT_SMALL_QUANTUM;
    memcpy(new_regions, szone->small_regions, INITIAL_NUM_SMALL_REGIONS * sizeof(small_region_t));
    // We intentionally leak the previous regions pointer to avoid multi-threading crashes if another thread was reading it (unlocked) while we are changing it.
    szone->small_regions = new_regions; // note we set this pointer after it's all set
    ptr += new_msize << SHIFT_SMALL_QUANTUM;
    // malloc_printf("Regions is now %p next ptr is %p\n", szone->small_regions, ptr);
    szone->small_regions[szone->num_small_regions] = new_address >> SMALL_BLOCKS_ALIGN;
    szone->num_small_regions++; // we bump the number of regions AFTER we have changed the regions pointer to enable finding a small region without taking the lock
    // malloc_printf("Now %d small regions\n", szone->num_small_regions);
    small_meta_header_set_in_use(meta_headers, index, msize);
    msize_t msize_left = NUM_SMALL_BLOCKS - index;
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += msize << SHIFT_SMALL_QUANTUM;
    // add a big free block
    index += msize; msize_left -= msize;
    meta_headers[index] = msize_left;
    szone->small_bytes_free_at_end = msize_left << SHIFT_SMALL_QUANTUM;
    // malloc_printf("small_bytes_free_at_end set to %d\n", szone->small_bytes_free_at_end);
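/*
 * Consistency checker: walks every block in a small region via the meta
 * headers, verifying that in-use blocks have sane sizes and that free blocks
 * are properly linked (checksummed previous/next pointers, no two adjacent
 * free blocks, and a trailing size word matching the header).  Diagnostic
 * only; it prints and reports rather than repairing anything.
 */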
szone_check_small_region(szone_t *szone, small_region_t *region) {
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    void *ptr = (void *)SMALL_REGION_ADDRESS(*region);
    msize_t *meta_headers = small_meta_headers(ptr);
    vm_address_t region_end = SMALL_REGION_END(*region);
    msize_t prev_free = 0;
    if (region == szone->small_regions + szone->num_small_regions - 1) region_end -= szone->small_bytes_free_at_end;
    while ((vm_address_t)ptr < region_end) {
    msize_t index = small_meta_index(ptr);
    msize_t msize_and_free = meta_headers[index];
    if (! (msize_and_free & SMALL_IS_FREE)) {
    msize_t msize = msize_and_free;
    malloc_printf("*** malloc[%d]: invariant broken: null msize ptr=%p region#=%d num_small_regions=%d end=%p\n", getpid(), ptr, region - szone->small_regions, szone->num_small_regions, (void *)region_end);
    if (msize > (LARGE_THRESHOLD / SMALL_QUANTUM)) {
    malloc_printf("*** malloc[%d]: invariant broken for %p this small msize=%d - size is too large\n", getpid(), ptr, msize_and_free);
    ptr += msize << SHIFT_SMALL_QUANTUM;
    msize_t msize = msize_and_free & ~ SMALL_IS_FREE;
    free_list_t *free_head = ptr;
    msize_t *follower = (void *)FOLLOWING_SMALL_PTR(ptr, msize);
    malloc_printf("*** malloc[%d]: invariant broken for free block %p this msize=%d\n", getpid(), ptr, msize);
    malloc_printf("*** malloc[%d]: invariant broken for %p (2 free in a row)\n", getpid(), ptr);
    free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
    if (free_head->previous && !(small_meta_header(free_head->previous)[0] & SMALL_IS_FREE)) {
    malloc_printf("*** malloc[%d]: invariant broken for %p (previous %p is not a free pointer)\n", getpid(), ptr, free_head->previous);
    if (free_head->next && !(small_meta_header(free_head->next)[0] & SMALL_IS_FREE)) {
    malloc_printf("*** malloc[%d]: invariant broken for %p (next is not a free pointer)\n", getpid(), ptr);
    if (SMALL_PREVIOUS_MSIZE(follower) != msize) {
    malloc_printf("*** malloc[%d]: invariant broken for small free %p followed by %p in region [%p-%p] (end marker incorrect) should be %d; in fact %d\n", getpid(), ptr, follower, SMALL_REGION_ADDRESS(*region), region_end, msize, SMALL_PREVIOUS_MSIZE(follower));
    prev_free = SMALL_IS_FREE;
static kern_return_t
small_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t small_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder) {
    small_region_t *regions;
    vm_range_t buffer[MAX_RECORDER_BUFFER];
    err = reader(task, region_address, sizeof(small_region_t) * num_regions, (void **)&regions);
    if (err) return err;
    while (index < num_regions) {
    small_region_t region = regions[index];
    vm_range_t range = {SMALL_REGION_ADDRESS(region), SMALL_REGION_SIZE};
    // malloc_printf("Enumerating small ptrs for Region starting at %p\n", range.address);
    if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
    vm_range_t admin_range = {range.address + (1 << SMALL_BLOCKS_ALIGN), range.size - (1 << SMALL_BLOCKS_ALIGN)};
    recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
    if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
    vm_range_t ptr_range = {range.address, 1 << SMALL_BLOCKS_ALIGN};
    recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
    if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
    unsigned char *mapped_region;
    err = reader(task, range.address, range.size, (void **)&mapped_region);
    if (err) return err;
    msize_t *block_header = (msize_t *)(mapped_region + (1 << SMALL_BLOCKS_ALIGN));
    unsigned block_index = 0;
    unsigned block_limit = NUM_SMALL_BLOCKS;
    if (index == num_regions - 1)
    block_limit -= (small_bytes_free_at_end >> SHIFT_SMALL_QUANTUM);
    while (block_index < block_limit) {
    msize_t msize_and_free = block_header[block_index];
    msize_t msize = msize_and_free & ~ SMALL_IS_FREE;
    if (! (msize_and_free & SMALL_IS_FREE)) {
    buffer[count].address = range.address + (block_index << SHIFT_SMALL_QUANTUM);
    buffer[count].size = msize << SHIFT_SMALL_QUANTUM;
    if (count >= MAX_RECORDER_BUFFER) {
    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
    block_index += msize;
    // malloc_printf("End small region - count=%d\n", count);
    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
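/*
 * Free-list lookup: small free blocks are binned by grain, where slot i holds
 * blocks of (i + 1) quanta and the last slot collects everything larger.
 * szone->small_bitmap appears to keep one bit per non-empty slot, so masking
 * off the bits below the requested grain and taking BITMAP32_FFS() finds the
 * first slot that could satisfy the request; for example, a 3-quantum request
 * masks with ~((1 << 2) - 1) and starts probing at grain 2.
 */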
static INLINE void *
small_malloc_from_free_list(szone_t *szone, msize_t msize) {
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
    unsigned bitmap = szone->small_bitmap & ~ ((1 << grain) - 1);
    if (!bitmap) goto try_small_from_end;
    grain = BITMAP32_FFS(bitmap) - 1;
    // first try the small grains
    free_list_t **free_list;
    free_list_t **limit = szone->small_free_list + NUM_SMALL_SLOTS - 1;
    free_list = szone->small_free_list + grain;
    while (free_list < limit) {
    // try bigger grains
    next = ((free_list_t *)ptr)->next;
    next->previous = NULL;
    free_list_set_checksum(szone, next);
    this_msize = small_meta_header(ptr)[0] & ~ SMALL_IS_FREE;
    // malloc_printf("small_malloc_from_free_list: allocated from free list\n");
    goto add_leftover_and_proceed;
    // We now check the large grains for one that is big enough
    this_msize = small_meta_header(ptr)[0] & ~ SMALL_IS_FREE;
    if (this_msize >= msize) {
    // malloc_printf("small_malloc_from_free_list: allocated from last free list\n");
    small_free_list_remove_ptr(szone, ptr, this_msize);
    goto add_leftover_and_proceed;
    ptr = ((free_list_t *)ptr)->next;
try_small_from_end:
    // Let's see if we can use szone->small_bytes_free_at_end
    // malloc_printf("Found nothing in free list small_bytes_free_at_end=%y\n", szone->small_bytes_free_at_end);
    if (szone->small_bytes_free_at_end >= (msize << SHIFT_SMALL_QUANTUM)) {
    ptr = (void *)(SMALL_REGION_END(szone->small_regions[szone->num_small_regions-1]) - szone->small_bytes_free_at_end);
    szone->small_bytes_free_at_end -= msize << SHIFT_SMALL_QUANTUM;
    if (szone->small_bytes_free_at_end) {
    // let's mark this block as in use to serve as boundary
    small_meta_header(ptr + (msize << SHIFT_SMALL_QUANTUM))[0] = szone->small_bytes_free_at_end >> SHIFT_SMALL_QUANTUM;
    goto return_small_alloc;
add_leftover_and_proceed:
    if (this_msize > msize) {
    msize_t leftover_msize = this_msize - msize;
    void *leftover_ptr = ptr + (msize << SHIFT_SMALL_QUANTUM);
    if (LOG(szone,ptr)) {
    malloc_printf("In small_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
    small_free_list_add_ptr(szone, leftover_ptr, leftover_msize);
    msize_t *meta_headers = small_meta_headers(leftover_ptr);
    msize_t leftover_index = small_meta_index(leftover_ptr);
    small_meta_header_set_is_free(meta_headers, leftover_index, leftover_msize);
return_small_alloc:
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += this_msize << SHIFT_SMALL_QUANTUM;
    if (LOG(szone,ptr)) {
    malloc_printf("In small_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
    small_meta_header(ptr)[0] = this_msize;
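/*
 * Fast path: last_small_free caches the most recently freed small block with
 * its msize packed into the low (SMALL_QUANTUM - 1) bits of the pointer, which
 * quantum alignment leaves free.  The probe below first compares those low
 * bits against the requested msize without the lock, then takes the lock and
 * re-checks before claiming the block, falling back to the free lists when the
 * optimistic check fails.
 */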
static INLINE void *
small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) {
    boolean_t locked = 0;
    ptr = (void *)szone->last_small_free;
    if ((((unsigned)ptr) & (SMALL_QUANTUM - 1)) == msize) {
    // we have a candidate - let's lock to make sure
    LOCK_AND_NOTE_LOCKED(szone, locked);
    if (ptr == (void *)szone->last_small_free) {
    szone->last_small_free = NULL;
    // malloc_printf("using last_small_free\n");
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    ptr = (void *)((unsigned)ptr & ~ (SMALL_QUANTUM - 1));
    if (cleared_requested) {
    memset(ptr, 0, msize << SHIFT_SMALL_QUANTUM);
    // malloc_printf("optimistic locking for last_small_free failed\n");
    // Except in rare occasions where we need to add a new region, we are going to end up locking, so we might as well lock right away to avoid doing unnecessary optimistic probes
    if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked);
    ptr = small_malloc_from_free_list(szone, msize);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    if (cleared_requested) {
    memset(ptr, 0, msize << SHIFT_SMALL_QUANTUM);
    ptr = small_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
static INLINE void *
small_malloc_cleared_no_lock(szone_t *szone, msize_t msize) {
    // tries to allocate a small, cleared block
    // Assumes already locked
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    ptr = small_malloc_from_free_list(szone, msize);
    memset(ptr, 0, msize << SHIFT_SMALL_QUANTUM);
    ptr = small_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
free_small(szone_t *szone, void *ptr, small_region_t *small_region) {
    // ptr is known to be in small_region
    msize_t msize_and_free;
    msize_and_free = small_meta_header(ptr)[0];
    if (msize_and_free & SMALL_IS_FREE) {
    szone_error(szone, "Object already freed being freed", ptr);
    CHECK(szone, __PRETTY_FUNCTION__);
    // malloc_printf("%p[%x]\n", ptr, msize_and_free);
    void *ptr2 = szone->last_small_free;
    szone->last_small_free = (void *)(((unsigned)ptr) | msize_and_free);
    // malloc_printf("stuffing last_small_free\n");
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    // malloc_printf("replacing previous last_small_free %p with %p\n", ptr2, szone->last_small_free);
    msize_and_free = (unsigned)ptr2 & (SMALL_QUANTUM - 1);
    ptr = (void *)(((unsigned)ptr2) & ~ (SMALL_QUANTUM - 1));
    small_region = small_region_for_ptr_no_lock(szone, ptr);
    if (!small_region) {
    szone_error(szone, "Double free (small cache)", ptr);
    small_free_no_lock(szone, small_region, ptr, msize_and_free);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
print_small_free_list(szone_t *szone) {
    malloc_printf("Small free sizes: ");
    while (grain < NUM_SMALL_SLOTS) {
    free_list_t *ptr = szone->small_free_list[grain];
    malloc_printf("%s%y[%d]; ", (grain == NUM_SMALL_SLOTS-1) ? ">=" : "", (grain + 1) * SMALL_QUANTUM, free_list_count(ptr));
    malloc_printf("\n");

print_small_region(szone_t *szone, boolean_t verbose, small_region_t *region, size_t bytes_at_end) {
    unsigned counts[1024];
    unsigned in_use = 0;
    vm_address_t start = SMALL_REGION_ADDRESS(*region);
    vm_address_t limit = SMALL_REGION_END(*region) - bytes_at_end;
    memset(counts, 0, 1024 * sizeof(unsigned));
    while (start < limit) {
    msize_t msize_and_free = small_meta_header((void *)start)[0];
    msize_t msize = msize_and_free & ~ SMALL_IS_FREE;
    if (!(msize_and_free & SMALL_IS_FREE)) {
    if (msize < 1024) counts[msize]++;
    start += msize << SHIFT_SMALL_QUANTUM;
    malloc_printf("Small region [%p-%p, %y]\tIn_use=%d ", SMALL_REGION_ADDRESS(*region), SMALL_REGION_END(*region), (int)SMALL_REGION_SIZE, in_use);
    if (bytes_at_end) malloc_printf("Untouched=%y ", bytes_at_end);
    if (verbose && in_use) {
    malloc_printf("\n\tSizes in use: ");
    malloc_printf("%d[%d] ", ci << SHIFT_SMALL_QUANTUM, counts[ci]);
    malloc_printf("\n");
small_free_list_check(szone_t *szone, grain_t grain) {
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    free_list_t *ptr = szone->small_free_list[grain];
    free_list_t *previous = NULL;
    msize_t msize_and_free = small_meta_header(ptr)[0];
    if (!(msize_and_free & SMALL_IS_FREE)) {
    malloc_printf("*** malloc[%d]: In-use ptr in free list grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
    if (((unsigned)ptr) & (SMALL_QUANTUM - 1)) {
    malloc_printf("*** malloc[%d]: Unaligned ptr in free list grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
    if (!small_region_for_ptr_no_lock(szone, ptr)) {
    malloc_printf("*** malloc[%d]: Ptr not in szone grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
    free_list_checksum(szone, ptr, __PRETTY_FUNCTION__);
    if (ptr->previous != previous) {
    malloc_printf("*** malloc[%d]: Previous incorrectly set grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
/********************* LARGE ENTRY UTILITIES ************************/

large_debug_print(szone_t *szone) {
    unsigned num_large_entries = szone->num_large_entries;
    unsigned index = num_large_entries;
    large_entry_t *range = szone->large_entries + index;
    large_entry_t entry = *range;
    if (!LARGE_ENTRY_IS_EMPTY(entry)) {
    malloc_printf("%d: %p(%y); ", index, LARGE_ENTRY_ADDRESS(entry), LARGE_ENTRY_SIZE(entry));
    malloc_printf("\n");
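/*
 * Large entries live in an open-addressed hash table keyed on the allocation's
 * page number and probed linearly: an empty entry terminates a lookup chain,
 * so a removal must clear the slot and rehash the entries that follow it (see
 * large_entries_rehash_after_entry_no_lock below).
 */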
static large_entry_t *
large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) {
    // result only valid during a lock
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index;
    if (!num_large_entries) return NULL;
    hash_index = ((unsigned)ptr >> vm_page_shift) % num_large_entries;
    large_entry_t *range = szone->large_entries + index;
    large_entry_t entry = *range;
    if (LARGE_ENTRY_MATCHES(entry, ptr)) return range;
    if (LARGE_ENTRY_IS_EMPTY(entry)) return NULL; // end of chain
    index++; if (index == num_large_entries) index = 0;
    } while (index != hash_index);
large_entry_insert_no_lock(szone_t *szone, large_entry_t range) {
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index = (range.address_and_num_pages >> vm_page_shift) % num_large_entries;
    unsigned index = hash_index;
    // malloc_printf("Before insertion of %p\n", LARGE_ENTRY_ADDRESS(range));
    large_entry_t *entry = szone->large_entries + index;
    if (LARGE_ENTRY_IS_EMPTY(*entry)) {
    return; // end of chain
    index++; if (index == num_large_entries) index = 0;
    } while (index != hash_index);

large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) {
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index = entry - szone->large_entries;
    unsigned index = hash_index;
    large_entry_t range;
    index++; if (index == num_large_entries) index = 0;
    range = szone->large_entries[index];
    if (LARGE_ENTRY_IS_EMPTY(range)) return;
    szone->large_entries[index].address_and_num_pages = 0;
    large_entry_insert_no_lock(szone, range); // this will reinsert in the proper place
    } while (index != hash_index);
static INLINE large_entry_t *
large_entries_alloc_no_lock(szone_t *szone, unsigned num) {
    size_t size = num * sizeof(large_entry_t);
    boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;
    if (is_vm_allocation) {
    // Note that we allocate memory (via a system call) under a spin lock
    // That is certainly evil, however it's very rare in the lifetime of a process
    // The alternative would slow down the normal case
    return (void *)allocate_pages(szone, round_page(size), 0, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE));
    return small_malloc_cleared_no_lock(szone, (size + SMALL_QUANTUM - 1) >> SHIFT_SMALL_QUANTUM);
large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate) {
    // returns range to deallocate
    size_t size = num * sizeof(large_entry_t);
    boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;
    // malloc_printf("In large_entries_free_no_lock %d %d\n", num, is_vm_allocation);
    if (is_vm_allocation) {
    range_to_deallocate->address = (vm_address_t)entries;
    range_to_deallocate->size = round_page(size);
    range_to_deallocate->size = 0;
    small_region_t *region = small_region_for_ptr_no_lock(szone, entries);
    msize_t msize_and_free = small_meta_header(entries)[0];
    if (msize_and_free & SMALL_IS_FREE) {
    szone_error(szone, "Object already freed being freed", entries);
    small_free_no_lock(szone, region, entries, msize_and_free);
large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate) {
    // sets range_to_deallocate
    unsigned old_num_entries = szone->num_large_entries;
    large_entry_t *old_entries = szone->large_entries;
    unsigned new_num_entries = (old_num_entries) ? old_num_entries * 2 + 1 : 63; // always an odd number for good hashing
    large_entry_t *new_entries = large_entries_alloc_no_lock(szone, new_num_entries);
    unsigned index = old_num_entries;
    szone->num_large_entries = new_num_entries;
    szone->large_entries = new_entries;
    // malloc_printf("_grow_large_entries old_num_entries=%d new_num_entries=%d %p\n", old_num_entries, new_num_entries, old_entries);
    large_entry_t oldRange = old_entries[index];
    if (!LARGE_ENTRY_IS_EMPTY(oldRange)) {
    large_entry_insert_no_lock(szone, oldRange);
    large_entries_free_no_lock(szone, old_entries, old_num_entries, range_to_deallocate);
    range_to_deallocate->size = 0;
large_free_no_lock(szone_t *szone, large_entry_t *entry) {
    // frees the specific entry in the size table
    // returns a range to truly deallocate
    range.address = LARGE_ENTRY_ADDRESS(*entry);
    range.size = LARGE_ENTRY_SIZE(*entry);
    szone->num_large_objects_in_use--;
    szone->num_bytes_in_large_objects -= range.size;
    if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
    protect(szone, range.address, range.size, VM_PROT_READ | VM_PROT_WRITE, szone->debug_flags);
    range.address -= 1 << vm_page_shift;
    range.size += 2 * (1 << vm_page_shift);
    // malloc_printf("Entry is %p=%d; cache is %p ; found=%p\n", entry,
    // entry-szone->large_entries, szone->large_entries,
    // large_entry_for_pointer_no_lock(szone, (void *)range.address));
    entry->address_and_num_pages = 0;
    large_entries_rehash_after_entry_no_lock(szone, entry);
    if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) {
    malloc_printf("*** malloc[%d]: Freed entry %p still in use; num_large_entries=%d\n", getpid(), range.address, szone->num_large_entries);
    large_debug_print(szone);
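/*
 * In-place small realloc: this only succeeds when the block that follows ptr
 * is free and, combined with the current block, large enough for the new size;
 * any excess beyond new_msize is split off and returned to the free lists.
 */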
static INLINE boolean_t
try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) {
    // returns 1 on success
    msize_t *meta_headers = small_meta_headers(ptr);
    msize_t index = small_meta_index(ptr);
    msize_t old_msize = old_size >> SHIFT_SMALL_QUANTUM;
    msize_t new_msize = (new_size + SMALL_QUANTUM - 1) >> SHIFT_SMALL_QUANTUM;
    void *next_block = (char *)ptr + old_size;
    msize_t next_index = index + old_msize;
    if (next_index >= NUM_SMALL_BLOCKS) {
    // malloc_printf("try_realloc_small_in_place can't take place at end %p %d %d %d\n", ptr, old_size, new_size, next_index);
    if ((vm_address_t)next_block & (SMALL_QUANTUM - 1)) {
    szone_error(szone, "Internal invariant broken in realloc(next_block)", next_block);
    if (meta_headers[index] != old_msize) malloc_printf("*** try_realloc_small_in_place incorrect old %d %d\n", meta_headers[index], old_msize);
    // If the next block is free, we coalesce
    msize_t next_msize_and_free;
    next_msize_and_free = meta_headers[next_index];
    next_msize = next_msize_and_free & ~ SMALL_IS_FREE;
    if (!(next_msize_and_free & SMALL_IS_FREE) || (old_msize + next_msize < new_msize)) {
    SZONE_UNLOCK(szone);
    // malloc_printf("Small realloc in place for %p; current msize=%db(%d) next=%p next_msize=%d wanted=%db(%d)\n", ptr, old_size, meta_headers[index], next_block, next_msize, new_size, new_msize);
    small_free_list_remove_ptr(szone, next_block, next_msize);
    small_meta_header_set_middle(meta_headers, next_index);
    msize_t leftover_msize = old_msize + next_msize - new_msize;
    if (leftover_msize) {
    void *leftover = ptr + (new_msize << SHIFT_SMALL_QUANTUM);
    // malloc_printf("Leftover in realloc in place %p msize=%d\n", leftover, leftover_msize);
    small_free_list_add_ptr(szone, leftover, leftover_msize);
    msize_t leftover_index = index + new_msize;
    small_meta_header_set_is_free(meta_headers, leftover_index, leftover_msize);
    if ((new_msize << SHIFT_SMALL_QUANTUM) >= LARGE_THRESHOLD) {
    malloc_printf("*** Realloc in place for %p exceeded msize=%d\n", ptr, new_msize);
    small_meta_header_set_in_use(meta_headers, index, new_msize);
    if (LOG(szone,ptr)) {
    malloc_printf("In szone_realloc(), ptr=%p, msize=%d\n", ptr, small_meta_header(ptr)[0]);
    szone->num_bytes_in_small_objects += (new_msize - old_msize) << SHIFT_SMALL_QUANTUM;
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    // malloc_printf("Extended ptr %p for realloc old=%d desired=%d new=%d "
    // "leftover=%d\n", ptr, (unsigned)old_size, (unsigned)new_size,
    // (unsigned)szone_size(szone, ptr), leftover_msize << SHIFT_SMALL_QUANTUM);
static kern_return_t
large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) {
    vm_range_t buffer[MAX_RECORDER_BUFFER];
    large_entry_t *entries;
    err = reader(task, large_entries_address, sizeof(large_entry_t) * num_entries, (void **)&entries);
    if (err) return err;
    index = num_entries;
    if ((type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) && (num_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD)) {
    range.address = large_entries_address;
    range.size = round_page(num_entries * sizeof(large_entry_t));
    recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &range, 1);
    if (type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE))
    large_entry_t entry = entries[index];
    if (!LARGE_ENTRY_IS_EMPTY(entry)) {
    range.address = LARGE_ENTRY_ADDRESS(entry);
    range.size = LARGE_ENTRY_SIZE(entry);
    buffer[count++] = range;
    if (count >= MAX_RECORDER_BUFFER) {
    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count);
    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count);
/********************* HUGE ENTRY UTILITIES ************************/

static huge_entry_t *
huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) {
    unsigned index = szone->num_huge_entries;
    huge_entry_t *huge = szone->huge_entries + index;
    if (huge->address == (vm_address_t)ptr) return huge;
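/*
 * Huge entries are kept in a plain array scanned linearly; appending may need
 * to grow that array with szone_malloc, which can itself re-enter this szone,
 * so the code below drops the zone lock around the allocation and re-checks
 * num_huge_entries afterwards, presumably retrying if another thread appended
 * an entry in the meantime.
 */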
huge_entry_append(szone_t *szone, huge_entry_t huge) {
    // We do a little dance with locking because doing allocation (even in the
    // default szone) may cause something to get freed in this szone, with a
    // deadlock
    // Returns 1 on success
    huge_entry_t *new_huge_entries = NULL;
    unsigned num_huge_entries;
    num_huge_entries = szone->num_huge_entries;
    SZONE_UNLOCK(szone);
    // malloc_printf("In huge_entry_append currentEntries=%d\n", num_huge_entries);
    if (new_huge_entries) szone_free(szone, new_huge_entries);
    new_huge_entries = szone_malloc(szone, (num_huge_entries + 1) * sizeof(huge_entry_t));
    if (new_huge_entries == NULL) return 0;
    if (num_huge_entries == szone->num_huge_entries) {
    // No change - our malloc still applies
    huge_entry_t *old_huge_entries = szone->huge_entries;
    if (num_huge_entries) {
    memcpy(new_huge_entries, old_huge_entries, num_huge_entries * sizeof(huge_entry_t));
    new_huge_entries[szone->num_huge_entries++] = huge;
    szone->huge_entries = new_huge_entries;
    SZONE_UNLOCK(szone);
    szone_free(szone, old_huge_entries);
    // malloc_printf("Done huge_entry_append now=%d\n", szone->num_huge_entries);
static kern_return_t
huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) {
    huge_entry_t *entries;
    err = reader(task, huge_entries_address, sizeof(huge_entry_t) * num_entries, (void **)&entries);
    if (err) return err;
    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, entries, num_entries);
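/*
 * Size split for page-level allocations: a large entry packs the address and
 * the page count into a single word (the addr | num_pages encoding used
 * below), so requests of (1 << vm_page_shift) pages or more cannot be encoded
 * that way and go to the huge table instead, which stores an explicit
 * address/size pair.
 */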
large_and_huge_malloc(szone_t *szone, unsigned num_pages) {
    vm_address_t addr = 0;
    vm_range_t range_to_deallocate;
    if (!num_pages) num_pages = 1; // minimal allocation size for this szone
    // malloc_printf("In large_and_huge_malloc for %y\n", num_pages * (1 << vm_page_shift));
    range_to_deallocate.size = 0;
    if (num_pages >= (1 << vm_page_shift)) {
    huge.size = num_pages << vm_page_shift;
    addr = allocate_pages(szone, huge.size, 0, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_HUGE));
    if (!addr) return NULL;
    huge.address = addr;
    if (! huge_entry_append(szone, huge)) return NULL;
    szone->num_bytes_in_huge_objects += huge.size;
    vm_size_t size = num_pages << vm_page_shift;
    large_entry_t entry;
    addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE));
    if (LOG(szone, addr)) malloc_printf("In szone_malloc true large allocation at %p for %y\n", (void *)addr, size);
    SZONE_UNLOCK(szone);
    if (large_entry_for_pointer_no_lock(szone, (void *)addr)) {
    malloc_printf("Freshly allocated is already in use: %p\n", addr);
    large_debug_print(szone);
    if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) {
    // density of hash table too high; grow table
    // we do that under lock to avoid a race
    // malloc_printf("In szone_malloc growing hash table current=%d\n", szone->num_large_entries);
    large_entries_grow_no_lock(szone, &range_to_deallocate);
    // malloc_printf("Inserting large entry (%p, %y)\n", addr, num_pages * (1 << vm_page_shift));
    entry.address_and_num_pages = addr | num_pages;
    if (large_entry_for_pointer_no_lock(szone, (void *)addr)) {
    malloc_printf("Entry about to be added already in use: %p\n", addr);
    large_debug_print(szone);
    large_entry_insert_no_lock(szone, entry);
    if (!large_entry_for_pointer_no_lock(szone, (void *)addr)) {
    malloc_printf("Can't find entry just added\n");
    large_debug_print(szone);
    // malloc_printf("Inserted large entry (%p, %d pages)\n", addr, num_pages);
    szone->num_large_objects_in_use++;
    szone->num_bytes_in_large_objects += size;
    SZONE_UNLOCK(szone);
    if (range_to_deallocate.size) {
    deallocate_pages(szone, range_to_deallocate.address, range_to_deallocate.size, 0); // we deallocate outside the lock
    // malloc_printf("Deallocated large entries %d\n", range_to_deallocate.size);
    return (void *)addr;
free_large_or_huge(szone_t *szone, void *ptr) {
    // We have established ptr is page-aligned and not tiny nor small
    large_entry_t *entry;
    vm_range_t vm_range_to_deallocate;
    entry = large_entry_for_pointer_no_lock(szone, ptr);
    // malloc_printf("Ready for deallocation [%p-%y]\n", LARGE_ENTRY_ADDRESS(*entry), LARGE_ENTRY_SIZE(*entry));
    vm_range_to_deallocate = large_free_no_lock(szone, entry);
    if (large_entry_for_pointer_no_lock(szone, ptr)) {
    malloc_printf("*** malloc[%d]: Just after freeing %p still in use num_large_entries=%d\n", getpid(), ptr, szone->num_large_entries);
    large_debug_print(szone);
    } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) {
    vm_range_to_deallocate = *huge;
    *huge = szone->huge_entries[--szone->num_huge_entries]; // last entry fills that spot
    szone->num_bytes_in_huge_objects -= vm_range_to_deallocate.size;
    large_debug_print(szone);
    szone_error(szone, "Pointer being freed was not allocated", ptr);
    SZONE_UNLOCK(szone); // we release the lock asap
    CHECK(szone, __PRETTY_FUNCTION__);
    // we deallocate_pages, including guard pages
    if (vm_range_to_deallocate.address) {
    // malloc_printf("About to deallocate %p size %y\n", vm_range_to_deallocate.address, vm_range_to_deallocate.size);
    if (large_entry_for_pointer_no_lock(szone, (void *)vm_range_to_deallocate.address)) {
    malloc_printf("*** malloc[%d]: Invariant broken: %p still in use num_large_entries=%d\n", getpid(), vm_range_to_deallocate.address, szone->num_large_entries);
    large_debug_print(szone);
    deallocate_pages(szone, vm_range_to_deallocate.address, vm_range_to_deallocate.size, 0);
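/*
 * In-place large/huge realloc: the code below asks vm_allocate() for the pages
 * immediately after the existing block (tagged VM_MEMORY_REALLOC); if that
 * address range is unavailable the call fails and the caller falls back to
 * allocate-copy-free.  A successful extension may also require converting a
 * large entry into a huge entry once the new size no longer fits the large
 * encoding.
 */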
try_realloc_large_or_huge_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) {
    vm_address_t addr = (vm_address_t)ptr + old_size;
    large_entry_t *entry;
    if (old_size != ((old_size >> vm_page_shift) << vm_page_shift)) {
    malloc_printf("*** old_size is %d\n", old_size);
    // malloc_printf("=== Trying (1) to extend %p from %d to %d\n", ptr, old_size, new_size);
    entry = large_entry_for_pointer_no_lock(szone, (void *)addr);
    SZONE_UNLOCK(szone);
    return 0; // large pointer already exists in table - extension is not going to work
    new_size = round_page(new_size);
    // malloc_printf("=== Trying (2) to extend %p from %d to %d\n", ptr, old_size, new_size);
    /*
     * Ask for allocation at a specific address, and mark as realloc
     * to request coalescing with previous realloc'ed extensions.
     */
    err = vm_allocate(mach_task_self(), &addr, new_size - old_size, VM_MAKE_TAG(VM_MEMORY_REALLOC));
    if (err != KERN_SUCCESS) {
    /*
     * If the new size is still under the large/huge threshold, we can just
     * extend the existing large block.
     *
     * Note: this logic is predicated on the understanding that an allocated
     * block can never really shrink, so that the new size will always be
     * larger than the old size.
     */
    if ((new_size >> vm_page_shift) < (1 << vm_page_shift)) {
    /* extend existing large entry */
    entry = large_entry_for_pointer_no_lock(szone, ptr);
    szone_error(szone, "large entry reallocated is not properly in table", ptr);
    /* XXX will cause fault on next reference to entry */
    entry->address_and_num_pages = (vm_address_t)ptr | (new_size >> vm_page_shift);
    szone->num_bytes_in_large_objects += new_size - old_size;
    } else if ((old_size >> vm_page_shift) >= (1 << vm_page_shift)) {
    /* extend existing huge entry */
    huge_entry_t *huge_entry = huge_entry_for_pointer_no_lock(szone, ptr);
    szone_error(szone, "huge entry reallocated is not properly in table", ptr);
    /* XXX will cause fault on next reference to huge_entry */
    huge_entry->size = new_size;
    szone->num_bytes_in_huge_objects += new_size - old_size;
    /* need to convert large entry to huge entry */
    /* release large entry, note we still have the VM allocation */
    entry = large_entry_for_pointer_no_lock(szone, ptr);
    large_entry_t saved_entry = *entry; // in case we need to put it back
    large_free_no_lock(szone, entry);
    szone->num_bytes_in_large_objects -= old_size;
    /* and get a huge entry */
    huge.address = (vm_address_t)ptr;
    huge.size = new_size; /* fix up size */
    SZONE_UNLOCK(szone);
    if (huge_entry_append(szone, huge)) {
    szone->num_bytes_in_huge_objects += new_size;
    return 1; // success!
    // we leak memory (the extra space appended) but data structures are correct
    large_entry_insert_no_lock(szone, saved_entry); // this will reinsert the large entry
    // malloc_printf("=== Successfully reallocated at end of %p from %d to %d\n", ptr, old_size, new_size);
    SZONE_UNLOCK(szone); // we release the lock asap
/********************* Zone call backs ************************/

szone_free(szone_t *szone, void *ptr) {
    // malloc_printf("szone_free(%p)\n", ptr);
    if (LOG(szone, ptr)) malloc_printf("In szone_free with %p\n", ptr);
    if ((vm_address_t)ptr & (TINY_QUANTUM - 1)) {
    szone_error(szone, "Non-aligned pointer being freed", ptr);
    // try a tiny pointer
    tiny_region_t *tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
    free_tiny(szone, ptr, tiny_region);
    if ((vm_address_t)ptr & (SMALL_QUANTUM - 1)) {
    szone_error(szone, "Non-aligned pointer being freed (2)", ptr);
    // try a small pointer
    small_region_t *small_region = small_region_for_ptr_no_lock(szone, ptr);
    free_small(szone, ptr, small_region);
    if (((unsigned)ptr) & ((1 << vm_page_shift) - 1)) {
    szone_error(szone, "Non-page-aligned, non-allocated pointer being freed", ptr);
    free_large_or_huge(szone, ptr);
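/*
 * Allocation dispatch: requests up to 31 * TINY_QUANTUM go to the tiny
 * allocator, requests below LARGE_THRESHOLD go to the small allocator (unless
 * guard pages are requested for small blocks), and everything else is rounded
 * to whole pages and handed to large_and_huge_malloc().
 */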
static INLINE void *
szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested) {
    if (size <= 31*TINY_QUANTUM) {
    msize_t msize = (size + TINY_QUANTUM - 1) >> SHIFT_TINY_QUANTUM;
    if (! msize) msize = 1;
    ptr = tiny_malloc_should_clear(szone, msize, cleared_requested);
    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
    msize_t msize = (size + SMALL_QUANTUM - 1) >> SHIFT_SMALL_QUANTUM;
    if (! msize) msize = 1;
    ptr = small_malloc_should_clear(szone, msize, cleared_requested);
    num_pages = round_page(size) >> vm_page_shift;
    ptr = large_and_huge_malloc(szone, num_pages);
    if (LOG(szone, ptr)) malloc_printf("szone_malloc returned %p\n", ptr);
szone_malloc(szone_t *szone, size_t size) {
    // malloc_printf("szone_malloc(%d)\n", size);
    void *ptr = szone_malloc_should_clear(szone, size, 0);
    // malloc_printf("szone_malloc(%d) -> %p %d\n", size, ptr, malloc_size(ptr));

szone_calloc(szone_t *szone, size_t num_items, size_t size) {
    // malloc_printf("szone_calloc(%d,%d)\n", num_items, size);
    void *ptr = szone_malloc_should_clear(szone, num_items * size, 1);
    // malloc_printf("szone_calloc(%d,%d) -> %p\n", num_items, size, ptr);

szone_valloc(szone_t *szone, size_t size) {
    num_pages = round_page(size) >> vm_page_shift;
    ptr = large_and_huge_malloc(szone, num_pages);
    if (LOG(szone, ptr)) malloc_printf("szone_valloc returned %p\n", ptr);
szone_size(szone_t *szone, const void *ptr) {
    large_entry_t *entry;
    // malloc_printf("szone_size(%p)\n", ptr);
    if (LOG(szone, ptr)) {
    malloc_printf("In szone_size for %p (szone=%p)\n", ptr, szone);
    if ((vm_address_t)ptr & (TINY_QUANTUM - 1)) return 0;
    tiny_region_t *tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
    // this is indeed a valid pointer
    msize_t msize = get_tiny_meta_header(ptr, &is_free);
    return (is_free) ? 0 : msize << SHIFT_TINY_QUANTUM;
    if ((vm_address_t)ptr & (SMALL_QUANTUM - 1)) return 0;
    small_region_t *small_region = small_region_for_ptr_no_lock(szone, ptr);
    // this is indeed a valid pointer
    msize_t msize_and_free = small_meta_header(ptr)[0];
    return (msize_and_free & SMALL_IS_FREE) ? 0 : msize_and_free << SHIFT_SMALL_QUANTUM;
    if (((unsigned)ptr) & ((1 << vm_page_shift) - 1)) {
    // malloc_printf("Object %p not found in szone_size\n", ptr);
    entry = large_entry_for_pointer_no_lock(szone, ptr);
    size = LARGE_ENTRY_SIZE(*entry);
    } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) {
    SZONE_UNLOCK(szone);
    // malloc_printf("szone_size for large/huge %p returned %d\n", ptr, (unsigned)size);
    if (LOG(szone, ptr)) {
    malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)size);
szone_realloc(szone_t *szone, void *ptr, size_t new_size) {
    size_t old_size = 0;
    // malloc_printf("szone_realloc(%p,%d)\n", ptr, new_size);
    if (LOG(szone, ptr)) {
    malloc_printf("In szone_realloc for %p, %d\n", ptr, (unsigned)new_size);
    ptr = szone_malloc(szone, new_size);
    // malloc_printf("szone_realloc(%p,%d) -> %p\n", ptr, new_size, ptr);
    old_size = szone_size(szone, ptr);
    szone_error(szone, "Pointer being reallocated was not allocated", ptr);
    /* we never shrink an allocation */
    if (old_size >= new_size) return ptr;
    if ((new_size + TINY_QUANTUM - 1) <= 31 * TINY_QUANTUM) {
    // We now try to realloc in place
    if (try_realloc_tiny_in_place(szone, ptr, old_size, new_size)) {
    // malloc_printf("szone_realloc(%p,%d) -> %p\n", ptr, new_size, ptr);
    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && ((new_size + SMALL_QUANTUM - 1) < LARGE_THRESHOLD) && (old_size > 31 * TINY_QUANTUM)) {
    // We now try to realloc in place
    if (try_realloc_small_in_place(szone, ptr, old_size, new_size)) {
    // malloc_printf("szone_realloc(%p,%d) small in place -> %p\n", ptr, new_size, ptr);
    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (old_size > LARGE_THRESHOLD)) {
    if (try_realloc_large_or_huge_in_place(szone, ptr, old_size, new_size)) {
    new_ptr = szone_malloc(szone, new_size);
    if (new_ptr == NULL) return NULL;
    if ((old_size > VM_COPY_THRESHOLD) && (new_size > VM_COPY_THRESHOLD)) {
    // we know everything is page-aligned try vm_copy
    kern_return_t err = 0;
    err = vm_copy(mach_task_self(), (vm_address_t)ptr, old_size, (vm_address_t)new_ptr);
    szone_error(szone, "Can't vm_copy region", ptr);
    memcpy(new_ptr, ptr, old_size);
    szone_free(szone, ptr);
    if (LOG(szone, ptr)) {
    malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size);
    // malloc_printf("szone_realloc(%p,%d) -> %p\n", ptr, new_size, new_ptr);
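/*
 * Batch entry points: szone_batch_malloc() only serves tiny-sized requests,
 * peeling up to `count` blocks off a single tiny free list under one lock
 * acquisition, and szone_batch_free() likewise frees tiny pointers in bulk,
 * handing anything else back to the regular szone_free() path.
 */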
szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count) {
    // given a size, returns pointers capable of holding that size
    // returns the number of pointers allocated
    // may return 0 - this function will do best attempts, but just that
    // malloc_printf("In szone_batch_malloc(%d, %d)\n", size, count);
    if (size > 31*TINY_QUANTUM) return 0; // only bother implementing this for tiny
    msize_t msize = (size + TINY_QUANTUM - 1) >> SHIFT_TINY_QUANTUM;
    if (! msize) msize = 1;
    size_t chunk_size = msize << SHIFT_TINY_QUANTUM;
    CHECK(szone, __PRETTY_FUNCTION__);
    SZONE_LOCK(szone); // might as well lock right here to avoid concurrency issues
    free_list_t **free_list = szone->tiny_free_list + msize - 1;
    free_list_t *ptr = *free_list;
    while (found < count) {
    *results++ = ptr; found++;
    set_tiny_meta_header_in_use(ptr, msize);
    ptr = ((free_list_t *)ptr)->next;
    ((free_list_t *)ptr)->previous = NULL;
    free_list_set_checksum(szone, (free_list_t *)ptr);
    *free_list = (void *)ptr;
    // Note that we could allocate from the free lists for larger msize
    // But that may unnecessarily fragment - so we might as well let the client do that
    // We could also allocate from szone->tiny_bytes_free_at_end
    // But that means we'll "eat-up" the untouched area faster, increasing the working set
    // So we just return what we have and just that
    szone->num_tiny_objects += found;
    szone->num_bytes_in_tiny_objects += chunk_size * found;
    SZONE_UNLOCK(szone);
    // malloc_printf("In szone_batch_malloc(%d, %d) -> %d\n", size, count, found);
szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count) {
    // frees all the pointers in to_be_freed
    // note that to_be_freed may be overwritten during the process
    // malloc_printf("Freeing %d items\n", count);
    CHECK(szone, __PRETTY_FUNCTION__);
    while (cc < count) {
    void *ptr = to_be_freed[cc];
    tiny_region_t *tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
    // this is a tiny pointer
    msize_t msize = get_tiny_meta_header(ptr, &is_free);
    if (is_free) break; // a double free; let the standard free deal with it
    tiny_free_no_lock(szone, tiny_region, ptr, msize);
    to_be_freed[cc] = NULL;
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    void *ptr = to_be_freed[count];
    // malloc_printf("Freeing item at %d: %p\n", count, ptr);
    if (ptr) szone_free(szone, ptr);
szone_destroy(szone_t *szone) {
    small_region_t pended_region = 0;
    index = szone->num_large_entries;
    large_entry_t *entry = szone->large_entries + index;
    if (!LARGE_ENTRY_IS_EMPTY(*entry)) {
    large_entry_t range;
    // we deallocate_pages, including guard pages
    deallocate_pages(szone, LARGE_ENTRY_ADDRESS(range), LARGE_ENTRY_SIZE(range), szone->debug_flags);
    if (szone->num_large_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD) {
    vm_range_t range_to_deallocate;
    large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate); // we do not free in the small chunk case
    if (range_to_deallocate.size) deallocate_pages(szone, range_to_deallocate.address, range_to_deallocate.size, 0);
    index = szone->num_huge_entries;
    huge_entry_t *huge = szone->huge_entries + index;
    deallocate_pages(szone, huge->address, huge->size, szone->debug_flags);
    index = szone->num_tiny_regions;
    tiny_region_t tiny_region = szone->tiny_regions[index];
    vm_size_t size_allocated = ((TINY_REGION_SIZE + (1 << vm_page_shift) - 1) >> vm_page_shift) << vm_page_shift;
    deallocate_pages(szone, TINY_REGION_ADDRESS(tiny_region), size_allocated, 0);
    // and now we free regions, with regions[0] as the last one (the final harakiri)
    index = szone->num_small_regions;
    small_region_t region = szone->small_regions[index];
        && (void *)szone->small_regions >= (void *)(SMALL_REGION_ADDRESS(region))
        && (void *)szone->small_regions < (void *)(SMALL_REGION_END(region))) {
    // Pend deallocation of this region, since the region
    // bookkeeping array is in it.
    pended_region = region;
    deallocate_pages(szone, SMALL_REGION_ADDRESS(region), SMALL_REGION_SIZE, 0);
    if (pended_region) {
    deallocate_pages(szone, SMALL_REGION_ADDRESS(pended_region), SMALL_REGION_SIZE, 0);
szone_good_size(szone_t *szone, size_t size) {
    if (size <= 31 * TINY_QUANTUM) {
    msize_t msize = (size + TINY_QUANTUM - 1) >> SHIFT_TINY_QUANTUM;
    if (! msize) msize = 1;
    return msize << SHIFT_TINY_QUANTUM;
    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
    msize_t msize = (size + SMALL_QUANTUM - 1) >> SHIFT_SMALL_QUANTUM;
    if (! msize) msize = 1;
    return msize << SHIFT_SMALL_QUANTUM;
    num_pages = round_page(size) >> vm_page_shift;
    if (!num_pages) num_pages = 1; // minimal allocation size for this
    return num_pages << vm_page_shift;
unsigned szone_check_counter = 0;
unsigned szone_check_start = 0;
unsigned szone_check_modulo = 1;

szone_check_all(szone_t *szone, const char *function) {
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    while (index < szone->num_tiny_regions) {
    tiny_region_t *region = szone->tiny_regions + index++;
    if (! szone_check_tiny_region(szone, region)) {
    SZONE_UNLOCK(szone);
    szone->debug_flags &= ~ CHECK_REGIONS;
    malloc_printf("*** malloc[%d]: Tiny region %d incorrect szone_check_all(%s) counter=%d\n", getpid(), index - 1, function, szone_check_counter);
    szone_error(szone, "Check: tiny region incorrect", NULL);
    while (index < NUM_TINY_SLOTS) {
    if (! tiny_free_list_check(szone, index)) {
    SZONE_UNLOCK(szone);
    szone->debug_flags &= ~ CHECK_REGIONS;
    malloc_printf("*** malloc[%d]: Tiny free list incorrect (slot=%d) szone_check_all(%s) counter=%d\n", getpid(), index, function, szone_check_counter);
    szone_error(szone, "Check: tiny free list incorrect", NULL);
    index = 0; while (index < szone->num_small_regions) {
    small_region_t *region = szone->small_regions + index++;
    if (! szone_check_small_region(szone, region)) {
    SZONE_UNLOCK(szone);
    szone->debug_flags &= ~ CHECK_REGIONS;
    malloc_printf("*** malloc[%d]: Small region %d incorrect szone_check_all(%s) counter=%d\n", getpid(), index - 1, function, szone_check_counter);
    szone_error(szone, "Check: small region incorrect", NULL);
    while (index < NUM_SMALL_SLOTS) {
    if (! small_free_list_check(szone, index)) {
    SZONE_UNLOCK(szone);
    szone->debug_flags &= ~ CHECK_REGIONS;
    malloc_printf("*** malloc[%d]: Small free list incorrect (grain=%d) szone_check_all(%s) counter=%d\n", getpid(), index, function, szone_check_counter);
    szone_error(szone, "Check: small free list incorrect", NULL);
    SZONE_UNLOCK(szone);
    // szone_print(szone, 1);

szone_check(szone_t *szone) {
    if (! (++szone_check_counter % 10000)) {
    malloc_printf("At szone_check counter=%d\n", szone_check_counter);
    if (szone_check_counter < szone_check_start) return 1;
    if (szone_check_counter % szone_check_modulo) return 1;
    return szone_check_all(szone, "");
static kern_return_t
szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder) {
    if (!reader) reader = _szone_default_reader;
    // malloc_printf("Enumerator for zone %p\n", zone_address);
    err = reader(task, zone_address, sizeof(szone_t), (void **)&szone);
    if (err) return err;
    // malloc_printf("Tiny ptrs enumeration for zone %p\n", zone_address);
    err = tiny_in_use_enumerator(task, context, type_mask, (vm_address_t)szone->tiny_regions, szone->num_tiny_regions, szone->tiny_bytes_free_at_end, reader, recorder);
    if (err) return err;
    // malloc_printf("Small ptrs enumeration for zone %p\n", zone_address);
    err = small_in_use_enumerator(task, context, type_mask, (vm_address_t)szone->small_regions, szone->num_small_regions, szone->small_bytes_free_at_end, reader, recorder);
    if (err) return err;
    // malloc_printf("Large ptrs enumeration for zone %p\n", zone_address);
    err = large_in_use_enumerator(task, context, type_mask, (vm_address_t)szone->large_entries, szone->num_large_entries, reader, recorder);
    if (err) return err;
    // malloc_printf("Huge ptrs enumeration for zone %p\n", zone_address);
    err = huge_in_use_enumerator(task, context, type_mask, (vm_address_t)szone->huge_entries, szone->num_huge_entries, reader, recorder);
// Following method is deprecated: use scalable_zone_statistics instead

scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count) {
    szone_t *szone = (void *)zone;
    // We do not lock to facilitate debug
    info[4] = szone->num_tiny_objects;
    info[5] = szone->num_bytes_in_tiny_objects;
    info[6] = szone->num_small_objects;
    info[7] = szone->num_bytes_in_small_objects;
    info[8] = szone->num_large_objects_in_use;
    info[9] = szone->num_bytes_in_large_objects;
    info[10] = szone->num_huge_entries;
    info[11] = szone->num_bytes_in_huge_objects;
    info[12] = szone->debug_flags;
    info[0] = info[4] + info[6] + info[8] + info[10];
    info[1] = info[5] + info[7] + info[9] + info[11];
    info[3] = szone->num_tiny_regions * TINY_REGION_SIZE + szone->num_small_regions * SMALL_REGION_SIZE + info[9] + info[11];
    info[2] = info[3] - szone->tiny_bytes_free_at_end - szone->small_bytes_free_at_end;
    memcpy(info_to_fill, info, sizeof(unsigned)*count);
szone_print(szone_t *szone, boolean_t verbose) {
    scalable_zone_info((void *)szone, info, 13);
    malloc_printf("Scalable zone %p: inUse=%d(%y) touched=%y allocated=%y flags=%d\n", szone, info[0], info[1], info[2], info[3], info[12]);
    malloc_printf("\ttiny=%d(%y) small=%d(%y) large=%d(%y) huge=%d(%y)\n", info[4], info[5], info[6], info[7], info[8], info[9], info[10], info[11]);
    malloc_printf("%d tiny regions: \n", szone->num_tiny_regions);
    while (index < szone->num_tiny_regions) {
    tiny_region_t *region = szone->tiny_regions + index;
    print_tiny_region(verbose, *region, (index == szone->num_tiny_regions - 1) ? szone->tiny_bytes_free_at_end : 0);
    if (verbose) print_tiny_free_list(szone);
    malloc_printf("%d small regions: \n", szone->num_small_regions);
    while (index < szone->num_small_regions) {
    small_region_t *region = szone->small_regions + index;
    print_small_region(szone, verbose, region, (index == szone->num_small_regions - 1) ? szone->small_bytes_free_at_end : 0);
    if (verbose) print_small_free_list(szone);
    SZONE_UNLOCK(szone);
szone_log(malloc_zone_t *zone, void *log_address) {
    szone_t *szone = (void *)zone;
    szone->log_address = log_address;

szone_force_lock(szone_t *szone) {
    // malloc_printf("szone_force_lock\n");

szone_force_unlock(szone_t *szone) {
    // malloc_printf("szone_force_unlock\n");
    SZONE_UNLOCK(szone);
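/*
 * Per-subzone statistics: the groups of assignments below report the tiny,
 * small, large and huge subzones in turn, selected by the subzone argument;
 * for tiny and small, the untouched tail of the last region is subtracted
 * from max_size_in_use.
 */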
scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone) {
    szone_t *szone = (void *)zone;
    stats->blocks_in_use = szone->num_tiny_objects;
    stats->size_in_use = szone->num_bytes_in_tiny_objects;
    stats->size_allocated = szone->num_tiny_regions * TINY_REGION_SIZE;
    stats->max_size_in_use = stats->size_allocated - szone->tiny_bytes_free_at_end;
    stats->blocks_in_use = szone->num_small_objects;
    stats->size_in_use = szone->num_bytes_in_small_objects;
    stats->size_allocated = szone->num_small_regions * SMALL_REGION_SIZE;
    stats->max_size_in_use = stats->size_allocated - szone->small_bytes_free_at_end;
    stats->blocks_in_use = szone->num_large_objects_in_use;
    stats->size_in_use = szone->num_bytes_in_large_objects;
    stats->max_size_in_use = stats->size_allocated = stats->size_in_use;
    stats->blocks_in_use = szone->num_huge_entries;
    stats->size_in_use = szone->num_bytes_in_huge_objects;
    stats->max_size_in_use = stats->size_allocated = stats->size_in_use;
szone_statistics(szone_t *szone, malloc_statistics_t *stats) {
    stats->blocks_in_use = szone->num_tiny_objects + szone->num_small_objects + szone->num_large_objects_in_use + szone->num_huge_entries;
    size_t big_and_huge = szone->num_bytes_in_large_objects + szone->num_bytes_in_huge_objects;
    stats->size_in_use = szone->num_bytes_in_tiny_objects + szone->num_bytes_in_small_objects + big_and_huge;
    stats->max_size_in_use = stats->size_allocated = szone->num_tiny_regions * TINY_REGION_SIZE + szone->num_small_regions * SMALL_REGION_SIZE + big_and_huge;
    // Now we account for the untouched areas
    stats->max_size_in_use -= szone->tiny_bytes_free_at_end;
    stats->max_size_in_use -= szone->small_bytes_free_at_end;
static const struct malloc_introspection_t szone_introspect = {
    (void *)szone_ptr_in_use_enumerator,
    (void *)szone_good_size,
    (void *)szone_check,
    (void *)szone_print,
    (void *)szone_log,
    (void *)szone_force_lock,
    (void *)szone_force_unlock,
    (void *)szone_statistics
}; // marked as const to spare the DATA section
malloc_zone_t *
create_scalable_zone(size_t initial_size, unsigned debug_flags) {
    szone_t *szone;
    vm_address_t addr;
    msize_t msize;
    size_t msize_used = 0;
    // malloc_printf("=== create_scalable_zone(%d,%d) - %s\n", initial_size, debug_flags, (DEBUG_MALLOC) ? "**** DEBUG" : "");
#if PAGE_SIZE_FIXED
    if ((1 << vm_page_shift) == vm_page_size) {
        // malloc_printf("vm_page_shift validated to be %d\n", vm_page_shift);
    } else {
        malloc_printf("*** vm_page_shift incorrectly set to %d\n", vm_page_shift);
        exit(-1);
    }
#else
    if (!vm_page_shift) {
        unsigned page;
        vm_page_shift = 12; // the minimal for page sizes
        page = 1 << vm_page_shift;
        while (page != vm_page_size) { page += page; vm_page_shift++; };
    }
#endif
    addr = allocate_pages(NULL, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC));
    if (!addr) return NULL;
    szone = (void *)addr;
    msize = (sizeof(szone_t) + SMALL_QUANTUM - 1) >> SHIFT_SMALL_QUANTUM;
    // malloc_printf("sizeof(szone_t)=%d msize for 1st block=%d; wasted %d bytes\n", sizeof(szone_t), msize, (msize << SHIFT_SMALL_QUANTUM) - sizeof(szone_t));
    small_meta_header(szone)[0] = msize;
    szone->tiny_regions = szone->initial_tiny_regions;
    szone->small_regions = szone->initial_small_regions;
    msize_used += msize; szone->num_small_objects++;
    szone->basic_zone.version = 3;
    szone->basic_zone.size = (void *)szone_size;
    szone->basic_zone.malloc = (void *)szone_malloc;
    szone->basic_zone.calloc = (void *)szone_calloc;
    szone->basic_zone.valloc = (void *)szone_valloc;
    szone->basic_zone.free = (void *)szone_free;
    szone->basic_zone.realloc = (void *)szone_realloc;
    szone->basic_zone.destroy = (void *)szone_destroy;
    szone->basic_zone.batch_malloc = (void *)szone_batch_malloc;
    szone->basic_zone.batch_free = (void *)szone_batch_free;
    szone->basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;
    LOCK_INIT(szone->lock);
#if 0
#warning CHECK_REGIONS enabled
    debug_flags |= CHECK_REGIONS;
#endif
#if 0
#warning LOG enabled
    szone->log_address = ~0;
#endif
    szone->debug_flags = debug_flags;
    szone->small_regions[0] = addr >> SMALL_BLOCKS_ALIGN;
    szone->num_small_regions = 1;
    msize_t free_msize = NUM_SMALL_BLOCKS - msize;
    small_meta_header(szone)[msize] = free_msize;
    szone->small_bytes_free_at_end = free_msize << SHIFT_SMALL_QUANTUM;
    CHECK(szone, __PRETTY_FUNCTION__);
#if 0
    write(1, "Malloc szone created\n", 23);
#endif
    return (malloc_zone_t *)szone;
}
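
/*
 * Illustrative sketch (left as a comment, not compiled): how a client would
 * exercise a zone returned by create_scalable_zone() through the standard
 * malloc_zone_* entry points of <malloc/malloc.h>, which dispatch through the
 * basic_zone function pointers installed above.  Error handling is elided.
 *
 *     malloc_zone_t *zone = create_scalable_zone(0, 0);  // no debug flags
 *     malloc_zone_register(zone);                        // make it a known zone
 *     void *p = malloc_zone_malloc(zone, 100);           // dispatches to szone_malloc
 *     malloc_zone_free(zone, p);                         // dispatches to szone_free
 */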
/********* Support code for emacs unexec ************/

/* History of freezedry version numbers:
 *
 * 1) Old malloc (before the scalable malloc implementation in this file)
 * 2) Original freezedrying code for scalable malloc.  This code was apparently
 *    based on the old freezedrying code and was fundamentally flawed in its
 *    assumption that tracking allocated memory regions was adequate to fake
 *    operations on freezedried memory.  This doesn't work, since scalable
 *    malloc does not store flags in front of large page-aligned allocations.
 * 3) Original szone-based freezedrying code.
 * 4) Fresher malloc with tiny zone
 *
 * No version backward compatibility is provided, but the version number does
 * make it possible for malloc_jumpstart() to return an error if the application
 * was freezedried with an older version of malloc.
 */
#define MALLOC_FREEZEDRY_VERSION 4
/* State recorded by malloc_freezedry() and handed back to malloc_jumpstart(). */
typedef struct {
    unsigned version;
    unsigned nszones;
    szone_t *szones;
} malloc_frozen;

static void *
frozen_malloc(szone_t *zone, size_t new_size) {
    return malloc(new_size);
}

static void *
frozen_calloc(szone_t *zone, size_t num_items, size_t size) {
    return calloc(num_items, size);
}

static void *
frozen_valloc(szone_t *zone, size_t new_size) {
    return valloc(new_size);
}

static void *
frozen_realloc(szone_t *zone, void *ptr, size_t new_size) {
    size_t old_size = szone_size(zone, ptr);
    void *new_ptr;
    if (new_size <= old_size) {
        return ptr;
    }
    new_ptr = malloc(new_size);
    if (old_size > 0) {
        memcpy(new_ptr, ptr, old_size);
    }
    return new_ptr;
}

static void
frozen_free(szone_t *zone, void *ptr) {
    /* Freezedried memory is never reclaimed. */
}

static void
frozen_destroy(szone_t *zone) {
    /* Freezedried zones are never destroyed. */
}
/********* Pseudo-private API for emacs unexec ************/

/*
 * malloc_freezedry() records all of the szones in use, so that they can be
 * partially reconstituted by malloc_jumpstart().  Due to the differences
 * between reconstituted memory regions and those created by the szone code,
 * care is taken not to reallocate from the freezedried memory, except in the
 * case of a non-growing realloc().
 *
 * Due to the flexibility provided by the zone registration mechanism, it is
 * impossible to implement generic freezedrying for any zone type.  This code
 * only handles applications that use the szone allocator, so malloc_freezedry()
 * returns 0 (error) if any non-szone zones are encountered.
 */
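
/*
 * Illustrative sketch (left as a comment, not compiled): the call sequence an
 * unexec-style dumper is expected to follow.  The dumping step itself is
 * hypothetical; only malloc_freezedry() and malloc_jumpstart() come from this
 * file.
 *
 *     int cookie = malloc_freezedry();   // snapshot the szones before dumping
 *     if (cookie == 0) {
 *         // a non-szone zone was registered; freezedrying is not possible
 *     }
 *     // ... write out the process image, preserving `cookie` inside it ...
 *
 *     // in the dumped executable, before touching any freezedried pointer:
 *     if (malloc_jumpstart(cookie) != 0) {
 *         // the image was freezedried by an incompatible malloc version
 *     }
 */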
int
malloc_freezedry(void) {
    extern unsigned malloc_num_zones;
    extern malloc_zone_t **malloc_zones;
    malloc_frozen *data;
    unsigned i;

    /* Allocate space in which to store the freezedry state. */
    data = (malloc_frozen *) malloc(sizeof(malloc_frozen));

    /* Set freezedry version number so that malloc_jumpstart() can check for compatibility. */
    data->version = MALLOC_FREEZEDRY_VERSION;

    /* Allocate the array of szone pointers. */
    data->nszones = malloc_num_zones;
    data->szones = (szone_t *) calloc(malloc_num_zones, sizeof(szone_t));

    /* Fill in the array of szone structures.  They are copied rather than
     * referenced, since the originals are likely to be clobbered during malloc
     * initialization. */
    for (i = 0; i < malloc_num_zones; i++) {
        if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) {
            /* Unknown zone type. */
            free(data->szones);
            free(data);
            return 0;
        }
        memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t));
    }

    return (int) data;
}
int
malloc_jumpstart(int cookie) {
    malloc_frozen *data = (malloc_frozen *) cookie;
    unsigned i;

    if (data->version != MALLOC_FREEZEDRY_VERSION) {
        /* Unsupported freezedry version. */
        return 1;
    }

    for (i = 0; i < data->nszones; i++) {
        /* Set function pointers.  Even the functions that stay the same must be
         * set, since there are no guarantees that they will be mapped to the
         * same addresses. */
        data->szones[i].basic_zone.size = (void *) szone_size;
        data->szones[i].basic_zone.malloc = (void *) frozen_malloc;
        data->szones[i].basic_zone.calloc = (void *) frozen_calloc;
        data->szones[i].basic_zone.valloc = (void *) frozen_valloc;
        data->szones[i].basic_zone.free = (void *) frozen_free;
        data->szones[i].basic_zone.realloc = (void *) frozen_realloc;
        data->szones[i].basic_zone.destroy = (void *) frozen_destroy;
        data->szones[i].basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;

        /* Register the freezedried zone. */
        malloc_zone_register(&data->szones[i].basic_zone);