/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/* Author: Bertrand Serlet, August 1999 */
#import "scalable_malloc.h"

#import <pthread_internals.h>

#include <mach/vm_statistics.h>
/********************* DEFINITIONS ************************/

#define DEBUG_MALLOC 0 // set to one to debug malloc itself
#define DEBUG_CLIENT 0 // set to one to help debug a nasty memory smasher

#if DEBUG_MALLOC
#warning DEBUG_MALLOC ENABLED
#define INLINE
#define CHECK_LOCKED(szone, fun) { \
    if (__is_threaded && TRY_LOCK(szone->lock)) { \
        malloc_printf("*** lock was not set %p in %s\n", szone->lock, fun); \
    } \
}
#else
#define INLINE __inline__
#define CHECK_LOCKED(szone, fun) {}
#endif

#define PAGE_SIZE_FIXED 1 // flip if the page size becomes variable, one day

#if PAGE_SIZE_FIXED
#define vm_page_shift 12
#else
static unsigned vm_page_shift = 0; // guaranteed to be initialized by zone creation
#endif
typedef unsigned short msize_t; // a size in multiples of SHIFT_SMALL_QUANTUM or SHIFT_TINY_QUANTUM
typedef struct {
    unsigned address_and_num_pages;
    // this type represents both an address and a number of pages
    // the low bits are the number of pages; the high bits are the address
    // note that the exact number of bits used for each depends on the page size
    // also, this cannot represent pointers larger than 1 << (vm_page_shift * 2)
} compact_range_t;

typedef unsigned char grain_t;
#define CHECK_REGIONS (1 << 31)

#define CHECKSUM_MAGIC 0x357B

#define MAX_RECORDER_BUFFER 256
/********************* DEFINITIONS for tiny ************************/

#define SHIFT_TINY_QUANTUM 4 // Required for AltiVec
#define TINY_QUANTUM (1 << SHIFT_TINY_QUANTUM)

#define FOLLOWING_TINY_PTR(ptr,msize) (((char *)(ptr)) + ((msize) << SHIFT_TINY_QUANTUM))

#define NUM_TINY_SLOTS 32 // number of slots for free-lists

#define SHIFT_NUM_TINY_BLOCKS 16
#define NUM_TINY_BLOCKS (1 << SHIFT_NUM_TINY_BLOCKS)
#define TINY_BLOCKS_ALIGN (SHIFT_NUM_TINY_BLOCKS + SHIFT_TINY_QUANTUM)
#define TINY_REGION_SIZE ((NUM_TINY_BLOCKS * TINY_QUANTUM + (NUM_TINY_BLOCKS >> 2) + 8 + (1 << vm_page_shift) - 1) & ~ ((1 << vm_page_shift) - 1)) // enough room for the data, followed by the bit arrays (2 bits per block), plus 2 words of padding as our bitmap operators overflow, plus rounding to the nearest page
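// Worked example (added for illustration; assumes vm_page_shift == 12, i.e. 4K pages):
// NUM_TINY_BLOCKS * TINY_QUANTUM == 65536 * 16 == 1048576 bytes of payload,
// plus (NUM_TINY_BLOCKS >> 2) == 16384 bytes for the two bit arrays,
// plus 8 bytes of overflow padding == 1064968 bytes, which rounds up to the
// next page boundary, so TINY_REGION_SIZE evaluates to 1069056 bytes.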
#define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[6])
// At the end of free blocks, we stick the size (for enabling coalescing)
#define TINY_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]
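// Example (added for illustration): when a free block ptr has msize 5, the
// allocator stores 5 both at ((msize_t *)ptr)[6] (read back via TINY_FREE_SIZE)
// and in the last msize_t of the block, so the block that follows it can read
// TINY_PREVIOUS_MSIZE(follower) == 5 and coalesce backwards; see
// set_tiny_meta_header_free() below.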
#define TINY_REGION_ADDRESS(region) ((region) << TINY_BLOCKS_ALIGN)
#define TINY_REGION_END(region) (TINY_REGION_ADDRESS(region) + (1 << TINY_BLOCKS_ALIGN))

typedef unsigned short tiny_region_t;

#define INITIAL_NUM_TINY_REGIONS 24 // must be even for szone to be aligned

#define TINY_CACHE 1 // This governs a last-free cache of 1 that bypasses the free-list
#if !TINY_CACHE
#warning TINY_CACHE turned off
#endif
/********************* DEFINITIONS for small ************************/

/* We store the meta bits on the side, two bytes per block, as follows:
- the high order bit SMALL_IS_FREE is set when the block is available (and starts here)
- the other 15 bits give the block size, expressed in SMALL_QUANTUM units
- a value of 0 signifies this block is in the middle of another block
*/
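// Example (added for illustration): a 3-quantum block that is in use has
// meta_headers[index] == 3; once freed (and not coalesced) it becomes
// 3 | SMALL_IS_FREE == 0x8003, and the two quanta in its middle keep
// meta_headers[] == 0, meaning "interior of another block".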
#define SMALL_IS_FREE (1 << 15)

#define SHIFT_SMALL_QUANTUM (SHIFT_TINY_QUANTUM + 5) // 9
#define SMALL_QUANTUM (1 << SHIFT_SMALL_QUANTUM) // 512 bytes

#define FOLLOWING_SMALL_PTR(ptr,msize) (((char *)(ptr)) + ((msize) << SHIFT_SMALL_QUANTUM))

#define NUM_SMALL_SLOTS 32 // number of slots for free-lists

#define SHIFT_NUM_SMALL_BLOCKS 14 // we can only represent up to 1<<15 for msize; but we chose to stay even below that to avoid the convention msize=0 => msize = (1<<15)
#define NUM_SMALL_BLOCKS (1 << SHIFT_NUM_SMALL_BLOCKS)
#define SMALL_BLOCKS_ALIGN (SHIFT_NUM_SMALL_BLOCKS + SHIFT_SMALL_QUANTUM) // 23
#define SMALL_REGION_SIZE (NUM_SMALL_BLOCKS * SMALL_QUANTUM + NUM_SMALL_BLOCKS * 2) // data + meta data
#define SMALL_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]

#define SMALL_REGION_ADDRESS(region) (((unsigned)region) << SMALL_BLOCKS_ALIGN)
#define SMALL_REGION_END(region) (SMALL_REGION_ADDRESS(region) + (1 << SMALL_BLOCKS_ALIGN))

typedef unsigned short small_region_t;

#define INITIAL_NUM_SMALL_REGIONS 6 // must be even for szone to be aligned

#define PROTECT_SMALL 0 // Should be 0: 1 is too slow for normal use

#define SMALL_CACHE 1
#if !SMALL_CACHE
#warning SMALL_CACHE turned off
#endif
/********************* DEFINITIONS for large ************************/

#define LARGE_THRESHOLD (15 * 1024) // at or above this use "large"

#if (LARGE_THRESHOLD > NUM_SMALL_SLOTS * SMALL_QUANTUM)
#error LARGE_THRESHOLD should always be less than NUM_SMALL_SLOTS * SMALL_QUANTUM
#endif

#define VM_COPY_THRESHOLD (40 * 1024)
// When all memory is touched after a copy, vm_copy() is always a loss
// But if the memory is only read, vm_copy() wins over memmove() at 3 or 4 pages (on a G3/300MHz)
// This must be larger than LARGE_THRESHOLD
#define LARGE_ENTRY_ADDRESS(entry) \
    (((entry).address_and_num_pages >> vm_page_shift) << vm_page_shift)
#define LARGE_ENTRY_NUM_PAGES(entry) \
    ((entry).address_and_num_pages & ((1 << vm_page_shift) - 1))
#define LARGE_ENTRY_SIZE(entry) \
    (LARGE_ENTRY_NUM_PAGES(entry) << vm_page_shift)
#define LARGE_ENTRY_MATCHES(entry,ptr) \
    (!(((entry).address_and_num_pages - (unsigned)(ptr)) >> vm_page_shift))
#define LARGE_ENTRY_IS_EMPTY(entry) (!((entry).address_and_num_pages))
typedef compact_range_t large_entry_t;
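// Illustrative sketch (added; not part of the original source). It shows how a
// large_entry_t packs an address and a page count into one word, assuming
// vm_page_shift == 12 (4K pages); the variable names are hypothetical.
#if 0
static void large_entry_example(void) {
    large_entry_t entry;
    entry.address_and_num_pages = 0x2a000 | 3;        // 3 pages starting at 0x2a000
    vm_address_t base = LARGE_ENTRY_ADDRESS(entry);   // 0x2a000 (low bits masked off)
    unsigned pages = LARGE_ENTRY_NUM_PAGES(entry);    // 3 (low bits kept)
    vm_size_t bytes = LARGE_ENTRY_SIZE(entry);        // 3 << 12 == 12288
    boolean_t hit = LARGE_ENTRY_MATCHES(entry, (void *)0x2a000); // true: 0x2a003 - 0x2a000 == 3, which vanishes after >> vm_page_shift
    (void)base; (void)pages; (void)bytes; (void)hit;
}
#endif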
/********************* DEFINITIONS for huge ************************/

typedef vm_range_t huge_entry_t;
/********************* zone itself ************************/

typedef struct {
    malloc_zone_t basic_zone;
    unsigned debug_flags;
    /* Regions for tiny objects */
    unsigned num_tiny_regions;
    tiny_region_t *tiny_regions;
    void *last_tiny_free; // low SHIFT_TINY_QUANTUM bits indicate the msize
    unsigned tiny_bitmap; // cache of the 32 free lists
    free_list_t *tiny_free_list[NUM_TINY_SLOTS]; // 31 free lists for 1*TINY_QUANTUM to 31*TINY_QUANTUM plus 1 for larger than 31*TINY_QUANTUM
    size_t tiny_bytes_free_at_end; // the last free region in the last block is treated as a big block in use that is not accounted for
    unsigned num_tiny_objects;
    unsigned num_bytes_in_tiny_objects;
    /* Regions for small objects */
    unsigned num_small_regions;
    small_region_t *small_regions;
    void *last_small_free; // low SHIFT_SMALL_QUANTUM bits indicate the msize
    unsigned small_bitmap; // cache of the free list
    free_list_t *small_free_list[NUM_SMALL_SLOTS];
    size_t small_bytes_free_at_end; // the last free region in the last block is treated as a big block in use that is not accounted for
    unsigned num_small_objects;
    unsigned num_bytes_in_small_objects;
    /* large objects: vm_page_shift <= log2(size) < 2 * vm_page_shift */
    unsigned num_large_objects_in_use;
    unsigned num_large_entries;
    large_entry_t *large_entries; // hashed by location; null entries don't count
    unsigned num_bytes_in_large_objects;
    /* huge objects: log2(size) >= 2 * vm_page_shift */
    unsigned char num_huge_entries;
    huge_entry_t *huge_entries;
    unsigned num_bytes_in_huge_objects;
    /* Initial region list */
    tiny_region_t initial_tiny_regions[INITIAL_NUM_TINY_REGIONS];
    small_region_t initial_small_regions[INITIAL_NUM_SMALL_REGIONS];
} szone_t;
static void *szone_malloc(szone_t *szone, size_t size);
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested);
static void szone_free(szone_t *szone, void *ptr);
static boolean_t szone_check_all(szone_t *szone, const char *function);
static void szone_print(szone_t *szone, boolean_t verbose);
static void *small_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
#if DEBUG_MALLOC
#define LOG(szone,ptr) \
    (szone->log_address && (((unsigned)szone->log_address == -1) || (szone->log_address == (void *)(ptr))))
#else
#define LOG(szone,ptr) 0
#endif

#define SZONE_LOCK(szone) { \
    LOCK(szone->lock); \
}

#define SZONE_UNLOCK(szone) { \
    UNLOCK(szone->lock); \
}

#define LOCK_AND_NOTE_LOCKED(szone,locked) { \
    CHECK(szone, __PRETTY_FUNCTION__); \
    locked = 1; SZONE_LOCK(szone); \
}

#if DEBUG_MALLOC || DEBUG_CLIENT
#define CHECK(szone,fun) \
    if ((szone)->debug_flags & CHECK_REGIONS) szone_check_all(szone, fun)
#else
#define CHECK(szone,fun) {}
#endif
/********************* VERY LOW LEVEL UTILITIES ************************/

#if DEBUG_MALLOC || DEBUG_CLIENT
static void
szone_sleep(void) {
    if (getenv("MallocErrorSleep")) {
        malloc_printf("*** Sleeping to help debug\n");
        sleep(3600); // to help debug
    }
}
#endif
static void
szone_error(szone_t *szone, const char *msg, const void *ptr) {
    if (szone) SZONE_UNLOCK(szone);
    if (ptr) {
        malloc_printf("*** malloc[%d]: error for object %p: %s\n", getpid(), ptr, msg);
    } else {
        malloc_printf("*** malloc[%d]: error: %s\n", getpid(), msg);
    }
#if DEBUG_MALLOC
    szone_print(szone, 1);
#endif
}
static void
protect(szone_t *szone, vm_address_t address, vm_size_t size, unsigned protection, unsigned debug_flags) {
    kern_return_t err;
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) {
        err = vm_protect(mach_task_self(), address - (1 << vm_page_shift), 1 << vm_page_shift, 0, protection);
        if (err) {
            malloc_printf("*** malloc[%d]: Can't protect(%p) region for "
                "prelude guard page at %p\n", getpid(), protection,
                address - (1 << vm_page_shift));
        }
    }
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) {
        err = vm_protect(mach_task_self(), (vm_address_t)(address + size), 1 << vm_page_shift, 0, protection);
        if (err) {
            malloc_printf("*** malloc[%d]: Can't protect(%p) region for "
                "postlude guard page at %p\n", getpid(), protection,
                address + size);
        }
    }
}
static vm_address_t
allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label) {
    // align specifies a desired alignment (as a log) or 0 if no alignment requested
    kern_return_t err;
    vm_address_t addr;
    boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    size_t allocation_size = round_page(size);
    if (align) add_guard_pages = 0; // too cumbersome to deal with that
    if (!allocation_size) allocation_size = 1 << vm_page_shift;
    if (add_guard_pages) allocation_size += 2 * (1 << vm_page_shift);
    if (align) allocation_size += 1 << align;
    err = vm_allocate(mach_task_self(), &addr, allocation_size, vm_page_label | 1);
    if (err) {
        malloc_printf("*** malloc: vm_allocate(size=%d) failed (error code=%d)\n", size, err);
        szone_error(szone, "Can't allocate region", NULL);
        return NULL;
    }
    // malloc_printf("In allocate_pages(size=%d(%p), align=%d) -> %p\n", size, size, align, addr);
    if (align) {
        vm_address_t aligned_address = (addr + (1 << align) - 1) & ~ ((1 << align) - 1);
        if (aligned_address != addr) {
            size_t delta = aligned_address - addr;
            err = vm_deallocate(mach_task_self(), addr, delta);
            if (err) malloc_printf("*** malloc: freeing unaligned header failed with %d\n", err);
            // malloc_printf("deallocated unaligned header %p length=%d(%p)\n", addr, delta, delta);
            addr = aligned_address;
            allocation_size -= delta;
        }
        if (allocation_size > size) {
            err = vm_deallocate(mach_task_self(), addr+size, allocation_size - size);
            if (err) malloc_printf("*** malloc: freeing unaligned footer failed with %d\n", err);
        }
    }
    if (add_guard_pages) {
        addr += 1 << vm_page_shift;
        protect(szone, addr, size, 0, debug_flags);
    }
    return addr;
}
static void
deallocate_pages(szone_t *szone, vm_address_t addr, size_t size, unsigned debug_flags) {
    kern_return_t err;
    boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    if (add_guard_pages) {
        addr -= 1 << vm_page_shift;
        size += 2 * (1 << vm_page_shift);
    }
    err = vm_deallocate(mach_task_self(), addr, size);
    if (err) {
        szone_error(szone, "Can't deallocate_pages region", (void *)addr);
    }
}
static kern_return_t
_szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) {
    *ptr = (void *)address;
    return 0;
}
static INLINE void
free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg) {
    // We always checksum, as testing whether to do it (based on szone->debug_flags) is as fast as doing it
    if (ptr->checksum != (((unsigned)ptr->previous) ^ ((unsigned)ptr->next) ^ CHECKSUM_MAGIC)) {
        malloc_printf("*** Incorrect checksum: %s\n", msg);
        szone_error(szone, "Incorrect checksum for freed object - object was probably modified after being freed; break at szone_error", ptr);
    }
}
static INLINE void
free_list_set_checksum(szone_t *szone, free_list_t *ptr) {
    // We always set checksum, as testing whether to do it (based on
    // szone->debug_flags) is slower than just doing it
    ptr->checksum = ((unsigned)ptr->previous) ^ ((unsigned)ptr->next) ^ CHECKSUM_MAGIC;
}
static unsigned
free_list_count(const free_list_t *ptr) {
    unsigned count = 0;
    while (ptr) {
        count++;
        // malloc_printf("%p ", ptr);
        ptr = ptr->next;
    }
    return count;
}
#define BITMAP32_SET(bitmap,bit) (bitmap |= 1 << (bit))
#define BITMAP32_CLR(bitmap,bit) (bitmap &= ~ (1 << (bit)))
#define BITMAP32_BIT(bitmap,bit) ((bitmap >> (bit)) & 1)

#define BITMAP32_FFS(bitmap) (ffs(bitmap))
// returns bit # of first bit that's one, starting at 1 (returns 0 if !bitmap)
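// Illustrative sketch (added; not part of the original source): how the
// BITMAP32 macros are typically used to track which free-list slots are
// non-empty, as tiny_bitmap/small_bitmap do. The function name is hypothetical.
#if 0
static void bitmap32_example(void) {
    unsigned bitmap = 0;
    BITMAP32_SET(bitmap, 3);            // bitmap == 0x8: slot 3 now has a free block
    BITMAP32_SET(bitmap, 7);            // bitmap == 0x88
    int first = BITMAP32_FFS(bitmap);   // ffs() returns 4, i.e. bit #3 + 1
    grain_t slot = first - 1;           // convert back to a 0-based slot index
    BITMAP32_CLR(bitmap, slot);         // slot 3 emptied; bitmap == 0x80
    if (!BITMAP32_BIT(bitmap, 3)) {
        // slot 3 is now reported empty
    }
    (void)slot;
}
#endif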
/********************* TINY FREE LIST UTILITIES ************************/

// We encode the meta-headers as follows:
// Each quantum has an associated set of 2 bits:
// block_header when 1 says this block is the beginning of a block
// in_use when 1 says this block is in use
// so a block in use of size 3 is 1-1 0-X 0-X
// for a free block TINY_FREE_SIZE(ptr) carries the size and the bits are 1-0 X-X X-X
// for a block middle the bits are 0-0
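// Worked example (added for illustration, not in the original source):
// an in-use block of msize == 3 starting at quantum index i is encoded as
//     block_header bits:  i -> 1,  i+1 -> 0,  i+2 -> 0   (only the start is marked)
//     in_use bits:        i -> 1,  i+1 -> X,  i+2 -> X   (trailing bits are don't-care)
// while a free block of msize == 3 at index i looks like
//     block_header: 1 at i,  in_use: 0 at i,  and TINY_FREE_SIZE(ptr) == 3,
// so get_tiny_meta_header() can distinguish the two by testing the in_use bit.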
// Attention: double evaluation for these
#define BITARRAY_SET(bits,index) (bits[index>>3] |= (1 << (index & 7)))
#define BITARRAY_CLR(bits,index) (bits[index>>3] &= ~(1 << (index & 7)))
#define BITARRAY_BIT(bits,index) (((bits[index>>3]) >> (index & 7)) & 1)

// Following is for start<8 and end<=start+32
#define BITARRAY_MCLR_LESS_32(bits,start,end) { \
    unsigned char *_bits = (bits); \
    unsigned _end = (end); \
    switch (_end >> 3) { \
        case 4: _bits[4] &= ~ ((1 << (_end - 32)) - 1); _end = 32; \
        case 3: _bits[3] &= ~ ((1 << (_end - 24)) - 1); _end = 24; \
        case 2: _bits[2] &= ~ ((1 << (_end - 16)) - 1); _end = 16; \
        case 1: _bits[1] &= ~ ((1 << (_end - 8)) - 1); _end = 8; \
        case 0: _bits[0] &= ~ ((1 << _end) - (1 << (start))); \
    } \
}

#if 0 // Simple but slow version
#warning Slow version in effect
#define BITARRAY_MCLR(bits,index,num) { \
    unsigned _ctr = (num); \
    unsigned _cur = (index); \
    while (_ctr--) { BITARRAY_CLR(bits,_cur); _cur++; } \
}
#else
// Following is for num <= 32
#define BITARRAY_MCLR(bits,index,num) { \
    unsigned _index = (index); \
    unsigned char *_rebased = (bits) + (_index >> 3); \
    _index &= 7; \
    BITARRAY_MCLR_LESS_32(_rebased, _index, _index + (num)); \
}
#endif
static INLINE msize_t
get_tiny_meta_header(const void *ptr, boolean_t *is_free) {
    // returns msize and is_free
    // may return 0 for the msize component (meaning 65536)
    unsigned short shifted_base = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    unsigned headers_start = (shifted_base + 1) << TINY_BLOCKS_ALIGN;
    unsigned char *block_header = (unsigned char *)headers_start;
    msize_t index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
    unsigned byte_index = index >> 3;
    block_header += byte_index;
    if (!BITMAP32_BIT(*block_header, index)) return 0;
    unsigned char *in_use = block_header + (NUM_TINY_BLOCKS >> 3) + 4;
    if (!BITMAP32_BIT(*in_use, index)) {
    return TINY_FREE_SIZE(ptr);
#if defined(__BIG_ENDIAN__)
    unsigned *addr = (void *)((unsigned)block_header & ~3);
    unsigned word0 = OSReadSwapInt32(addr, 0);
    unsigned word1 = OSReadSwapInt32(addr, 4);
    unsigned bits = index + (((unsigned)block_header & 3) * 8);
    unsigned word = (word0 >> bits) | (word1 << (32 - bits));
    unsigned result = ffs(word >> 1);
    malloc_printf("*** get_tiny_meta_header() invariant broken %p %d\n", ptr, result);
    unsigned cur = index + 1;
    while (!BITARRAY_BIT(block_header, cur)) cur++; // assumes padding at the zone end
    if (cur - index >= 32) {
    malloc_printf("*** get_tiny_meta_header() invariant broken %p %d %d\n", ptr, index, cur);
set_tiny_meta_header_in_use(const void *ptr, msize_t msize) {
    unsigned short shifted_base = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    unsigned headers_start = (shifted_base + 1) << TINY_BLOCKS_ALIGN;
    unsigned char *block_header = (unsigned char *)headers_start;
    msize_t index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
    if (msize >= 32) malloc_printf("*** set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize);
    if ((unsigned)index + (unsigned)msize > 0x10000) malloc_printf("*** set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize);
    unsigned byte_index = index >> 3;
    block_header += byte_index;
    BITMAP32_SET(*block_header, index);
    unsigned char *in_use = block_header + (NUM_TINY_BLOCKS >> 3) + 4;
    BITMAP32_SET(*in_use, index);
    msize_t clr_msize = msize-1;
    byte_index = index >> 3;
    block_header += byte_index; in_use += byte_index;
    unsigned end_bit = index + clr_msize;
    BITARRAY_MCLR_LESS_32(block_header, index, end_bit);
    BITARRAY_MCLR_LESS_32(in_use, index, end_bit);
    BITARRAY_SET(block_header, index+clr_msize); // we set the block_header bit for the following block to reaffirm next block is a block
    msize_t mf = get_tiny_meta_header(ptr, &ff);
    malloc_printf("*** setting header for tiny in_use %p : %d\n", ptr, msize);
    malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
set_tiny_meta_header_middle(const void *ptr) {
    // indicates this block is in the middle of an in use block
    unsigned short shifted_base = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    unsigned headers_start = (shifted_base + 1) << TINY_BLOCKS_ALIGN;
    unsigned char *block_header = (unsigned char *)headers_start;
    unsigned char *in_use = (unsigned char *)(headers_start + (NUM_TINY_BLOCKS >> 3) + 4);
    msize_t index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
    BITARRAY_CLR(block_header, index); BITARRAY_CLR(in_use, index);
    TINY_FREE_SIZE(ptr) = 0;
set_tiny_meta_header_free(const void *ptr, msize_t msize) {
    // !msize is acceptable and means 65536
    unsigned short shifted_base = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    unsigned headers_start = (shifted_base + 1) << TINY_BLOCKS_ALIGN;
    unsigned char *block_header = (unsigned char *)headers_start;
    unsigned char *in_use = (unsigned char *)(headers_start + (NUM_TINY_BLOCKS >> 3) + 4);
    msize_t index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
    if ((unsigned)index + (unsigned)msize > 0x10000) {
    malloc_printf("*** setting header for tiny free %p msize too large: %d\n", ptr, msize);
    BITARRAY_SET(block_header, index); BITARRAY_CLR(in_use, index);
    TINY_FREE_SIZE(ptr) = msize;
    // mark the end of this block
    if (msize) { // msize==0 => the whole region is free
    void *follower = FOLLOWING_TINY_PTR(ptr, msize);
    TINY_PREVIOUS_MSIZE(follower) = msize;
    msize_t mf = get_tiny_meta_header(ptr, &ff);
    if ((msize != mf) || !ff) {
    malloc_printf("*** setting header for tiny free %p : %d\n", ptr, msize);
    malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
static INLINE boolean_t
tiny_meta_header_is_free(const void *ptr) {
    // returns msize and is_free shifted by 16
    // may return 0 for the msize component (meaning 65536)
    unsigned short shifted_base = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    unsigned headers_start = (shifted_base + 1) << TINY_BLOCKS_ALIGN;
    unsigned char *block_header = (unsigned char *)headers_start;
    unsigned char *in_use = (unsigned char *)(headers_start + (NUM_TINY_BLOCKS >> 3) + 4);
    msize_t index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
    if (!BITARRAY_BIT(block_header, index)) return 0;
    return !BITARRAY_BIT(in_use, index);
tiny_previous_preceding_free(void *ptr, msize_t *prev_msize) {
    // returns the previous block, assuming and verifying it's free
    unsigned short shifted_base = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    unsigned headers_start = (shifted_base + 1) << TINY_BLOCKS_ALIGN;
    unsigned char *block_header = (unsigned char *)headers_start;
    unsigned char *in_use = (unsigned char *)(headers_start + (NUM_TINY_BLOCKS >> 3) + 4);
    msize_t index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
    if (!index) return NULL;
    msize_t previous_msize = TINY_PREVIOUS_MSIZE(ptr);
    if (previous_msize > index) return NULL;
    msize_t previous_index = index - previous_msize;
    void *previous_ptr = (void *)((shifted_base << TINY_BLOCKS_ALIGN) + (previous_index << SHIFT_TINY_QUANTUM));
    if (TINY_FREE_SIZE(previous_ptr) != previous_msize) return NULL;
    if (!BITARRAY_BIT(block_header, previous_index)) return NULL;
    if (BITARRAY_BIT(in_use, previous_index)) return NULL;
    // conservative check did match true check
    *prev_msize = previous_msize;
    // malloc_printf("tiny_previous_preceding_free(%p) -> %p,%d\n", ptr, previous_ptr, previous_msize);
tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Adds an item to the proper free list
    // Also marks the meta-header of the block properly
    // Assumes szone has been locked
    grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *free_head = szone->tiny_free_list[slot];
    if (LOG(szone,ptr)) {
    malloc_printf("In tiny_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    if (((unsigned)ptr) & (TINY_QUANTUM - 1)) {
    szone_error(szone, "tiny_free_list_add_ptr: Unaligned ptr", ptr);
    set_tiny_meta_header_free(ptr, msize);
    free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
    if (free_head->previous) {
    malloc_printf("ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, free_head, free_head->previous);
    szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
    if (! tiny_meta_header_is_free(free_head)) {
    malloc_printf("ptr=%p slot=%d free_head=%p\n", ptr, slot, free_head);
    szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
    free_head->previous = free_ptr;
    free_list_set_checksum(szone, free_head);
    BITMAP32_SET(szone->tiny_bitmap, slot);
    free_ptr->previous = NULL;
    free_ptr->next = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->tiny_free_list[slot] = free_ptr;
    // malloc_printf("Setting head of free list for slot=%d to %p\n", slot, free_ptr);
tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Removes item in the proper free list
    // msize could be read, but all callers have it so we pass it in
    // Assumes szone has been locked
    grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *next = free_ptr->next;
    free_list_t *previous = free_ptr->previous;
    if (LOG(szone,ptr)) {
    malloc_printf("In tiny_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
    // The block to remove is the head of the free list
    if (szone->tiny_free_list[slot] != ptr) {
    malloc_printf("ptr=%p slot=%d msize=%d szone->tiny_free_list[slot]=%p\n", ptr, slot, msize, szone->tiny_free_list[slot]);
    szone_error(szone, "tiny_free_list_remove_ptr: Internal invariant broken (szone->tiny_free_list[slot])", ptr);
    szone->tiny_free_list[slot] = next;
    if (!next) BITMAP32_CLR(szone->tiny_bitmap, slot);
    previous->next = next;
    free_list_set_checksum(szone, previous);
    next->previous = previous;
    free_list_set_checksum(szone, next);
static INLINE tiny_region_t *
tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr) {
    tiny_region_t *region = szone->tiny_regions;
    unsigned num_regions = szone->num_tiny_regions;
    unsigned ptr_shifted = ((unsigned)ptr) >> TINY_BLOCKS_ALIGN;
    while (num_regions--) {
    tiny_region_t this = *region;
    if (ptr_shifted == this) return region;
tiny_free_no_lock(szone_t *szone, tiny_region_t *region, void *ptr, msize_t msize) {
    size_t original_size = msize << SHIFT_TINY_QUANTUM;
    void *next_block = ((char *)ptr + original_size);
    if (LOG(szone,ptr)) {
    malloc_printf("In tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    malloc_printf("In tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    szone_error(szone, "Trying to free tiny block that is too small", ptr);
    // We try to coalesce this block with the preceding one
    msize_t previous_msize;
    void *previous = tiny_previous_preceding_free(ptr, &previous_msize);
    if (LOG(szone, ptr) || LOG(szone,previous)) {
    malloc_printf("In tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
    tiny_free_list_remove_ptr(szone, previous, previous_msize);
    msize += previous_msize;
    // We try to coalesce with the next block
    if (((vm_address_t)next_block < TINY_REGION_END(*region)) && tiny_meta_header_is_free(next_block)) {
    // The next block is free, we coalesce
    msize_t next_msize = TINY_FREE_SIZE(next_block);
    if (LOG(szone, ptr) || LOG(szone, next_block)) {
    malloc_printf("In tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n", ptr, msize, next_block, next_msize);
    if (next_msize >= NUM_TINY_SLOTS) {
    // we take a short cut here to avoid removing next_block from the slot 31 freelist and then adding ptr back to slot 31
    // malloc_printf("Replacing %p(msize=%d) with %p(msize=%d) in freelist\n", next_block, next_msize, ptr, msize+next_msize);
    free_list_t *big_free_block = (free_list_t *)next_block;
    free_list_t *after_next_block = big_free_block->next;
    free_list_t *before_next_block = big_free_block->previous;
    free_list_checksum(szone, big_free_block, __PRETTY_FUNCTION__);
    if (!before_next_block) {
    szone->tiny_free_list[NUM_TINY_SLOTS-1] = ptr;
    before_next_block->next = ptr;
    free_list_set_checksum(szone, before_next_block);
    if (after_next_block) {
    after_next_block->previous = ptr;
    free_list_set_checksum(szone, after_next_block);
    ((free_list_t *)ptr)->previous = before_next_block;
    ((free_list_t *)ptr)->next = after_next_block;
    free_list_set_checksum(szone, ptr);
    set_tiny_meta_header_free(ptr, msize);
    set_tiny_meta_header_middle(big_free_block); // clear the meta_header to enable coalescing backwards
    goto tiny_free_ending;
    tiny_free_list_remove_ptr(szone, next_block, next_msize);
    set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
    if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize) {
    memset(ptr, 0x55, msize << SHIFT_TINY_QUANTUM);
    tiny_free_list_add_ptr(szone, ptr, msize);
  tiny_free_ending:
    // When in proper debug mode we write on the memory to help debug memory smashers
    szone->num_tiny_objects--;
    szone->num_bytes_in_tiny_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize) {
    // Allocates from the last region or a freshly allocated region
    // Before anything we transform the tiny_bytes_free_at_end - if any - to a regular free block
    if (szone->tiny_bytes_free_at_end) {
    tiny_region_t last_region = szone->tiny_regions[szone->num_tiny_regions-1];
    void *last_block = (void *)(TINY_REGION_END(last_region) - szone->tiny_bytes_free_at_end);
    tiny_free_list_add_ptr(szone, last_block, szone->tiny_bytes_free_at_end >> SHIFT_TINY_QUANTUM);
    szone->tiny_bytes_free_at_end = 0;
    // time to create a new region
    vm_address_t aligned_address = allocate_pages(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_TINY));
    if (! aligned_address) {
    // malloc_printf("Allocated tiny region #%d: %p [%y]\n", szone->num_tiny_regions, aligned_address, TINY_REGION_SIZE);
    // We set the padding after block_header to be all 1
    ((unsigned *)(aligned_address + (1 << TINY_BLOCKS_ALIGN) + (NUM_TINY_BLOCKS >> 3)))[0] = ~0;
    if (szone->num_tiny_regions == INITIAL_NUM_TINY_REGIONS) {
    tiny_region_t *new_regions;
    // malloc_printf("=== Growing tiny_regions (%d regions)\n", szone->num_tiny_regions);
    new_regions = small_malloc_from_region_no_lock(szone, 16); // 16 * 512 bytes is plenty of tiny regions (more than 4,000)
    if (!new_regions) return NULL;
    memcpy(new_regions, szone->tiny_regions, INITIAL_NUM_TINY_REGIONS * sizeof(tiny_region_t));
    szone->tiny_regions = new_regions; // we set the pointer after it's all ready to enable enumeration from another thread without locking
    szone->tiny_regions[szone->num_tiny_regions] = aligned_address >> TINY_BLOCKS_ALIGN;
    szone->num_tiny_regions++; // we set the number after the pointer is all ready to enable enumeration from another thread without taking the lock
    ptr = (void *)aligned_address;
    set_tiny_meta_header_in_use(ptr, msize);
    szone->num_tiny_objects++;
    szone->num_bytes_in_tiny_objects += msize << SHIFT_TINY_QUANTUM;
    // We put a header on the last block so that it appears in use (for coalescing, etc...)
    set_tiny_meta_header_in_use(ptr + (msize << SHIFT_TINY_QUANTUM), 1);
    szone->tiny_bytes_free_at_end = (NUM_TINY_BLOCKS - msize) << SHIFT_TINY_QUANTUM;
    if (LOG(szone,ptr)) {
    malloc_printf("In tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
static INLINE boolean_t
try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) {
    // returns 1 on success
    msize_t index = (((unsigned)ptr) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1);
    msize_t old_msize = old_size >> SHIFT_TINY_QUANTUM;
    unsigned next_index = index + old_msize;
    // malloc_printf("try_realloc_tiny_in_place %p %d %d\n", ptr, old_size, new_size);
    if (next_index >= NUM_TINY_BLOCKS) {
    // malloc_printf("try_realloc_tiny_in_place can't take place at end %p %d %d %d\n", ptr, old_size, new_size, next_index);
    void *next_block = (char *)ptr + old_size;
    boolean_t is_free = tiny_meta_header_is_free(next_block);
    return 0; // next_block is in use;
    msize_t next_msize = TINY_FREE_SIZE(next_block);
    if (old_size + (next_msize << SHIFT_TINY_QUANTUM) < new_size) {
    // malloc_printf("try_realloc_tiny_in_place can't %p too small %d\n", next_block, next_msize);
    return 0; // even with next block, not enough
    tiny_free_list_remove_ptr(szone, next_block, next_msize);
    set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
    msize_t coalesced_msize = (new_size - old_size + TINY_QUANTUM - 1) >> SHIFT_TINY_QUANTUM;
    msize_t leftover_msize = next_msize - coalesced_msize;
    // malloc_printf("Realloc in place for %p; current size=%d next_msize=%d wanted=%d\n", ptr, old_size, next_msize, new_size);
    if (leftover_msize) {
    void *leftover = next_block + (coalesced_msize << SHIFT_TINY_QUANTUM);
    // malloc_printf("Leftover in realloc in place %p leftover_msize=%d\n", leftover, leftover_msize);
    tiny_free_list_add_ptr(szone, leftover, leftover_msize);
    set_tiny_meta_header_in_use(ptr, old_msize + coalesced_msize);
    if (LOG(szone,ptr)) {
    malloc_printf("In try_realloc_tiny_in_place(), ptr=%p, msize=%d\n", ptr, old_msize + coalesced_msize);
    szone->num_bytes_in_tiny_objects += coalesced_msize << SHIFT_TINY_QUANTUM;
    CHECK(szone, __PRETTY_FUNCTION__);
    // malloc_printf("Extended ptr %p for realloc old=%d desired=%d new=%d leftover=%d\n", ptr, (unsigned)old_size, (unsigned)new_size, (unsigned)szone_size(szone, ptr), leftover_msize << SHIFT_TINY_QUANTUM);
szone_check_tiny_region(szone_t *szone, tiny_region_t *region) {
    vm_address_t start = TINY_REGION_ADDRESS(*region);
    void *ptr = (void *)start;
    vm_address_t region_end = TINY_REGION_END(*region);
    boolean_t prev_free = 0;
    if (region == szone->tiny_regions + szone->num_tiny_regions - 1) region_end -= szone->tiny_bytes_free_at_end;
    // malloc_printf("szone_check_tiny_region: szone=%p region=%p start=%p ptr=%p region_end=%p\n", szone, region, start, ptr, region_end);
    while ((vm_address_t)ptr < region_end) {
    msize_t msize = get_tiny_meta_header(ptr, &is_free);
    if (is_free && !msize && (ptr == (void *)start)) {
    // the entire region is free
    // malloc_printf("tiny %p [%d %d]\n", ptr, msize, is_free);
    malloc_printf("*** malloc[%d]: invariant broken for tiny block %p this msize=%d - size is too small\n", getpid(), ptr, msize);
    // this block is in use
    if (msize > 31*TINY_QUANTUM) {
    malloc_printf("*** malloc[%d]: invariant broken for %p this tiny msize=%d[%p] - size is too large\n", getpid(), ptr, msize, msize);
    ptr += msize << SHIFT_TINY_QUANTUM;
    malloc_printf("*** malloc[%d]: invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n", getpid(), ptr, msize);
    free_list_t *free_head = ptr;
    free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
    if (free_head->previous && !tiny_meta_header_is_free(free_head->previous)) {
    malloc_printf("*** malloc[%d]: invariant broken for %p (previous %p is not a free pointer)\n", getpid(), ptr, free_head->previous);
    if (free_head->next && !tiny_meta_header_is_free(free_head->next)) {
    malloc_printf("*** malloc[%d]: invariant broken for %p (next in free list %p is not a free pointer)\n", getpid(), ptr, free_head->next);
    void *follower = FOLLOWING_TINY_PTR(ptr, msize);
    if ((follower != (void *)region_end) && (TINY_PREVIOUS_MSIZE(follower) != msize)) {
    malloc_printf("*** malloc[%d]: invariant broken for tiny free %p followed by %p in region [%p-%p] (end marker incorrect) should be %d; in fact %d\n", getpid(), ptr, follower, TINY_REGION_ADDRESS(*region), region_end, msize, TINY_PREVIOUS_MSIZE(follower));
    if (ptr != (void *)region_end) {
    malloc_printf("*** malloc[%d]: invariant broken for region end %p - %p\n", getpid(), ptr, region_end);
    if (region == szone->tiny_regions + szone->num_tiny_regions - 1) {
    if (szone->tiny_bytes_free_at_end) {
    msize_t msize = get_tiny_meta_header(ptr, &is_free);
    if (is_free || (msize != 1)) {
    malloc_printf("*** malloc[%d]: invariant broken for blocker block %p - %d %d\n", getpid(), ptr, msize, is_free);
tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t tiny_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder) {
    tiny_region_t *regions;
    vm_range_t buffer[MAX_RECORDER_BUFFER];
    err = reader(task, region_address, sizeof(tiny_region_t) * num_regions, (void **)&regions);
    while (index < num_regions) {
    // unsigned num_in_use = 0;
    // unsigned num_free = 0;
    tiny_region_t region = regions[index];
    vm_range_t range = {TINY_REGION_ADDRESS(region), TINY_REGION_SIZE};
    // malloc_printf("Enumerating tiny ptrs for tiny region starting at %p\n", range.address);
    if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
    vm_range_t admin_range = {range.address + (1 << TINY_BLOCKS_ALIGN), range.size - (1 << TINY_BLOCKS_ALIGN)};
    recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
    if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
    vm_range_t ptr_range = {range.address, 1 << TINY_BLOCKS_ALIGN};
    recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
    if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
    unsigned char *mapped_region;
    err = reader(task, range.address, range.size, (void **)&mapped_region);
    unsigned char *block_header = (unsigned char *)(mapped_region + (1 << TINY_BLOCKS_ALIGN));
    unsigned char *in_use = block_header + (NUM_TINY_BLOCKS >> 3) + 4;
    unsigned block_index = 0;
    unsigned block_limit = NUM_TINY_BLOCKS;
    if (index == num_regions - 1)
    block_limit -= (tiny_bytes_free_at_end >> SHIFT_TINY_QUANTUM);
    while (block_index < block_limit) {
    boolean_t is_free = ! BITARRAY_BIT(in_use, block_index);
    void *mapped_ptr = mapped_region + (block_index << SHIFT_TINY_QUANTUM);
    msize = TINY_FREE_SIZE(mapped_ptr);
    // printf("free: index=%x mapped=%p true_addr=%p msize=%d\n", block_index, mapped_ptr, (void *)range.address + (block_index << SHIFT_TINY_QUANTUM), msize);
    unsigned bit = block_index + 1;
    while (! BITARRAY_BIT(block_header, bit)) { bit++; msize++; }
    // printf("in_use: index=%x true_addr=%p msize=%d\n", block_index, (void *)range.address + (block_index << SHIFT_TINY_QUANTUM), msize);
    buffer[count].address = range.address + (block_index << SHIFT_TINY_QUANTUM);
    buffer[count].size = msize << SHIFT_TINY_QUANTUM;
    if (count >= MAX_RECORDER_BUFFER) {
    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
    block_index += msize;
    // malloc_printf("Found in tiny region %d in_use and %d free\n", num_in_use, num_free);
    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
static INLINE void *
tiny_malloc_from_free_list(szone_t *szone, msize_t msize) {
    // Assumes we've locked the region
    grain_t slot = msize-1;
    free_list_t **free_list = szone->tiny_free_list;
    free_list_t **the_slot = free_list + slot;
    next = ((free_list_t *)ptr)->next;
    next->previous = NULL;
    free_list_set_checksum(szone, next);
    if (LOG(szone,ptr)) {
    malloc_printf("In tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize);
    goto return_tiny_alloc;
    // adjust slot based on bitmap
    unsigned bitmap = szone->tiny_bitmap & ~ ((1 << slot) - 1);
    if (! bitmap) goto try_tiny_malloc_from_end;
    slot = BITMAP32_FFS(bitmap) - 1;
    free_list_t **limit = free_list + NUM_TINY_SLOTS - 1;
    while (free_list < limit) {
    // try bigger grains
    next = ((free_list_t *)ptr)->next;
    next->previous = NULL;
    free_list_set_checksum(szone, next);
    this_msize = TINY_FREE_SIZE(ptr);
    if (LOG(szone,ptr)) {
    malloc_printf("In tiny_malloc_from_free_list(), bigger grain ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
    goto add_leftover_and_proceed;
    // we are now looking at the last slot (31)
    this_msize = TINY_FREE_SIZE(ptr);
    next = ((free_list_t *)ptr)->next;
    if (this_msize - msize >= NUM_TINY_SLOTS) {
    // the leftover will go back to the free list, so we optimize by modifying the free list rather than removing the head and then adding back
    // malloc_printf("Allocation from largest tiny slot %p optimized\n", ptr);
    msize_t leftover_msize = this_msize - msize;
    void *leftover_ptr = ptr + (msize << SHIFT_TINY_QUANTUM);
    *limit = leftover_ptr;
    next->previous = leftover_ptr;
    free_list_set_checksum(szone, next);
    ((free_list_t *)leftover_ptr)->next = next;
    ((free_list_t *)leftover_ptr)->previous = NULL;
    free_list_set_checksum(szone, leftover_ptr);
    set_tiny_meta_header_free(leftover_ptr, leftover_msize);
    if (LOG(szone,ptr)) {
    malloc_printf("In tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
    goto return_tiny_alloc;
    next->previous = NULL;
    free_list_set_checksum(szone, next);
    goto add_leftover_and_proceed;
  try_tiny_malloc_from_end:
    // Let's see if we can use szone->tiny_bytes_free_at_end
    if (szone->tiny_bytes_free_at_end >= (msize << SHIFT_TINY_QUANTUM)) {
    ptr = (void *)(TINY_REGION_END(szone->tiny_regions[szone->num_tiny_regions-1]) - szone->tiny_bytes_free_at_end);
    szone->tiny_bytes_free_at_end -= msize << SHIFT_TINY_QUANTUM;
    if (szone->tiny_bytes_free_at_end) {
    // let's add an in use block after ptr to serve as boundary
    set_tiny_meta_header_in_use(ptr + (msize << SHIFT_TINY_QUANTUM), 1);
    if (LOG(szone,ptr)) {
    malloc_printf("In tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize);
    goto return_tiny_alloc;
  add_leftover_and_proceed:
    // malloc_printf("For msize=%d found tiny in free_list (slot=%d) this_msize=%d\n", msize, free_list - szone->tiny_free_list, this_msize);
    if (!this_msize || (this_msize > msize)) {
    msize_t leftover_msize = this_msize - msize;
    void *leftover_ptr = ptr + (msize << SHIFT_TINY_QUANTUM);
    if (LOG(szone,ptr)) {
    malloc_printf("In tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
    tiny_free_list_add_ptr(szone, leftover_ptr, leftover_msize);
    szone->num_tiny_objects++;
    szone->num_bytes_in_tiny_objects += this_msize << SHIFT_TINY_QUANTUM;
    if (LOG(szone,ptr)) {
    malloc_printf("In tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
    set_tiny_meta_header_in_use(ptr, this_msize);
static INLINE void *
tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) {
    boolean_t locked = 0;
    szone_error(szone, "Invariant broken (!msize) in allocation (region)", NULL);
    ptr = (void *)szone->last_tiny_free;
    if ((((unsigned)ptr) & (TINY_QUANTUM - 1)) == msize) {
    // we have a candidate - let's lock to make sure
    LOCK_AND_NOTE_LOCKED(szone, locked);
    if (ptr == (void *)szone->last_tiny_free) {
    szone->last_tiny_free = NULL;
    // malloc_printf("using last_tiny_free\n");
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    ptr = (void *)((unsigned)ptr & ~ (TINY_QUANTUM - 1));
    if (cleared_requested) {
    memset(ptr, 0, msize << SHIFT_TINY_QUANTUM);
    if (LOG(szone,ptr)) {
    malloc_printf("In tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize);
    // malloc_printf("optimistic locking for last_tiny_free failed\n");
    // Except in rare occasions where we need to add a new region, we are going to end up locking, so we might as well lock right away to avoid doing unnecessary optimistic probes
    if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked);
    ptr = tiny_malloc_from_free_list(szone, msize);
    // malloc_printf("tiny_malloc_from_free_list(%d) returned %p\n", msize, ptr);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    if (cleared_requested) {
    memset(ptr, 0, msize << SHIFT_TINY_QUANTUM);
    ptr = tiny_malloc_from_region_no_lock(szone, msize);
    // malloc_printf("tiny_malloc_from_region_no_lock returned %p for msize=%d\n", ptr, msize);
    // we don't clear because this freshly allocated space is pristine
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
free_tiny(szone_t *szone, void *ptr, tiny_region_t *tiny_region) {
    // ptr is known to be in tiny_region
#if TINY_CACHE
    void *ptr2 = szone->last_tiny_free;
    if (ptr == (void *)((unsigned)ptr2 & ~ (TINY_QUANTUM - 1))) {
    szone_error(szone, "Double free", ptr);
#endif /* TINY_CACHE */
    msize_t msize = get_tiny_meta_header(ptr, &is_free);
    szone_error(szone, "Double free", ptr);
    // malloc_printf("%p[%x]\n", ptr, msize);
    malloc_printf("*** szone_free() block in use is too large: %p\n", ptr);
    if (msize < TINY_QUANTUM) { // to see if the bits fit in the last 4 bits
    szone->last_tiny_free = (void *)(((unsigned)ptr) | msize);
    // malloc_printf("stuffing last_tiny_free\n");
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    // malloc_printf("replacing previous last_tiny_free %p with %p\n", ptr2, szone->last_tiny_free);
    msize = (unsigned)ptr2 & (TINY_QUANTUM - 1);
    ptr = (void *)(((unsigned)ptr2) & ~ (TINY_QUANTUM - 1));
    tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
    szone_error(szone, "Double free (tiny cache)", ptr);
    tiny_free_no_lock(szone, tiny_region, ptr, msize);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
print_tiny_free_list(szone_t *szone) {
    malloc_printf("Tiny free sizes: ");
    while (slot < NUM_TINY_SLOTS) {
    free_list_t *ptr = szone->tiny_free_list[slot];
    malloc_printf("%s%y[%d]; ", (slot == NUM_TINY_SLOTS-1) ? ">=" : "", (slot+1)*TINY_QUANTUM, free_list_count(ptr));
    malloc_printf("\n");
print_tiny_region(boolean_t verbose, tiny_region_t region, size_t bytes_at_end) {
    unsigned counts[1024];
    unsigned in_use = 0;
    vm_address_t start = TINY_REGION_ADDRESS(region);
    vm_address_t current = start;
    vm_address_t limit = TINY_REGION_END(region) - bytes_at_end;
    memset(counts, 0, 1024 * sizeof(unsigned));
    while (current < limit) {
    msize_t msize = get_tiny_meta_header((void *)current, &is_free);
    // malloc_printf("%p [%d %d]; ", current, msize, is_free);
    if (is_free && !msize && (current == start)) {
    // first block is all free
    malloc_printf("*** Error with %p: msize=%d\n", current, msize);
    if (msize > 32) malloc_printf("*** Error at %p msize for in_use is %d\n", current, msize);
    if (msize < 1024) counts[msize]++;
    current += msize << SHIFT_TINY_QUANTUM;
    malloc_printf("Tiny region [%p-%p, %y]\t", start, TINY_REGION_END(region), (int)TINY_REGION_SIZE);
    malloc_printf("In_use=%d ", in_use);
    if (bytes_at_end) malloc_printf("Untouched=%y ", bytes_at_end);
    if (verbose && in_use) {
    malloc_printf("\n\tSizes in use: ");
    malloc_printf("%d[%d] ", ci << SHIFT_TINY_QUANTUM, counts[ci]);
    malloc_printf("\n");
tiny_free_list_check(szone_t *szone, grain_t slot) {
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    free_list_t *ptr = szone->tiny_free_list[slot];
    free_list_t *previous = NULL;
    free_list_checksum(szone, ptr, __PRETTY_FUNCTION__);
    boolean_t is_free = tiny_meta_header_is_free(ptr);
    malloc_printf("*** malloc[%d]: In-use ptr in free list slot=%d count=%d ptr=%p\n", getpid(), slot, count, ptr);
    if (((unsigned)ptr) & (TINY_QUANTUM - 1)) {
    malloc_printf("*** malloc[%d]: Unaligned ptr in free list slot=%d count=%d ptr=%p\n", getpid(), slot, count, ptr);
    if (!tiny_region_for_ptr_no_lock(szone, ptr)) {
    malloc_printf("*** malloc[%d]: Ptr not in szone slot=%d count=%d ptr=%p\n", getpid(), slot, count, ptr);
    if (ptr->previous != previous) {
    malloc_printf("*** malloc[%d]: Previous incorrectly set slot=%d count=%d ptr=%p\n", getpid(), slot, count, ptr);
    // malloc_printf("tiny_free_list_check passed\n");
/********************* SMALL FREE LIST UTILITIES ************************/

static INLINE msize_t *
small_meta_headers(const void *ptr) {
    // returns address of meta info
    unsigned short shifted_base = ((unsigned)ptr) >> SMALL_BLOCKS_ALIGN;
    unsigned headers_start = (shifted_base + 1) << SMALL_BLOCKS_ALIGN;
    return (msize_t *)headers_start;

static INLINE msize_t
small_meta_index(const void *ptr) {
    // returns index of meta info
    return (((unsigned)ptr) >> SHIFT_SMALL_QUANTUM) & (NUM_SMALL_BLOCKS - 1);

static INLINE msize_t *
small_meta_header(const void *ptr) {
    // returns address of meta info
    msize_t *meta_headers = small_meta_headers(ptr);
    msize_t index = small_meta_index(ptr);
    return meta_headers + index;

small_meta_header_set_is_free(msize_t *meta_headers, msize_t index, msize_t msize) {
    // Indicates that the meta_header for index says 'is free'
    meta_headers[index] = msize | SMALL_IS_FREE;

small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize) {
    // Indicates that the meta_header for index says 'in use'
    meta_headers[index] = msize;

small_meta_header_set_middle(msize_t *meta_headers, msize_t index) {
    // Indicates that the meta_header for index says 'in the middle of a block'
    meta_headers[index] = 0;
small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Adds an item to the proper free list
    // Also marks the header of the block properly
    // Assumes szone has been locked
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *free_head = szone->small_free_list[grain];
    if (LOG(szone,ptr)) {
    malloc_printf("In small_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    if (((unsigned)ptr) & (SMALL_QUANTUM - 1)) {
    szone_error(szone, "small_free_list_add_ptr: Unaligned ptr", ptr);
    free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
    if (free_head->previous) {
    malloc_printf("ptr=%p grain=%d free_head=%p previous=%p\n", ptr, grain, free_head, free_head->previous);
    szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
    if (!(small_meta_header(free_head)[0] & SMALL_IS_FREE)) {
    malloc_printf("ptr=%p grain=%d free_head=%p\n", ptr, grain, free_head);
    szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
    free_head->previous = free_ptr;
    free_list_set_checksum(szone, free_head);
    BITMAP32_SET(szone->small_bitmap, grain);
    free_ptr->previous = NULL;
    free_ptr->next = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->small_free_list[grain] = free_ptr;
    void *follower = ptr + (msize << SHIFT_SMALL_QUANTUM);
    SMALL_PREVIOUS_MSIZE(follower) = msize;
small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Removes item in the proper free list
    // msize could be read, but all callers have it so we pass it in
    // Assumes szone has been locked
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *next = free_ptr->next;
    free_list_t *previous = free_ptr->previous;
    if (LOG(szone,ptr)) {
    malloc_printf("In small_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
    if (szone->small_free_list[grain] != ptr) {
    malloc_printf("ptr=%p grain=%d msize=%d szone->small_free_list[grain]=%p\n", ptr, grain, msize, szone->small_free_list[grain]);
    szone_error(szone, "small_free_list_remove_ptr: Internal invariant broken (szone->small_free_list[grain])", ptr);
    szone->small_free_list[grain] = next;
    if (!next) BITMAP32_CLR(szone->small_bitmap, grain);
    previous->next = next;
    free_list_set_checksum(szone, previous);
    next->previous = previous;
    free_list_set_checksum(szone, next);
static INLINE small_region_t *
small_region_for_ptr_no_lock(szone_t *szone, const void *ptr) {
    small_region_t *region = szone->small_regions;
    unsigned num_regions = szone->num_small_regions;
    unsigned ptr_shifted = ((unsigned)ptr) >> SMALL_BLOCKS_ALIGN;
    while (num_regions--) {
    small_region_t this = *region;
    if (ptr_shifted == this) return region;
small_free_no_lock(szone_t *szone, small_region_t *region, void *ptr, msize_t msize) {
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    msize_t *meta_headers = small_meta_headers(ptr);
    msize_t index = small_meta_index(ptr);
    size_t original_size = msize << SHIFT_SMALL_QUANTUM;
    void *next_block = ((char *)ptr + original_size);
    msize_t next_index = index + msize;
    if (LOG(szone,ptr)) {
    malloc_printf("In small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    malloc_printf("In small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    szone_error(szone, "Trying to free small block that is too small", ptr);
    // printf("In small_free_no_lock %p - msize=%d\n", ptr, msize);
    // We try to coalesce this block with the preceding one
    if (index && (SMALL_PREVIOUS_MSIZE(ptr) <= index)) {
    msize_t previous_msize = SMALL_PREVIOUS_MSIZE(ptr);
    if (meta_headers[index - previous_msize] == (previous_msize | SMALL_IS_FREE)) {
    void *previous = ptr - (previous_msize << SHIFT_SMALL_QUANTUM);
    // previous is really to be coalesced
    if (LOG(szone, ptr) || LOG(szone,previous)) {
    malloc_printf("In small_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
    // malloc_printf("In small_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
    small_free_list_remove_ptr(szone, previous, previous_msize);
    small_meta_header_set_middle(meta_headers, index);
    msize += previous_msize;
    index -= previous_msize;
    // We try to coalesce with the next block
    if (((vm_address_t)next_block < SMALL_REGION_END(*region)) && (meta_headers[next_index] & SMALL_IS_FREE)) {
    // next block is free, we coalesce
    msize_t next_msize = meta_headers[next_index] & ~ SMALL_IS_FREE;
    if (LOG(szone,ptr)) malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", ptr, msize, next_block, next_msize);
    // malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", ptr, msize, next_block, next_msize);
    small_free_list_remove_ptr(szone, next_block, next_msize);
    small_meta_header_set_middle(meta_headers, next_index);
    msize += next_msize;
    if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) {
    szone_error(szone, "Incorrect size information - block header was damaged", ptr);
    memset(ptr, 0x55, (msize << SHIFT_SMALL_QUANTUM));
    small_free_list_add_ptr(szone, ptr, msize);
    small_meta_header_set_is_free(meta_headers, index, msize);
    szone->num_small_objects--;
    szone->num_bytes_in_small_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
small_malloc_from_region_no_lock(szone_t *szone, msize_t msize) {
    // Allocates from the last region or a freshly allocated region
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    // Before anything we transform the small_bytes_free_at_end - if any - to a regular free block
    if (szone->small_bytes_free_at_end) {
    small_region_t last_region = szone->small_regions[szone->num_small_regions - 1];
    void *last_block = (void *)(SMALL_REGION_END(last_region) - szone->small_bytes_free_at_end);
    small_free_list_add_ptr(szone, last_block, szone->small_bytes_free_at_end >> SHIFT_SMALL_QUANTUM);
    small_meta_header(last_block)[0] = (szone->small_bytes_free_at_end >> SHIFT_SMALL_QUANTUM) | SMALL_IS_FREE;
    szone->small_bytes_free_at_end = 0;
    // time to create a new region
    vm_address_t new_address = allocate_pages(szone, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_SMALL));
    ptr = (void *)new_address;
    msize_t *meta_headers = small_meta_headers(ptr);
    // malloc_printf("Allocated small region #%d: %p [%y]\n", szone->num_small_regions, new_address, SMALL_REGION_SIZE);
    if (szone->num_small_regions == INITIAL_NUM_SMALL_REGIONS) {
    // time to grow the number of regions
    unsigned region_capacity = (1 << (32 - SMALL_BLOCKS_ALIGN)) - 20; // that is for sure the maximum number of small regions we can have
    msize_t new_msize = (region_capacity * sizeof(small_region_t) + SMALL_QUANTUM - 1) / SMALL_QUANTUM;
    small_region_t *new_regions = ptr;
    // malloc_printf("Now %d small_regions growing regions %p to %d msize=%d\n", szone->num_small_regions + 1, szone->small_regions, region_capacity, new_msize);
    small_meta_header_set_in_use(meta_headers, index, new_msize);
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += new_msize << SHIFT_SMALL_QUANTUM;
    memcpy(new_regions, szone->small_regions, INITIAL_NUM_SMALL_REGIONS * sizeof(small_region_t));
    // We intentionally leak the previous regions pointer to avoid multi-threading crashes if another thread was reading it (unlocked) while we are changing it.
    szone->small_regions = new_regions; // note we set this pointer after it's all set
    ptr += new_msize << SHIFT_SMALL_QUANTUM;
    // malloc_printf("Regions is now %p next ptr is %p\n", szone->small_regions, ptr);
    szone->small_regions[szone->num_small_regions] = new_address >> SMALL_BLOCKS_ALIGN;
    szone->num_small_regions++; // we bump the number of regions AFTER we have changed the regions pointer to enable finding a small region without taking the lock
    // malloc_printf("Now %d small regions\n", szone->num_small_regions);
    small_meta_header_set_in_use(meta_headers, index, msize);
    msize_t msize_left = NUM_SMALL_BLOCKS - index;
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += msize << SHIFT_SMALL_QUANTUM;
    // add a big free block
    index += msize; msize_left -= msize;
    meta_headers[index] = msize_left;
    szone->small_bytes_free_at_end = msize_left << SHIFT_SMALL_QUANTUM;
    // malloc_printf("small_bytes_free_at_end set to %d\n", szone->small_bytes_free_at_end);
1578 szone_check_small_region(szone_t
*szone
, small_region_t
*region
) {
1579 CHECK_LOCKED(szone
, __PRETTY_FUNCTION__
);
1580 void *ptr
= (void *)SMALL_REGION_ADDRESS(*region
);
1581 msize_t
*meta_headers
= small_meta_headers(ptr
);
1582 vm_address_t region_end
= SMALL_REGION_END(*region
);
1583 msize_t prev_free
= 0;
1584 if (region
== szone
->small_regions
+ szone
->num_small_regions
- 1) region_end
-= szone
->small_bytes_free_at_end
;
1585 while ((vm_address_t
)ptr
< region_end
) {
1586 msize_t index
= small_meta_index(ptr
);
1587 msize_t msize_and_free
= meta_headers
[index
];
1588 if (! (msize_and_free
& SMALL_IS_FREE
)) {
1590 msize_t msize
= msize_and_free
;
1592 malloc_printf("*** malloc[%d]: invariant broken: null msize ptr=%p region#=%d num_small_regions=%d end=%p\n", getpid(), ptr
, region
- szone
->small_regions
, szone
->num_small_regions
, (void *)region_end
);
1595 if (msize
> (LARGE_THRESHOLD
/ SMALL_QUANTUM
)) {
1596 malloc_printf("*** malloc[%d]: invariant broken for %p this small msize=%d - size is too large\n", getpid(), ptr
, msize_and_free
);
1599 ptr
+= msize
<< SHIFT_SMALL_QUANTUM
;
1603 msize_t msize
= msize_and_free
& ~ SMALL_IS_FREE
;
1604 free_list_t
*free_head
= ptr
;
1605 msize_t
*follower
= (void *)FOLLOWING_SMALL_PTR(ptr
, msize
);
1607 malloc_printf("*** malloc[%d]: invariant broken for free block %p this msize=%d\n", getpid(), ptr
, msize
);
1611 malloc_printf("*** malloc[%d]: invariant broken for %p (2 free in a row)\n", getpid(), ptr
);
1614 free_list_checksum(szone
, free_head
, __PRETTY_FUNCTION__
);
1615 if (free_head
->previous
&& !(small_meta_header(free_head
->previous
)[0] & SMALL_IS_FREE
)) {
1616 malloc_printf("*** malloc[%d]: invariant broken for %p (previous %p is not a free pointer)\n", getpid(), ptr
, free_head
->previous
);
1619 if (free_head
->next
&& !(small_meta_header(free_head
->next
)[0] & SMALL_IS_FREE
)) {
1620 malloc_printf("*** malloc[%d]: invariant broken for %p (next is not a free pointer)\n", getpid(), ptr
);
1623 if (SMALL_PREVIOUS_MSIZE(follower
) != msize
) {
1624 malloc_printf("*** malloc[%d]: invariant broken for small free %p followed by %p in region [%p-%p] (end marker incorrect) should be %d; in fact %d\n", getpid(), ptr
, follower
, SMALL_REGION_ADDRESS(*region
), region_end
, msize
, SMALL_PREVIOUS_MSIZE(follower
));
1628 prev_free
= SMALL_IS_FREE
;
1634 static kern_return_t
1635 small_in_use_enumerator(task_t task
, void *context
, unsigned type_mask
, vm_address_t region_address
, unsigned short num_regions
, size_t small_bytes_free_at_end
, memory_reader_t reader
, vm_range_recorder_t recorder
) {
1636 small_region_t
*regions
;
1638 vm_range_t buffer
[MAX_RECORDER_BUFFER
];
1641 err
= reader(task
, region_address
, sizeof(small_region_t
) * num_regions
, (void **)®ions
);
1642 if (err
) return err
;
1643 while (index
< num_regions
) {
1644 small_region_t region
= regions
[index
];
1645 vm_range_t range
= {SMALL_REGION_ADDRESS(region
), SMALL_REGION_SIZE
};
1646 // malloc_printf("Enumerating small ptrs for Region starting at %p\n", range.address);
1647 if (type_mask
& MALLOC_ADMIN_REGION_RANGE_TYPE
) {
1648 vm_range_t admin_range
= {range
.address
+ (1 << SMALL_BLOCKS_ALIGN
), range
.size
- (1 << SMALL_BLOCKS_ALIGN
)};
1649 recorder(task
, context
, MALLOC_ADMIN_REGION_RANGE_TYPE
, &admin_range
, 1);
1651 if (type_mask
& (MALLOC_PTR_REGION_RANGE_TYPE
| MALLOC_ADMIN_REGION_RANGE_TYPE
)) {
1652 vm_range_t ptr_range
= {range
.address
, 1 << SMALL_BLOCKS_ALIGN
};
1653 recorder(task
, context
, MALLOC_PTR_REGION_RANGE_TYPE
, &ptr_range
, 1);
1655 if (type_mask
& MALLOC_PTR_IN_USE_RANGE_TYPE
) {
1656 unsigned char *mapped_region
;
1657 err
= reader(task
, range
.address
, range
.size
, (void **)&mapped_region
);
1658 if (err
) return err
;
1659 msize_t
*block_header
= (msize_t
*)(mapped_region
+ (1 << SMALL_BLOCKS_ALIGN
));
1660 unsigned block_index
= 0;
1661 unsigned block_limit
= NUM_SMALL_BLOCKS
;
1662 if (index
== num_regions
- 1)
1663 block_limit
-= (small_bytes_free_at_end
>> SHIFT_SMALL_QUANTUM
);
1664 while (block_index
< block_limit
) {
1665 msize_t msize_and_free
= block_header
[block_index
];
1666 msize_t msize
= msize_and_free
& ~ SMALL_IS_FREE
;
1667 if (! (msize_and_free
& SMALL_IS_FREE
)) {
1669 buffer
[count
].address
= range
.address
+ (block_index
<< SHIFT_SMALL_QUANTUM
);
1670 buffer
[count
].size
= msize
<< SHIFT_SMALL_QUANTUM
;
1672 if (count
>= MAX_RECORDER_BUFFER
) {
1673 recorder(task
, context
, MALLOC_PTR_IN_USE_RANGE_TYPE
, buffer
, count
);
1677 block_index
+= msize
;
1679 // malloc_printf("End small region - count=%d\n", count);
1684 recorder(task
, context
, MALLOC_PTR_IN_USE_RANGE_TYPE
, buffer
, count
);
1689 static INLINE
void *
1690 small_malloc_from_free_list(szone_t
*szone
, msize_t msize
) {
1692 CHECK_LOCKED(szone
, __PRETTY_FUNCTION__
);
1693 grain_t grain
= (msize
<= NUM_SMALL_SLOTS
) ? msize
- 1 : NUM_SMALL_SLOTS
- 1;
1694 unsigned bitmap
= szone
->small_bitmap
& ~ ((1 << grain
) - 1);
1697 if (!bitmap
) goto try_small_from_end
;
1698 grain
= BITMAP32_FFS(bitmap
) - 1;
1699 // first try the small grains
1700 free_list_t
**free_list
;
1701 free_list_t
**limit
= szone
->small_free_list
+ NUM_SMALL_SLOTS
- 1;
1702 free_list
= szone
->small_free_list
+ grain
;
1703 while (free_list
< limit
) {
1704 // try bigger grains
1708 next
= ((free_list_t
*)ptr
)->next
;
1710 next
->previous
= NULL
;
1711 free_list_set_checksum(szone
, next
);
1714 this_msize
= small_meta_header(ptr
)[0] & ~ SMALL_IS_FREE
;
1715 // malloc_printf("small_malloc_from_free_list: allocated from free list\n");
1716 goto add_leftover_and_proceed
;
1720 // We now check the large grains for one that is big enough
1723 this_msize
= small_meta_header(ptr
)[0] & ~ SMALL_IS_FREE
;
1724 if (this_msize
>= msize
) {
1725 // malloc_printf("small_malloc_from_free_list: allocated from last free list\n");
1726 small_free_list_remove_ptr(szone
, ptr
, this_msize
);
1727 goto add_leftover_and_proceed
;
1729 ptr
= ((free_list_t
*)ptr
)->next
;
1732 // Let's see if we can use szone->small_bytes_free_at_end
1733 // malloc_printf("Found nothing in free list small_bytes_free_at_end=%y\n", szone-> small_bytes_free_at_end);
1734 if (szone
->small_bytes_free_at_end
>= (msize
<< SHIFT_SMALL_QUANTUM
)) {
1735 ptr
= (void *)(SMALL_REGION_END(szone
->small_regions
[szone
->num_small_regions
-1]) - szone
->small_bytes_free_at_end
);
1736 szone
->small_bytes_free_at_end
-= msize
<< SHIFT_SMALL_QUANTUM
;
1737 if (szone
->small_bytes_free_at_end
) {
1738 // let's mark this block as in use to serve as boundary
1739 small_meta_header(ptr
+ (msize
<< SHIFT_SMALL_QUANTUM
))[0] = szone
->small_bytes_free_at_end
>> SHIFT_SMALL_QUANTUM
;
1742 goto return_small_alloc
;
1745 add_leftover_and_proceed
:
1746 if (this_msize
> msize
) {
1747 msize_t leftover_msize
= this_msize
- msize
;
1748 void *leftover_ptr
= ptr
+ (msize
<< SHIFT_SMALL_QUANTUM
);
1750 if (LOG(szone
,ptr
)) {
1751 malloc_printf("In small_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr
, this_msize
);
1754 small_free_list_add_ptr(szone
, leftover_ptr
, leftover_msize
);
1755 msize_t
*meta_headers
= small_meta_headers(leftover_ptr
);
1756 msize_t leftover_index
= small_meta_index(leftover_ptr
);
1757 small_meta_header_set_is_free(meta_headers
, leftover_index
, leftover_msize
);
1761 szone
->num_small_objects
++;
1762 szone
->num_bytes_in_small_objects
+= this_msize
<< SHIFT_SMALL_QUANTUM
;
1764 if (LOG(szone
,ptr
)) {
1765 malloc_printf("In small_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr
, this_msize
, msize
);
1768 small_meta_header(ptr
)[0] = this_msize
;
1772 static INLINE
void *
1773 small_malloc_should_clear(szone_t
*szone
, msize_t msize
, boolean_t cleared_requested
) {
1774 boolean_t locked
= 0;
1777 ptr
= (void *)szone
->last_small_free
;
1778 if ((((unsigned)ptr
) & (SMALL_QUANTUM
- 1)) == msize
) {
1779 // we have a candidate - let's lock to make sure
1780 LOCK_AND_NOTE_LOCKED(szone
, locked
);
1781 if (ptr
== (void *)szone
->last_small_free
) {
1782 szone
->last_small_free
= NULL
;
1783 // malloc_printf("using last_small_free\n");
1784 SZONE_UNLOCK(szone
);
1785 CHECK(szone
, __PRETTY_FUNCTION__
);
1786 ptr
= (void *)((unsigned)ptr
& ~ (SMALL_QUANTUM
- 1));
1787 if (cleared_requested
) {
1788 memset(ptr
, 0, msize
<< SHIFT_SMALL_QUANTUM
);
1792 // malloc_printf("optimistic locking for last_small_free failed\n");
1795 // Except in rare occasions where we need to add a new region, we are going to end up locking, so we might as well lock right away to avoid doing unnecessary optimistic probes
1796 if (!locked
) LOCK_AND_NOTE_LOCKED(szone
, locked
);
1797 ptr
= small_malloc_from_free_list(szone
, msize
);
1799 SZONE_UNLOCK(szone
);
1800 CHECK(szone
, __PRETTY_FUNCTION__
);
1801 if (cleared_requested
) {
1802 memset(ptr
, 0, msize
<< SHIFT_SMALL_QUANTUM
);
1806 ptr
= small_malloc_from_region_no_lock(szone
, msize
);
1807 // we don't clear because this freshly allocated space is pristine
1808 SZONE_UNLOCK(szone
);
1809 CHECK(szone
, __PRETTY_FUNCTION__
);
1813 static INLINE
void *
1814 small_malloc_cleared_no_lock(szone_t
*szone
, msize_t msize
) {
1815 // tries to allocate a small, cleared block
1816 // Assumes already locked
1817 CHECK_LOCKED(szone
, __PRETTY_FUNCTION__
);
1819 ptr
= small_malloc_from_free_list(szone
, msize
);
1821 memset(ptr
, 0, msize
<< SHIFT_SMALL_QUANTUM
);
1824 ptr
= small_malloc_from_region_no_lock(szone
, msize
);
1825 // we don't clear because this freshly allocated space is pristine
1831 free_small(szone_t
*szone
, void *ptr
, small_region_t
*small_region
) {
1832 // ptr is known to be in small_region
1833 msize_t msize_and_free
;
1834 msize_and_free
= small_meta_header(ptr
)[0];
1835 if (msize_and_free
& SMALL_IS_FREE
) {
1836 szone_error(szone
, "Object already freed being freed", ptr
);
1839 CHECK(szone
, __PRETTY_FUNCTION__
);
1840 // malloc_printf("%p[%x]\n", ptr, msize_and_free);
1843 void *ptr2
= szone
->last_small_free
;
1844 szone
->last_small_free
= (void *)(((unsigned)ptr
) | msize_and_free
);
1846 // malloc_printf("stuffing last_small_free\n");
1847 SZONE_UNLOCK(szone
);
1848 CHECK(szone
, __PRETTY_FUNCTION__
);
1851 // malloc_printf("replacing previous last_small_free %p with %p\n", ptr2, szone->last_small_free);
1852 msize_and_free
= (unsigned)ptr2
& (SMALL_QUANTUM
- 1);
1853 ptr
= (void *)(((unsigned)ptr2
) & ~ (SMALL_QUANTUM
- 1));
1854 small_region
= small_region_for_ptr_no_lock(szone
, ptr
);
1855 if (!small_region
) {
1856 szone_error(szone
, "Double free (small cache)", ptr
);
1859 small_free_no_lock(szone
, small_region
, ptr
, msize_and_free
);
1860 SZONE_UNLOCK(szone
);
1861 CHECK(szone
, __PRETTY_FUNCTION__
);
1865 print_small_free_list(szone_t
*szone
) {
1867 malloc_printf("Small free sizes: ");
1868 while (grain
< NUM_SMALL_SLOTS
) {
1869 free_list_t
*ptr
= szone
->small_free_list
[grain
];
1871 malloc_printf("%s%y[%d]; ", (grain
== NUM_SMALL_SLOTS
-1) ? ">=" : "", (grain
+ 1) * SMALL_QUANTUM
, free_list_count(ptr
));
1875 malloc_printf("\n");
1879 print_small_region(szone_t
*szone
, boolean_t verbose
, small_region_t
*region
, size_t bytes_at_end
) {
1880 unsigned counts
[1024];
1881 unsigned in_use
= 0;
1882 vm_address_t start
= SMALL_REGION_ADDRESS(*region
);
1883 vm_address_t limit
= SMALL_REGION_END(*region
) - bytes_at_end
;
1884 memset(counts
, 0, 1024 * sizeof(unsigned));
1885 while (start
< limit
) {
1886 msize_t msize_and_free
= small_meta_header((void *)start
)[0];
1887 msize_t msize
= msize_and_free
& ~ SMALL_IS_FREE
;
1888 if (!(msize_and_free
& SMALL_IS_FREE
)) {
1890 if (msize
< 1024) counts
[msize
]++;
1893 start
+= msize
<< SHIFT_SMALL_QUANTUM
;
1895 malloc_printf("Small region [%p-%p, %y]\tIn_use=%d ", SMALL_REGION_ADDRESS(*region
), SMALL_REGION_END(*region
), (int)SMALL_REGION_SIZE
, in_use
);
1896 if (bytes_at_end
) malloc_printf("Untouched=%y ", bytes_at_end
);
1897 if (verbose
&& in_use
) {
1899 malloc_printf("\n\tSizes in use: ");
1902 malloc_printf("%d[%d] ", ci
<< SHIFT_SMALL_QUANTUM
, counts
[ci
]);
1907 malloc_printf("\n");
1911 small_free_list_check(szone_t
*szone
, grain_t grain
) {
1912 CHECK_LOCKED(szone
, __PRETTY_FUNCTION__
);
1914 free_list_t
*ptr
= szone
->small_free_list
[grain
];
1915 free_list_t
*previous
= NULL
;
1917 msize_t msize_and_free
= small_meta_header(ptr
)[0];
1919 if (!(msize_and_free
& SMALL_IS_FREE
)) {
1920 malloc_printf("*** malloc[%d]: In-use ptr in free list grain=%d count=%d ptr=%p\n", getpid(), grain
, count
, ptr
);
1923 if (((unsigned)ptr
) & (SMALL_QUANTUM
- 1)) {
1924 malloc_printf("*** malloc[%d]: Unaligned ptr in free list grain=%d count=%d ptr=%p\n", getpid(), grain
, count
, ptr
);
1927 if (!small_region_for_ptr_no_lock(szone
, ptr
)) {
1928 malloc_printf("*** malloc[%d]: Ptr not in szone grain=%d count=%d ptr=%p\n", getpid(), grain
, count
, ptr
);
1931 free_list_checksum(szone
, ptr
, __PRETTY_FUNCTION__
);
1932 if (ptr
->previous
!= previous
) {
1933 malloc_printf("*** malloc[%d]: Previous incorrectly set grain=%d count=%d ptr=%p\n", getpid(), grain
, count
, ptr
);
/********************* LARGE ENTRY UTILITIES ************************/
1947 large_debug_print(szone_t
*szone
) {
1948 unsigned num_large_entries
= szone
->num_large_entries
;
1949 unsigned index
= num_large_entries
;
1951 large_entry_t
*range
= szone
->large_entries
+ index
;
1952 large_entry_t entry
= *range
;
1953 if (!LARGE_ENTRY_IS_EMPTY(entry
)) {
1954 malloc_printf("%d: %p(%y); ", index
, LARGE_ENTRY_ADDRESS(entry
), LARGE_ENTRY_SIZE(entry
));
1957 malloc_printf("\n");
static large_entry_t *
large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) {
    // result only valid during a lock
    unsigned    num_large_entries = szone->num_large_entries;
    unsigned    hash_index;
    unsigned    index;
    if (!num_large_entries) return NULL;
    hash_index = ((unsigned)ptr >> vm_page_shift) % num_large_entries;
    index = hash_index;
    do {
        large_entry_t   *range = szone->large_entries + index;
        large_entry_t   entry = *range;
        if (LARGE_ENTRY_MATCHES(entry, ptr)) return range;
        if (LARGE_ENTRY_IS_EMPTY(entry)) return NULL; // end of chain
        index++; if (index == num_large_entries) index = 0;
    } while (index != hash_index);
    return NULL;
}
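
/*
 * Illustrative sketch (not compiled into the allocator): open addressing with
 * linear probing, the same strategy as the large-entry table above (hash on
 * the page number, walk forward until a match or an empty slot).  The table
 * size, hash function, and entry layout here are simplified stand-ins.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

typedef struct { uintptr_t key; int value; } ex_entry_t;   /* key == 0 means empty */

static ex_entry_t *
example_lookup(ex_entry_t *table, unsigned n, uintptr_t key) {
    unsigned hash = (unsigned)(key % n);
    unsigned index = hash;
    do {
        if (table[index].key == key) return table + index;  /* hit */
        if (table[index].key == 0) return NULL;              /* empty slot: end of chain */
        index++; if (index == n) index = 0;                   /* wrap around */
    } while (index != hash);
    return NULL;                                              /* table full, no match */
}

int main(void) {
    ex_entry_t table[7] = { {0, 0} };
    table[3].key = 10; table[3].value = 42;   /* 10 % 7 == 3 */
    table[4].key = 17; table[4].value = 99;   /* 17 % 7 == 3 too, probed one slot forward */
    ex_entry_t *e = example_lookup(table, 7, 17);
    printf("%d\n", e ? e->value : -1);        /* prints 99 */
    return 0;
}
#endif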
static void
large_entry_insert_no_lock(szone_t *szone, large_entry_t range) {
    unsigned    num_large_entries = szone->num_large_entries;
    unsigned    hash_index = (range.address_and_num_pages >> vm_page_shift) % num_large_entries;
    unsigned    index = hash_index;
    // malloc_printf("Before insertion of %p\n", LARGE_ENTRY_ADDRESS(range));
    do {
        large_entry_t   *entry = szone->large_entries + index;
        if (LARGE_ENTRY_IS_EMPTY(*entry)) {
            *entry = range;
            return; // end of chain
        }
        index++; if (index == num_large_entries) index = 0;
    } while (index != hash_index);
}
static void
large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) {
    unsigned    num_large_entries = szone->num_large_entries;
    unsigned    hash_index = entry - szone->large_entries;
    unsigned    index = hash_index;
    do {
        large_entry_t   range;
        index++; if (index == num_large_entries) index = 0;
        range = szone->large_entries[index];
        if (LARGE_ENTRY_IS_EMPTY(range)) return;
        szone->large_entries[index].address_and_num_pages = 0;
        large_entry_insert_no_lock(szone, range); // this will reinsert in the proper place
    } while (index != hash_index);
}
static INLINE large_entry_t *
large_entries_alloc_no_lock(szone_t *szone, unsigned num) {
    size_t      size = num * sizeof(large_entry_t);
    boolean_t   is_vm_allocation = size >= LARGE_THRESHOLD;
    if (is_vm_allocation) {
        // Note that we allocate memory (via a system call) under a spin lock
        // That is certainly evil, however it's very rare in the lifetime of a process
        // The alternative would slow down the normal case
        return (void *)allocate_pages(szone, round_page(size), 0, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE));
    } else {
        return small_malloc_cleared_no_lock(szone, (size + SMALL_QUANTUM - 1) >> SHIFT_SMALL_QUANTUM);
    }
}
static void
large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate) {
    // returns range to deallocate
    size_t      size = num * sizeof(large_entry_t);
    boolean_t   is_vm_allocation = size >= LARGE_THRESHOLD;
    // malloc_printf("In large_entries_free_no_lock %d %d\n", num, is_vm_allocation);
    if (is_vm_allocation) {
        range_to_deallocate->address = (vm_address_t)entries;
        range_to_deallocate->size = round_page(size);
    } else {
        range_to_deallocate->size = 0;
        small_region_t  *region = small_region_for_ptr_no_lock(szone, entries);
        msize_t msize_and_free = small_meta_header(entries)[0];
        if (msize_and_free & SMALL_IS_FREE) {
            szone_error(szone, "Object already freed being freed", entries);
            return;
        }
        small_free_no_lock(szone, region, entries, msize_and_free);
    }
}
static void
large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate) {
    // sets range_to_deallocate
    unsigned    old_num_entries = szone->num_large_entries;
    large_entry_t       *old_entries = szone->large_entries;
    unsigned    new_num_entries = (old_num_entries) ? old_num_entries * 2 + 1 : 63; // always an odd number for good hashing
    large_entry_t       *new_entries = large_entries_alloc_no_lock(szone, new_num_entries);
    unsigned    index = old_num_entries;
    szone->num_large_entries = new_num_entries;
    szone->large_entries = new_entries;
    // malloc_printf("_grow_large_entries old_num_entries=%d new_num_entries=%d %p\n", old_num_entries, new_num_entries, old_entries);
    while (index--) {
        large_entry_t   oldRange = old_entries[index];
        if (!LARGE_ENTRY_IS_EMPTY(oldRange)) {
            large_entry_insert_no_lock(szone, oldRange);
        }
    }
    if (old_entries) {
        large_entries_free_no_lock(szone, old_entries, old_num_entries, range_to_deallocate);
    } else {
        range_to_deallocate->size = 0;
    }
}
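
/*
 * Illustrative sketch (not compiled into the allocator): the table above
 * grows as 63, 127, 255, ... (2n+1, always odd, as the comment says) so the
 * modulo hash keeps spreading entries; the loop below just prints a few
 * steps of that progression.
 */
#if 0
#include <stdio.h>

int main(void) {
    unsigned entries = 0;
    int i;
    for (i = 0; i < 5; i++) {
        entries = entries ? entries * 2 + 1 : 63;
        printf("%u ", entries);      /* 63 127 255 511 1023 */
    }
    printf("\n");
    return 0;
}
#endif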
static vm_range_t
large_free_no_lock(szone_t *szone, large_entry_t *entry) {
    // frees the specific entry in the size table
    // returns a range to truly deallocate
    vm_range_t  range;
    range.address = LARGE_ENTRY_ADDRESS(*entry);
    range.size = LARGE_ENTRY_SIZE(*entry);
    szone->num_large_objects_in_use--;
    szone->num_bytes_in_large_objects -= range.size;
    if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
        protect(szone, range.address, range.size, VM_PROT_READ | VM_PROT_WRITE, szone->debug_flags);
        range.address -= 1 << vm_page_shift;
        range.size += 2 * (1 << vm_page_shift);
    }
    // malloc_printf("Entry is %p=%d; cache is %p ; found=%p\n", entry,
    //     entry-szone->large_entries, szone->large_entries,
    //     large_entry_for_pointer_no_lock(szone, (void *)range.address));
    entry->address_and_num_pages = 0;
    large_entries_rehash_after_entry_no_lock(szone, entry);
#if DEBUG_MALLOC
    if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) {
        malloc_printf("*** malloc[%d]: Freed entry %p still in use; num_large_entries=%d\n", getpid(), range.address, szone->num_large_entries);
        large_debug_print(szone);
    }
#endif
    return range;
}
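
/*
 * Illustrative sketch (not compiled into the allocator): a large entry packs
 * a page-aligned address and a page count into one word, which is what
 * LARGE_ENTRY_ADDRESS/LARGE_ENTRY_SIZE unpack above.  The 12-bit shift below
 * is just the usual 4K page for the sake of the example; counts that no
 * longer fit in the low bits are tracked as huge entries instead.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_MASK  ((1u << EX_PAGE_SHIFT) - 1)

int main(void) {
    uintptr_t addr = 0x00740000;                  /* page aligned */
    unsigned num_pages = 5;
    uintptr_t packed = addr | num_pages;          /* low bits carry the page count */
    printf("address=%p pages=%lu size=%lu\n",
           (void *)(packed & ~(uintptr_t)EX_PAGE_MASK),
           (unsigned long)(packed & EX_PAGE_MASK),
           (unsigned long)((packed & EX_PAGE_MASK) << EX_PAGE_SHIFT));
    /* address=0x740000 pages=5 size=20480 */
    return 0;
}
#endif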
2108 static INLINE boolean_t
2109 try_realloc_small_in_place(szone_t
*szone
, void *ptr
, size_t old_size
, size_t new_size
) {
2110 // returns 1 on success
2111 msize_t
*meta_headers
= small_meta_headers(ptr
);
2112 msize_t index
= small_meta_index(ptr
);
2113 msize_t old_msize
= old_size
>> SHIFT_SMALL_QUANTUM
;
2114 msize_t new_msize
= (new_size
+ SMALL_QUANTUM
- 1) >> SHIFT_SMALL_QUANTUM
;
2115 void *next_block
= (char *)ptr
+ old_size
;
2116 msize_t next_index
= index
+ old_msize
;
2117 if (next_index
>= NUM_SMALL_BLOCKS
) {
2118 // malloc_printf("try_realloc_small_in_place can't take place at end %p %d %d %d\n", ptr, old_size, new_size, next_index);
2122 if ((vm_address_t
)next_block
& (SMALL_QUANTUM
- 1)) {
2123 szone_error(szone
, "Internal invariant broken in realloc(next_block)", next_block
);
2125 if (meta_headers
[index
] != old_msize
) malloc_printf("*** try_realloc_small_in_place incorrect old %d %d\n", meta_headers
[index
], old_msize
);
2128 // If the next block is free, we coalesce
2129 msize_t next_msize_and_free
;
2131 next_msize_and_free
= meta_headers
[next_index
];
2132 next_msize
= next_msize_and_free
& ~ SMALL_IS_FREE
;
2133 if (!(next_msize_and_free
& SMALL_IS_FREE
) || (old_msize
+ next_msize
< new_msize
)) {
2134 SZONE_UNLOCK(szone
);
2137 // malloc_printf("Small realloc in place for %p; current msize=%db(%d) next=%p next_msize=%d wanted=%db(%d)\n", ptr, old_size, meta_headers[index], next_block, next_msize, new_size, new_msize);
2138 small_free_list_remove_ptr(szone
, next_block
, next_msize
);
2139 small_meta_header_set_middle(meta_headers
, next_index
);
2140 msize_t leftover_msize
= old_msize
+ next_msize
- new_msize
;
2141 if (leftover_msize
) {
2142 void *leftover
= ptr
+ (new_msize
<< SHIFT_SMALL_QUANTUM
);
2143 // malloc_printf("Leftover in realloc in place %p msize=%d\n", leftover, leftover_msize);
2144 small_free_list_add_ptr(szone
, leftover
, leftover_msize
);
2145 msize_t leftover_index
= index
+ new_msize
;
2146 small_meta_header_set_is_free(meta_headers
, leftover_index
, leftover_msize
);
2149 if ((new_msize
<< SHIFT_SMALL_QUANTUM
) >= LARGE_THRESHOLD
) {
2150 malloc_printf("*** Realloc in place for %p exceeded msize=%d\n", new_msize
);
2153 small_meta_header_set_in_use(meta_headers
, index
, new_msize
);
2155 if (LOG(szone
,ptr
)) {
2156 malloc_printf("In szone_realloc(), ptr=%p, msize=%d\n", ptr
, small_meta_header(ptr
)[0]);
2159 szone
->num_bytes_in_small_objects
+= (new_msize
- old_msize
) << SHIFT_SMALL_QUANTUM
;
2160 SZONE_UNLOCK(szone
);
2161 CHECK(szone
, __PRETTY_FUNCTION__
);
2162 // malloc_printf("Extended ptr %p for realloc old=%d desired=%d new=%d "
2163 // "leftover=%d\n", ptr, (unsigned)old_size, (unsigned)new_size,
2164 // (unsigned)szone_size(szone, ptr), leftover_msize << SHIFT_SMALL_QUANTUM);
2168 static kern_return_t
2169 large_in_use_enumerator(task_t task
, void *context
,
2170 unsigned type_mask
, vm_address_t large_entries_address
, unsigned num_entries
,
2171 memory_reader_t reader
, vm_range_recorder_t recorder
) {
2173 vm_range_t buffer
[MAX_RECORDER_BUFFER
];
2175 large_entry_t
*entries
;
2177 err
= reader(task
, large_entries_address
,
2178 sizeof(large_entry_t
) * num_entries
, (void **)&entries
);
2179 if (err
) return err
;
2180 index
= num_entries
;
2181 if ((type_mask
& MALLOC_ADMIN_REGION_RANGE_TYPE
)
2182 && (num_entries
* sizeof(large_entry_t
) >= LARGE_THRESHOLD
)) {
2184 range
.address
= large_entries_address
;
2185 range
.size
= round_page(num_entries
* sizeof(large_entry_t
));
2186 recorder(task
, context
, MALLOC_ADMIN_REGION_RANGE_TYPE
, &range
, 1);
2188 if (type_mask
& (MALLOC_PTR_IN_USE_RANGE_TYPE
2189 | MALLOC_PTR_REGION_RANGE_TYPE
))
2191 large_entry_t entry
= entries
[index
];
2192 if (!LARGE_ENTRY_IS_EMPTY(entry
)) {
2194 range
.address
= LARGE_ENTRY_ADDRESS(entry
);
2195 range
.size
= LARGE_ENTRY_SIZE(entry
);
2196 buffer
[count
++] = range
;
2197 if (count
>= MAX_RECORDER_BUFFER
) {
2198 recorder(task
, context
, MALLOC_PTR_IN_USE_RANGE_TYPE
2199 | MALLOC_PTR_REGION_RANGE_TYPE
, buffer
, count
);
2205 recorder(task
, context
, MALLOC_PTR_IN_USE_RANGE_TYPE
2206 | MALLOC_PTR_REGION_RANGE_TYPE
, buffer
, count
);
/********************* HUGE ENTRY UTILITIES ************************/
static huge_entry_t *
huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) {
    unsigned    index = szone->num_huge_entries;
    while (index--) {
        huge_entry_t    *huge = szone->huge_entries + index;
        if (huge->address == (vm_address_t)ptr) return huge;
    }
    return NULL;
}
2225 huge_entry_append(szone_t
*szone
, huge_entry_t huge
) {
2226 // We do a little dance with locking because doing allocation (even in the
2227 // default szone) may cause something to get freed in this szone, with a
2229 // Returns 1 on success
2230 huge_entry_t
*new_huge_entries
= NULL
;
2233 unsigned num_huge_entries
;
2234 num_huge_entries
= szone
->num_huge_entries
;
2235 SZONE_UNLOCK(szone
);
2236 // malloc_printf("In huge_entry_append currentEntries=%d\n", num_huge_entries);
2237 if (new_huge_entries
) szone_free(szone
, new_huge_entries
);
2238 new_huge_entries
= szone_malloc(szone
, (num_huge_entries
+ 1) * sizeof(huge_entry_t
));
2239 if (new_huge_entries
== NULL
) return 0;
2241 if (num_huge_entries
== szone
->num_huge_entries
) {
2242 // No change - our malloc still applies
2243 huge_entry_t
*old_huge_entries
= szone
->huge_entries
;
2244 if (num_huge_entries
) {
2245 memcpy(new_huge_entries
, old_huge_entries
, num_huge_entries
* sizeof(huge_entry_t
));
2247 new_huge_entries
[szone
->num_huge_entries
++] = huge
;
2248 szone
->huge_entries
= new_huge_entries
;
2249 SZONE_UNLOCK(szone
);
2250 szone_free(szone
, old_huge_entries
);
2251 // malloc_printf("Done huge_entry_append now=%d\n", szone->num_huge_entries);
2258 static kern_return_t
2259 huge_in_use_enumerator(task_t task
, void *context
,
2260 unsigned type_mask
, vm_address_t huge_entries_address
, unsigned num_entries
,
2261 memory_reader_t reader
, vm_range_recorder_t recorder
) {
2262 huge_entry_t
*entries
;
2264 err
= reader(task
, huge_entries_address
, sizeof(huge_entry_t
) * num_entries
,
2266 if (err
) return err
;
2268 recorder(task
, context
,
2269 MALLOC_PTR_IN_USE_RANGE_TYPE
| MALLOC_PTR_REGION_RANGE_TYPE
, entries
,
2276 large_and_huge_malloc(szone_t
*szone
, unsigned num_pages
) {
2277 vm_address_t addr
= 0;
2278 vm_range_t range_to_deallocate
;
2279 if (!num_pages
) num_pages
= 1; // minimal allocation size for this szone
2280 // malloc_printf("In large_and_huge_malloc for %y\n", num_pages * (1 << vm_page_shift));
2281 range_to_deallocate
.size
= 0;
2282 if (num_pages
>= (1 << vm_page_shift
)) {
2284 huge
.size
= num_pages
<< vm_page_shift
;
2285 addr
= allocate_pages(szone
, huge
.size
, 0, szone
->debug_flags
, VM_MAKE_TAG(VM_MEMORY_MALLOC_HUGE
));
2286 if (!addr
) return NULL
;
2287 huge
.address
= addr
;
2288 if (! huge_entry_append(szone
, huge
)) return NULL
;
2290 szone
->num_bytes_in_huge_objects
+= huge
.size
;
2292 vm_size_t size
= num_pages
<< vm_page_shift
;
2293 large_entry_t entry
;
2294 addr
= allocate_pages(szone
, size
, 0, szone
->debug_flags
, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE
));
2296 if (LOG(szone
, addr
)) malloc_printf("In szone_malloc true large allocation at %p for %y\n", (void *)addr
, size
);
2300 SZONE_UNLOCK(szone
);
2304 if (large_entry_for_pointer_no_lock(szone
, (void *)addr
)) {
2305 malloc_printf("Freshly allocated is already in use: %p\n", addr
);
2306 large_debug_print(szone
);
2310 if ((szone
->num_large_objects_in_use
+ 1) * 4 > szone
->num_large_entries
) {
2311 // density of hash table too high; grow table
2312 // we do that under lock to avoid a race
2313 // malloc_printf("In szone_malloc growing hash table current=%d\n", szone->num_large_entries);
2314 large_entries_grow_no_lock(szone
, &range_to_deallocate
);
2316 // malloc_printf("Inserting large entry (%p, %y)\n", addr, num_pages * (1 << vm_page_shift));
2317 entry
.address_and_num_pages
= addr
| num_pages
;
2319 if (large_entry_for_pointer_no_lock(szone
, (void *)addr
)) {
2320 malloc_printf("Entry about to be added already in use: %p\n",
2322 large_debug_print(szone
);
2326 large_entry_insert_no_lock(szone
, entry
);
2328 if (!large_entry_for_pointer_no_lock(szone
, (void *)addr
)) {
2329 malloc_printf("Can't find entry just added\n");
2330 large_debug_print(szone
);
2334 // malloc_printf("Inserted large entry (%p, %d pages)\n", addr,
2336 szone
->num_large_objects_in_use
++;
2337 szone
->num_bytes_in_large_objects
+= size
;
2339 SZONE_UNLOCK(szone
);
2340 if (range_to_deallocate
.size
) {
2341 deallocate_pages(szone
, range_to_deallocate
.address
, range_to_deallocate
.size
, 0); // we deallocate outside the lock
2342 // malloc_printf("Deallocated large entries %d\n", range_to_deallocate.size);
2344 return (void *)addr
;
2348 free_large_or_huge(szone_t
*szone
, void *ptr
) {
2349 // We have established ptr is page-aligned and not tiny nor small
2350 large_entry_t
*entry
;
2351 vm_range_t vm_range_to_deallocate
;
2354 entry
= large_entry_for_pointer_no_lock(szone
, ptr
);
2356 // malloc_printf("Ready for deallocation [%p-%y]\n", LARGE_ENTRY_ADDRESS(*entry), LARGE_ENTRY_SIZE(*entry));
2357 vm_range_to_deallocate
= large_free_no_lock(szone
, entry
);
2359 if (large_entry_for_pointer_no_lock(szone
, ptr
)) {
2360 malloc_printf("*** malloc[%d]: Just after freeing %p still in use num_large_entries=%d\n", getpid(), ptr
, szone
->num_large_entries
);
2361 large_debug_print(szone
);
2365 } else if ((huge
= huge_entry_for_pointer_no_lock(szone
, ptr
))) {
2366 vm_range_to_deallocate
= *huge
;
2367 *huge
= szone
->huge_entries
[--szone
->num_huge_entries
]; // last entry fills that spot
2368 szone
->num_bytes_in_huge_objects
-= vm_range_to_deallocate
.size
;
2371 large_debug_print(szone
);
2373 szone_error(szone
, "Pointer being freed was not allocated", ptr
);
2376 SZONE_UNLOCK(szone
); // we release the lock asap
2377 CHECK(szone
, __PRETTY_FUNCTION__
);
2378 // we deallocate_pages, including guard pages
2379 if (vm_range_to_deallocate
.address
) {
2380 // malloc_printf("About to deallocate %p size %y\n", vm_range_to_deallocate.address, vm_range_to_deallocate.size);
2382 if (large_entry_for_pointer_no_lock(szone
,
2383 (void *)vm_range_to_deallocate
.address
)) {
2384 malloc_printf("*** malloc[%d]: Invariant broken: %p still in use num_large_entries=%d\n", getpid(), vm_range_to_deallocate
.address
, szone
->num_large_entries
);
2385 large_debug_print(szone
);
2389 deallocate_pages(szone
, vm_range_to_deallocate
.address
, vm_range_to_deallocate
.size
, 0);
2394 try_realloc_large_or_huge_in_place(szone_t
*szone
, void *ptr
, size_t old_size
, size_t new_size
) {
2395 vm_address_t addr
= (vm_address_t
)ptr
+ old_size
;
2396 large_entry_t
*entry
;
2399 if (old_size
!= ((old_size
>> vm_page_shift
) << vm_page_shift
)) {
2400 malloc_printf("*** old_size is %d\n", old_size
);
2403 // malloc_printf("=== Trying (1) to extend %p from %d to %d\n", ptr, old_size, new_size);
2405 entry
= large_entry_for_pointer_no_lock(szone
, (void *)addr
);
2406 SZONE_UNLOCK(szone
);
2408 return 0; // large pointer already exist in table - extension is not going to work
2410 new_size
= round_page(new_size
);
2411 // malloc_printf("=== Trying (2) to extend %p from %d to %d\n", ptr, old_size, new_size);
2413 * Ask for allocation at a specific address, and mark as realloc
2414 * to request coalescing with previous realloc'ed extensions.
2416 err
= vm_allocate(mach_task_self(), &addr
, new_size
- old_size
, VM_MAKE_TAG(VM_MEMORY_REALLOC
));
2417 if (err
!= KERN_SUCCESS
) {
2422 * If the new size is still under the large/huge threshold, we can just
2423 * extend the existing large block.
2425 * Note: this logic is predicated on the understanding that an allocated
2426 * block can never really shrink, so that the new size will always be
2427 * larger than the old size.
2429 if ((new_size
>> vm_page_shift
) < (1 << vm_page_shift
)) {
2430 /* extend existing large entry */
2431 entry
= large_entry_for_pointer_no_lock(szone
, ptr
);
2433 szone_error(szone
, "large entry reallocated is not properly in table", ptr
);
2434 /* XXX will cause fault on next reference to entry */
2436 entry
->address_and_num_pages
= (vm_address_t
)ptr
| (new_size
>> vm_page_shift
);
2437 szone
->num_bytes_in_large_objects
+= new_size
- old_size
;
2438 } else if ((old_size
>> vm_page_shift
) >= (1 << vm_page_shift
)) {
2439 /* extend existing huge entry */
2440 huge_entry_t
*huge_entry
= huge_entry_for_pointer_no_lock(szone
, ptr
);
2442 szone_error(szone
, "huge entry reallocated is not properly in table", ptr
);
2443 /* XXX will cause fault on next reference to huge_entry */
2445 huge_entry
->size
= new_size
;
2446 szone
->num_bytes_in_huge_objects
+= new_size
- old_size
;
2448 /* need to convert large entry to huge entry */
2451 /* release large entry, note we still have the VM allocation */
2452 entry
= large_entry_for_pointer_no_lock(szone
, ptr
);
2453 large_entry_t saved_entry
= *entry
; // in case we need to put it back
2454 large_free_no_lock(szone
, entry
);
2455 szone
->num_bytes_in_large_objects
-= old_size
;
2457 /* and get a huge entry */
2458 huge
.address
= (vm_address_t
)ptr
;
2459 huge
.size
= new_size
; /* fix up size */
2460 SZONE_UNLOCK(szone
);
2461 if (huge_entry_append(szone
, huge
)) {
2462 szone
->num_bytes_in_huge_objects
+= new_size
;
2463 return 1; // success!
2466 // we leak memory (the extra space appended) but data structures are correct
2467 large_entry_insert_no_lock(szone
, saved_entry
); // this will reinsert the large entry
2469 // malloc_printf("=== Successfully reallocated at end of %p from %d to %d\n", ptr, old_size, new_size);
2470 SZONE_UNLOCK(szone
); // we release the lock asap
/********************* Zone call backs ************************/
static void
szone_free(szone_t *szone, void *ptr) {
    // malloc_printf("szone_free(%p)\n", ptr);
#if DEBUG_MALLOC
    if (LOG(szone, ptr)) malloc_printf("In szone_free with %p\n", ptr);
#endif
    if (!ptr) return;
    if ((vm_address_t)ptr & (TINY_QUANTUM - 1)) {
        szone_error(szone, "Non-aligned pointer being freed", ptr);
        return;
    }
    // try a tiny pointer
    tiny_region_t       *tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
    if (tiny_region) {
        free_tiny(szone, ptr, tiny_region);
        return;
    }
    if ((vm_address_t)ptr & (SMALL_QUANTUM - 1)) {
        szone_error(szone, "Non-aligned pointer being freed (2)", ptr);
        return;
    }
    // try a small pointer
    small_region_t      *small_region = small_region_for_ptr_no_lock(szone, ptr);
    if (small_region) {
        free_small(szone, ptr, small_region);
        return;
    }
    if (((unsigned)ptr) & ((1 << vm_page_shift) - 1)) {
        szone_error(szone, "Non-page-aligned, non-allocated pointer being freed", ptr);
        return;
    }
    free_large_or_huge(szone, ptr);
}
static INLINE void *
szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested) {
    void        *ptr;
    if (size <= 31*TINY_QUANTUM) {
        msize_t msize = (size + TINY_QUANTUM - 1) >> SHIFT_TINY_QUANTUM;
        if (! msize) msize = 1;
        ptr = tiny_malloc_should_clear(szone, msize, cleared_requested);
    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
        msize_t msize = (size + SMALL_QUANTUM - 1) >> SHIFT_SMALL_QUANTUM;
        if (! msize) msize = 1;
        ptr = small_malloc_should_clear(szone, msize, cleared_requested);
    } else {
        unsigned        num_pages;
        num_pages = round_page(size) >> vm_page_shift;
        ptr = large_and_huge_malloc(szone, num_pages);
    }
#if DEBUG_MALLOC
    if (LOG(szone, ptr)) malloc_printf("szone_malloc returned %p\n", ptr);
#endif
    return ptr;
}
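
/*
 * Illustrative sketch (not compiled into the allocator): the request-size
 * triage above, with the quanta written out as plain numbers purely to show
 * the arithmetic.  The 16-byte tiny quantum matches TINY_QUANTUM; the 512-byte
 * small quantum and the 15360-byte large cutoff are example values standing in
 * for SHIFT_SMALL_QUANTUM and LARGE_THRESHOLD, which own the real numbers.
 */
#if 0
#include <stdio.h>

#define EX_SHIFT_TINY   4                   /* 16-byte quantum, like TINY_QUANTUM */
#define EX_SHIFT_SMALL  9                   /* example small quantum */
#define EX_LARGE_THRESHOLD (15 * 1024)      /* example large cutoff */

static const char *
example_classify(size_t size, unsigned *msize_out) {
    if (size <= 31 * (1 << EX_SHIFT_TINY)) {
        unsigned msize = (size + (1 << EX_SHIFT_TINY) - 1) >> EX_SHIFT_TINY;
        *msize_out = msize ? msize : 1;
        return "tiny";
    }
    if (size < EX_LARGE_THRESHOLD) {
        unsigned msize = (size + (1 << EX_SHIFT_SMALL) - 1) >> EX_SHIFT_SMALL;
        *msize_out = msize ? msize : 1;
        return "small";
    }
    *msize_out = 0;     /* handled in whole pages, not quanta */
    return "large/huge";
}

int main(void) {
    unsigned msize;
    const char *kind = example_classify(200, &msize);
    printf("%s msize=%u\n", kind, msize);   /* tiny msize=13 (13 * 16 = 208 bytes) */
    return 0;
}
#endif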
static void *
szone_malloc(szone_t *szone, size_t size) {
    // malloc_printf("szone_malloc(%d)\n", size);
    void        *ptr = szone_malloc_should_clear(szone, size, 0);
    // malloc_printf("szone_malloc(%d) -> %p %d\n", size, ptr, malloc_size(ptr));
    return ptr;
}

static void *
szone_calloc(szone_t *szone, size_t num_items, size_t size) {
    // malloc_printf("szone_calloc(%d,%d)\n", num_items, size);
    void        *ptr = szone_malloc_should_clear(szone, num_items * size, 1);
    // malloc_printf("szone_calloc(%d,%d) -> %p\n", num_items, size, ptr);
    return ptr;
}

static void *
szone_valloc(szone_t *szone, size_t size) {
    void        *ptr;
    unsigned    num_pages;
    num_pages = round_page(size) >> vm_page_shift;
    ptr = large_and_huge_malloc(szone, num_pages);
#if DEBUG_MALLOC
    if (LOG(szone, ptr)) malloc_printf("szone_valloc returned %p\n", ptr);
#endif
    return ptr;
}
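
/*
 * Illustrative sketch (not compiled into the allocator): how a client would
 * exercise these callbacks through the public malloc zone interface.
 * create_scalable_zone() is defined later in this file; malloc_zone_malloc()
 * and malloc_zone_free() dispatch into szone_malloc()/szone_free() above.
 */
#if 0
#include <malloc/malloc.h>
#include <stdio.h>

int main(void) {
    malloc_zone_t *zone = create_scalable_zone(0, 0);   /* no special debug flags */
    malloc_zone_register(zone);                         /* so malloc_size() can find it */
    void *p = malloc_zone_malloc(zone, 100);            /* goes through szone_malloc */
    printf("allocated %p, reported size %lu\n", p, (unsigned long)malloc_size(p));
    malloc_zone_free(zone, p);                          /* goes through szone_free */
    return 0;
}
#endif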
2563 szone_size(szone_t
*szone
, const void *ptr
) {
2565 large_entry_t
*entry
;
2567 // malloc_printf("szone_size(%p)\n", ptr);
2570 if (LOG(szone
, ptr
)) {
2571 malloc_printf("In szone_size for %p (szone=%p)\n", ptr
, szone
);
2574 if ((vm_address_t
)ptr
& (TINY_QUANTUM
- 1)) return 0;
2576 tiny_region_t
*tiny_region
= tiny_region_for_ptr_no_lock(szone
, ptr
);
2578 // this is indeed a valid pointer
2580 msize_t msize
= get_tiny_meta_header(ptr
, &is_free
);
2581 return (is_free
) ? 0 : msize
<< SHIFT_TINY_QUANTUM
;
2583 if ((vm_address_t
)ptr
& (SMALL_QUANTUM
- 1)) return 0;
2585 small_region_t
*small_region
= small_region_for_ptr_no_lock(szone
, ptr
);
2587 // this is indeed a valid pointer
2588 msize_t msize_and_free
= small_meta_header(ptr
)[0];
2589 return (msize_and_free
& SMALL_IS_FREE
) ? 0 : msize_and_free
<< SHIFT_SMALL_QUANTUM
;
2591 if (((unsigned)ptr
) & ((1 << vm_page_shift
) - 1)) {
2592 // malloc_printf("Object %p not found in szone_size\n", ptr);
2596 entry
= large_entry_for_pointer_no_lock(szone
, ptr
);
2598 size
= LARGE_ENTRY_SIZE(*entry
);
2599 } else if ((huge
= huge_entry_for_pointer_no_lock(szone
, ptr
))) {
2602 SZONE_UNLOCK(szone
);
2603 // malloc_printf("szone_size for large/huge %p returned %d\n", ptr, (unsigned)size);
2605 if (LOG(szone
, ptr
)) {
2606 malloc_printf("szone_size for %p returned %d\n", ptr
, (unsigned)size
);
2613 szone_realloc(szone_t
*szone
, void *ptr
, size_t new_size
) {
2614 size_t old_size
= 0;
2616 // malloc_printf("szone_realloc(%p,%d)\n", ptr, new_size);
2618 if (LOG(szone
, ptr
)) {
2619 malloc_printf("In szone_realloc for %p, %d\n", ptr
, (unsigned)new_size
);
2623 ptr
= szone_malloc(szone
, new_size
);
2624 // malloc_printf("szone_realloc(%p,%d) -> %p\n", ptr, new_size, ptr);
2627 old_size
= szone_size(szone
, ptr
);
2629 szone_error(szone
, "Pointer being reallocated was not allocated", ptr
);
2632 /* we never shrink an allocation */
2633 if (old_size
>= new_size
) return ptr
;
2634 if ((new_size
+ TINY_QUANTUM
- 1) <= 31 * TINY_QUANTUM
) {
2635 // We now try to realloc in place
2636 if (try_realloc_tiny_in_place(szone
, ptr
, old_size
, new_size
)) {
2637 // malloc_printf("szone_realloc(%p,%d) -> %p\n", ptr, new_size, ptr);
2640 } else if (!((szone
->debug_flags
& SCALABLE_MALLOC_ADD_GUARD_PAGES
) && PROTECT_SMALL
) && ((new_size
+ SMALL_QUANTUM
- 1) < LARGE_THRESHOLD
) && (old_size
> 31 * TINY_QUANTUM
)) {
2641 // We now try to realloc in place
2642 if (try_realloc_small_in_place(szone
, ptr
, old_size
, new_size
)) {
2643 // malloc_printf("szone_realloc(%p,%d) small in place -> %p\n", ptr, new_size, ptr);
2646 } else if (!((szone
->debug_flags
& SCALABLE_MALLOC_ADD_GUARD_PAGES
) && PROTECT_SMALL
) && (old_size
> LARGE_THRESHOLD
)) {
2647 if (try_realloc_large_or_huge_in_place(szone
, ptr
, old_size
, new_size
)) {
2651 new_ptr
= szone_malloc(szone
, new_size
);
2652 if (new_ptr
== NULL
) return NULL
;
2653 if ((old_size
> VM_COPY_THRESHOLD
) && (new_size
> VM_COPY_THRESHOLD
)) {
2654 // we know everything is page-aligned try vm_copy
2655 kern_return_t err
= 0;
2656 err
= vm_copy(mach_task_self(), (vm_address_t
)ptr
, old_size
, (vm_address_t
)new_ptr
);
2658 szone_error(szone
, "Can't vm_copy region", ptr
);
2661 memcpy(new_ptr
, ptr
, old_size
);
2663 szone_free(szone
, ptr
);
2665 if (LOG(szone
, ptr
)) {
2666 malloc_printf("szone_realloc returned %p for %d\n", new_ptr
, (unsigned)new_size
);
2669 // malloc_printf("szone_realloc(%p,%d) -> %p\n", ptr, new_size, new_ptr);
2674 szone_batch_malloc(szone_t
*szone
, size_t size
, void **results
, unsigned count
) {
2675 // given a size, returns pointers capable of holding that size
2676 // returns the number of pointers allocated
2677 // may return 0 - this function will do best attempts, but just that
2678 // malloc_printf("In szone_batch_malloc(%d, %d)\n", size, count);
2679 if (size
> 31*TINY_QUANTUM
) return 0; // only bother implementing this for tiny
2680 msize_t msize
= (size
+ TINY_QUANTUM
- 1) >> SHIFT_TINY_QUANTUM
;
2681 if (! msize
) msize
= 1;
2682 size_t chunk_size
= msize
<< SHIFT_TINY_QUANTUM
;
2684 CHECK(szone
, __PRETTY_FUNCTION__
);
2685 SZONE_LOCK(szone
); // might as well lock right here to avoid concurrency issues
2686 free_list_t
**free_list
= szone
->tiny_free_list
+ msize
- 1;
2687 free_list_t
*ptr
= *free_list
;
2688 while (found
< count
) {
2690 *results
++ = ptr
; found
++;
2691 set_tiny_meta_header_in_use(ptr
, msize
);
2692 ptr
= ((free_list_t
*)ptr
)->next
;
2695 ((free_list_t
*)ptr
)->previous
= NULL
;
2696 free_list_set_checksum(szone
, (free_list_t
*)ptr
);
2698 *free_list
= (void *)ptr
;
2699 // Note that we could allocate from the free lists for larger msize
2700 // But that may un-necessarily fragment - so we might as well let the client do that
2701 // We could also allocate from szone->tiny_bytes_free_at_end
2702 // But that means we'll "eat-up" the untouched area faster, increasing the working set
2703 // So we just return what we have and just that
2704 szone
->num_tiny_objects
+= found
;
2705 szone
->num_bytes_in_tiny_objects
+= chunk_size
* found
;
2706 SZONE_UNLOCK(szone
);
2707 // malloc_printf("In szone_batch_malloc(%d, %d) -> %d\n", size, count, found);
2712 szone_batch_free(szone_t
*szone
, void **to_be_freed
, unsigned count
) {
2713 // frees all the pointers in to_be_freed
2714 // note that to_be_freed may be overwritten during the process
2716 // malloc_printf("Freeing %d items\n", count);
2718 CHECK(szone
, __PRETTY_FUNCTION__
);
2720 while (cc
< count
) {
2721 void *ptr
= to_be_freed
[cc
];
2722 tiny_region_t
*tiny_region
= tiny_region_for_ptr_no_lock(szone
, ptr
);
2724 // this is a tiny pointer
2726 msize_t msize
= get_tiny_meta_header(ptr
, &is_free
);
2727 if (is_free
) break; // a double free; let the standard free deal with it
2728 tiny_free_no_lock(szone
, tiny_region
, ptr
, msize
);
2729 to_be_freed
[cc
] = NULL
;
2733 SZONE_UNLOCK(szone
);
2734 CHECK(szone
, __PRETTY_FUNCTION__
);
2736 void *ptr
= to_be_freed
[count
];
2737 // malloc_printf("Freeing item at %d: %p\n", count, ptr);
2738 if (ptr
) szone_free(szone
, ptr
);
2743 szone_destroy(szone_t
*szone
) {
2745 small_region_t pended_region
= 0;
2746 index
= szone
->num_large_entries
;
2748 large_entry_t
*entry
= szone
->large_entries
+ index
;
2749 if (!LARGE_ENTRY_IS_EMPTY(*entry
)) {
2750 large_entry_t range
;
2752 // we deallocate_pages, including guard pages
2753 deallocate_pages(szone
, LARGE_ENTRY_ADDRESS(range
), LARGE_ENTRY_SIZE(range
), szone
->debug_flags
);
2756 if (szone
->num_large_entries
* sizeof(large_entry_t
) >= LARGE_THRESHOLD
) {
2757 vm_range_t range_to_deallocate
;
2758 large_entries_free_no_lock(szone
, szone
->large_entries
, szone
->num_large_entries
, &range_to_deallocate
); // we do not free in the small chunk case
2759 if (range_to_deallocate
.size
) deallocate_pages(szone
, range_to_deallocate
.address
, range_to_deallocate
.size
, 0);
2762 index
= szone
->num_huge_entries
;
2764 huge_entry_t
*huge
= szone
->huge_entries
+ index
;
2765 deallocate_pages(szone
, huge
->address
, huge
->size
, szone
->debug_flags
);
2768 index
= szone
->num_tiny_regions
;
2770 tiny_region_t tiny_region
= szone
->tiny_regions
[index
];
2771 vm_size_t size_allocated
= ((TINY_REGION_SIZE
+ (1 << vm_page_shift
) - 1) >> vm_page_shift
) << vm_page_shift
;
2772 deallocate_pages(szone
, TINY_REGION_ADDRESS(tiny_region
), size_allocated
, 0);
2774 // and now we free regions, with regions[0] as the last one (the final harakiri)
2775 index
= szone
->num_small_regions
;
2777 small_region_t region
= szone
->small_regions
[index
];
2779 && (void *)szone
->small_regions
>= (void *)(SMALL_REGION_ADDRESS(region
))
2780 && (void *)szone
->small_regions
< (void *)(SMALL_REGION_END(region
))) {
2781 // Pend deallocation of this region, since the region
2782 // bookkeeping array is in it.
2783 pended_region
= region
;
2785 deallocate_pages(szone
, SMALL_REGION_ADDRESS(region
), SMALL_REGION_SIZE
, 0);
2788 if (pended_region
) {
2789 deallocate_pages(szone
, SMALL_REGION_ADDRESS(pended_region
), SMALL_REGION_SIZE
, 0);
static size_t
szone_good_size(szone_t *szone, size_t size) {
    if (size <= 31 * TINY_QUANTUM) {
        msize_t msize = (size + TINY_QUANTUM - 1) >> SHIFT_TINY_QUANTUM;
        if (! msize) msize = 1;
        return msize << SHIFT_TINY_QUANTUM;
    }
    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
        msize_t msize = (size + SMALL_QUANTUM - 1) >> SHIFT_SMALL_QUANTUM;
        if (! msize) msize = 1;
        return msize << SHIFT_SMALL_QUANTUM;
    } else {
        unsigned        num_pages;
        num_pages = round_page(size) >> vm_page_shift;
        if (!num_pages) num_pages = 1; // minimal allocation size for this szone
        return num_pages << vm_page_shift;
    }
}
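
/*
 * Worked example (comment only): what the rounding above produces for a few
 * request sizes, assuming the 16-byte tiny quantum and 4K pages; small-range
 * results depend on whatever SHIFT_SMALL_QUANTUM selects.
 *
 *   szone_good_size(szone, 1)       -> 16       (one tiny quantum)
 *   szone_good_size(szone, 200)     -> 208      (13 tiny quanta of 16 bytes)
 *   szone_good_size(szone, 496)     -> 496      (31 tiny quanta, the tiny limit)
 *   szone_good_size(szone, 1 << 20) -> 1 << 20  (already a whole number of pages)
 */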
2814 unsigned szone_check_counter
= 0;
2815 unsigned szone_check_start
= 0;
2816 unsigned szone_check_modulo
= 1;
2819 szone_check_all(szone_t
*szone
, const char *function
) {
2822 CHECK_LOCKED(szone
, __PRETTY_FUNCTION__
);
2823 while (index
< szone
->num_tiny_regions
) {
2824 tiny_region_t
*region
= szone
->tiny_regions
+ index
++;
2825 if (! szone_check_tiny_region(szone
, region
)) {
2826 SZONE_UNLOCK(szone
);
2827 szone
->debug_flags
&= ~ CHECK_REGIONS
;
2828 malloc_printf("*** malloc[%d]: Tiny region %d incorrect szone_check_all(%s) counter=%d\n", getpid(), index
-1, function
, szone_check_counter
);
2829 szone_error(szone
, "Check: tiny region incorrect", NULL
);
2835 while (index
< NUM_TINY_SLOTS
) {
2836 if (! tiny_free_list_check(szone
, index
)) {
2837 SZONE_UNLOCK(szone
);
2838 szone
->debug_flags
&= ~ CHECK_REGIONS
;
2839 malloc_printf("*** malloc[%d]: Tiny free list incorrect (slot=%d) szone_check_all(%s) counter=%d\n", getpid(), index
, function
, szone_check_counter
);
2840 szone_error(szone
, "Check: tiny free list incorrect", NULL
);
2846 index
= 0; while (index
< szone
->num_small_regions
) {
2847 small_region_t
*region
= szone
->small_regions
+ index
++;
2848 if (! szone_check_small_region(szone
, region
)) {
2849 SZONE_UNLOCK(szone
);
2850 szone
->debug_flags
&= ~ CHECK_REGIONS
;
2851 malloc_printf("*** malloc[%d]: Small region %d incorrect szone_check_all(%s) counter=%d\n", getpid(), index
-1, function
, szone_check_counter
);
2852 szone_error(szone
, "Check: small region incorrect", NULL
);
2857 while (index
< NUM_SMALL_SLOTS
) {
2858 if (! small_free_list_check(szone
, index
)) {
2859 SZONE_UNLOCK(szone
);
2860 szone
->debug_flags
&= ~ CHECK_REGIONS
;
2861 malloc_printf("*** malloc[%d]: Small free list incorrect (grain=%d) szone_check_all(%s) counter=%d\n", getpid(), index
, function
, szone_check_counter
);
2862 szone_error(szone
, "Check: small free list incorrect", NULL
);
2867 SZONE_UNLOCK(szone
);
2868 // szone_print(szone, 1);
2873 szone_check(szone_t
*szone
) {
2874 if (! (++szone_check_counter
% 10000)) {
2875 malloc_printf("At szone_check counter=%d\n", szone_check_counter
);
2877 if (szone_check_counter
< szone_check_start
) return 1;
2878 if (szone_check_counter
% szone_check_modulo
) return 1;
2879 return szone_check_all(szone
, "");
2882 static kern_return_t
2883 szone_ptr_in_use_enumerator(task_t task
, void *context
,
2884 unsigned type_mask
, vm_address_t zone_address
, memory_reader_t reader
,
2885 vm_range_recorder_t recorder
) {
2888 if (!reader
) reader
= _szone_default_reader
;
2889 // malloc_printf("Enumerator for zone %p\n", zone_address);
2890 err
= reader(task
, zone_address
, sizeof(szone_t
), (void **)&szone
);
2891 if (err
) return err
;
2892 // malloc_printf("Tiny ptrs enumeration for zone %p\n", zone_address);
2893 err
= tiny_in_use_enumerator(task
, context
, type_mask
,
2894 (vm_address_t
)szone
->tiny_regions
, szone
->num_tiny_regions
, szone
->tiny_bytes_free_at_end
, reader
, recorder
);
2895 if (err
) return err
;
2896 // malloc_printf("Small ptrs enumeration for zone %p\n", zone_address);
2897 err
= small_in_use_enumerator(task
, context
, type_mask
,
2898 (vm_address_t
)szone
->small_regions
, szone
->num_small_regions
, szone
->small_bytes_free_at_end
, reader
, recorder
);
2899 if (err
) return err
;
2900 // malloc_printf("Large ptrs enumeration for zone %p\n", zone_address);
2901 err
= large_in_use_enumerator(task
, context
, type_mask
,
2902 (vm_address_t
)szone
->large_entries
, szone
->num_large_entries
, reader
,
2904 if (err
) return err
;
2905 // malloc_printf("Huge ptrs enumeration for zone %p\n", zone_address);
2906 err
= huge_in_use_enumerator(task
, context
, type_mask
,
2907 (vm_address_t
)szone
->huge_entries
, szone
->num_huge_entries
, reader
,
2912 // Following method is deprecated: use scalable_zone_statistics instead
2914 scalable_zone_info(malloc_zone_t
*zone
, unsigned *info_to_fill
, unsigned count
) {
2915 szone_t
*szone
= (void *)zone
;
2917 // We do not lock to facilitate debug
2918 info
[4] = szone
->num_tiny_objects
;
2919 info
[5] = szone
->num_bytes_in_tiny_objects
;
2920 info
[6] = szone
->num_small_objects
;
2921 info
[7] = szone
->num_bytes_in_small_objects
;
2922 info
[8] = szone
->num_large_objects_in_use
;
2923 info
[9] = szone
->num_bytes_in_large_objects
;
2924 info
[10] = szone
->num_huge_entries
;
2925 info
[11] = szone
->num_bytes_in_huge_objects
;
2926 info
[12] = szone
->debug_flags
;
2927 info
[0] = info
[4] + info
[6] + info
[8] + info
[10];
2928 info
[1] = info
[5] + info
[7] + info
[9] + info
[11];
2929 info
[3] = szone
->num_tiny_regions
* TINY_REGION_SIZE
+ szone
->num_small_regions
* SMALL_REGION_SIZE
+ info
[9] + info
[11];
2930 info
[2] = info
[3] - szone
->tiny_bytes_free_at_end
- szone
->small_bytes_free_at_end
;
2931 memcpy(info_to_fill
, info
, sizeof(unsigned)*count
);
2935 szone_print(szone_t
*szone
, boolean_t verbose
) {
2939 scalable_zone_info((void *)szone
, info
, 13);
2940 malloc_printf("Scalable zone %p: inUse=%d(%y) touched=%y allocated=%y flags=%d\n", szone
, info
[0], info
[1], info
[2], info
[3], info
[12]);
2941 malloc_printf("\ttiny=%d(%y) small=%d(%y) large=%d(%y) huge=%d(%y)\n", info
[4], info
[5], info
[6], info
[7], info
[8], info
[9], info
[10], info
[11]);
2943 malloc_printf("%d tiny regions: \n", szone
->num_tiny_regions
);
2944 while (index
< szone
->num_tiny_regions
) {
2945 tiny_region_t
*region
= szone
->tiny_regions
+ index
;
2946 print_tiny_region(verbose
, *region
, (index
== szone
->num_tiny_regions
- 1) ? szone
->tiny_bytes_free_at_end
: 0);
2949 if (verbose
) print_tiny_free_list(szone
);
2951 malloc_printf("%d small regions: \n", szone
->num_small_regions
);
2953 while (index
< szone
->num_small_regions
) {
2954 small_region_t
*region
= szone
->small_regions
+ index
;
2955 print_small_region(szone
, verbose
, region
, (index
== szone
->num_small_regions
- 1) ? szone
->small_bytes_free_at_end
: 0);
2958 if (verbose
) print_small_free_list(szone
);
2959 SZONE_UNLOCK(szone
);
static void
szone_log(malloc_zone_t *zone, void *log_address) {
    szone_t     *szone = (void *)zone;
    szone->log_address = log_address;
}

static void
szone_force_lock(szone_t *szone) {
    // malloc_printf("szone_force_lock\n");
    SZONE_LOCK(szone);
}

static void
szone_force_unlock(szone_t *szone) {
    // malloc_printf("szone_force_unlock\n");
    SZONE_UNLOCK(szone);
}
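
/*
 * Illustrative sketch (not compiled into the allocator): force_lock and
 * force_unlock exist so that the malloc layer can quiesce a zone around
 * fork(); something along these lines runs in the pre/post-fork handlers.
 * The handler names below are illustrative only, not the actual Libc entry
 * points.
 */
#if 0
static void example_prepare_fork(szone_t *szone)      { szone_force_lock(szone); }
static void example_parent_after_fork(szone_t *szone) { szone_force_unlock(szone); }
static void example_child_after_fork(szone_t *szone)  { szone_force_unlock(szone); }
#endif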
2981 scalable_zone_statistics(malloc_zone_t
*zone
, malloc_statistics_t
*stats
, unsigned subzone
) {
2982 szone_t
*szone
= (void *)zone
;
2985 stats
->blocks_in_use
= szone
->num_tiny_objects
;
2986 stats
->size_in_use
= szone
->num_bytes_in_tiny_objects
;
2987 stats
->size_allocated
= szone
->num_tiny_regions
* TINY_REGION_SIZE
;
2988 stats
->max_size_in_use
= stats
->size_allocated
- szone
->tiny_bytes_free_at_end
;
2991 stats
->blocks_in_use
= szone
->num_small_objects
;
2992 stats
->size_in_use
= szone
->num_bytes_in_small_objects
;
2993 stats
->size_allocated
= szone
->num_small_regions
* SMALL_REGION_SIZE
;
2994 stats
->max_size_in_use
= stats
->size_allocated
- szone
->small_bytes_free_at_end
;
2997 stats
->blocks_in_use
= szone
->num_large_objects_in_use
;
2998 stats
->size_in_use
= szone
->num_bytes_in_large_objects
;
2999 stats
->max_size_in_use
= stats
->size_allocated
= stats
->size_in_use
;
3002 stats
->blocks_in_use
= szone
->num_huge_entries
;
3003 stats
->size_in_use
= szone
->num_bytes_in_huge_objects
;
3004 stats
->max_size_in_use
= stats
->size_allocated
= stats
->size_in_use
;
3011 szone_statistics(szone_t
*szone
, malloc_statistics_t
*stats
) {
3012 stats
->blocks_in_use
= szone
->num_tiny_objects
+ szone
->num_small_objects
+ szone
->num_large_objects_in_use
+ szone
->num_huge_entries
;
3013 size_t big_and_huge
= szone
->num_bytes_in_large_objects
+ szone
->num_bytes_in_huge_objects
;
3014 stats
->size_in_use
= szone
->num_bytes_in_tiny_objects
+ szone
->num_bytes_in_small_objects
+ big_and_huge
;
3015 stats
->max_size_in_use
= stats
->size_allocated
= szone
->num_tiny_regions
* TINY_REGION_SIZE
+ szone
->num_small_regions
* SMALL_REGION_SIZE
+ big_and_huge
;
3016 // Now we account for the untouched areas
3017 stats
->max_size_in_use
-= szone
->tiny_bytes_free_at_end
;
3018 stats
->max_size_in_use
-= szone
->small_bytes_free_at_end
;
static const struct malloc_introspection_t szone_introspect = {
    (void *)szone_ptr_in_use_enumerator,
    (void *)szone_good_size,
    (void *)szone_check,
    (void *)szone_print,
    (void *)szone_log,
    (void *)szone_force_lock,
    (void *)szone_force_unlock,
    (void *)szone_statistics
}; // marked as const to spare the DATA section
3033 create_scalable_zone(size_t initial_size
, unsigned debug_flags
) {
3037 size_t msize_used
= 0;
3038 // malloc_printf("=== create_scalable_zone(%d,%d) - %s\n", initial_size, debug_flags, (DEBUG_MALLOC) ? "**** DEBUG" : "");
3040 if ((1 << vm_page_shift
) == vm_page_size
) {
3041 // malloc_printf("vm_page_shift validated to be %d\n", vm_page_shift);
3043 malloc_printf("*** vm_page_shift incorrectly set to %d\n", vm_page_shift
);
3047 if (!vm_page_shift
) {
3049 vm_page_shift
= 12; // the minimal for page sizes
3050 page
= 1 << vm_page_shift
;
3051 while (page
!= vm_page_size
) { page
+= page
; vm_page_shift
++;};
    addr = allocate_pages(NULL, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC));
    if (!addr) return NULL;
    szone = (void *)addr;
    msize = (sizeof(szone_t) + SMALL_QUANTUM - 1) >> SHIFT_SMALL_QUANTUM;
    // malloc_printf("sizeof(szone_t)=%d msize for 1st block=%d; wasted %d bytes\n", sizeof(szone_t), msize, (msize << SHIFT_SMALL_QUANTUM) - sizeof(szone_t));
    small_meta_header(szone)[0] = msize;
    szone->tiny_regions = szone->initial_tiny_regions;
    szone->small_regions = szone->initial_small_regions;
    msize_used += msize; szone->num_small_objects++;
    szone->basic_zone.version = 3;
    szone->basic_zone.size = (void *)szone_size;
    szone->basic_zone.malloc = (void *)szone_malloc;
    szone->basic_zone.calloc = (void *)szone_calloc;
    szone->basic_zone.valloc = (void *)szone_valloc;
    szone->basic_zone.free = (void *)szone_free;
    szone->basic_zone.realloc = (void *)szone_realloc;
    szone->basic_zone.destroy = (void *)szone_destroy;
    szone->basic_zone.batch_malloc = (void *)szone_batch_malloc;
    szone->basic_zone.batch_free = (void *)szone_batch_free;
    szone->basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;
    LOCK_INIT(szone->lock);
#if 0
#warning CHECK_REGIONS enabled
    debug_flags |= CHECK_REGIONS; // force region consistency checks
#endif
#if 0
#warning LOG enabled
    szone->log_address = ~0; // log all addresses
#endif
    szone->debug_flags = debug_flags;
    szone->small_regions[0] = addr >> SMALL_BLOCKS_ALIGN;
    szone->num_small_regions = 1;
    msize_t free_msize = NUM_SMALL_BLOCKS - msize;
    small_meta_header(szone)[msize] = free_msize;
    szone->small_bytes_free_at_end = free_msize << SHIFT_SMALL_QUANTUM;
    CHECK(szone, __PRETTY_FUNCTION__);
#if 0
    write(1, "Malloc szone created\n", 23);
#endif
    return (malloc_zone_t *)szone;
}
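
/*
 * Illustrative only: a minimal sketch of how a caller might create an extra
 * scalable zone with create_scalable_zone() and allocate from it.  The
 * registration and per-zone allocation entry points (malloc_zone_register,
 * malloc_zone_malloc, malloc_zone_free, malloc_zone_unregister) are assumed
 * to come from the public zone API in <malloc/malloc.h>; scratch_zone_example
 * is a hypothetical helper, not part of this file.
 */
#if 0
#include <malloc/malloc.h>
#include "scalable_malloc.h"

static void
scratch_zone_example(void)
{
    malloc_zone_t *zone = create_scalable_zone(0, 0); /* no debug flags */
    void *p;

    if (!zone) return;
    malloc_zone_register(zone);        /* make it visible to the zone machinery */
    p = malloc_zone_malloc(zone, 128); /* allocate 128 bytes from this zone */
    malloc_zone_free(zone, p);
    malloc_zone_unregister(zone);      /* the zone's destroy callback tears it down */
    malloc_destroy_zone(zone);
}
#endif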
/*********	Support code for emacs unexec	************/

/* History of freezedry version numbers:
 *
 * 1) Old malloc (before the scalable malloc implementation in this file
 *    existed).
 * 2) Original freezedrying code for scalable malloc.  This code was apparently
 *    based on the old freezedrying code and was fundamentally flawed in its
 *    assumption that tracking allocated memory regions was adequate to fake
 *    operations on freezedried memory.  This doesn't work, since scalable
 *    malloc does not store flags in front of large page-aligned allocations.
 * 3) Original szone-based freezedrying code.
 * 4) Fresher malloc with tiny zone
 *
 * No version backward compatibility is provided, but the version number does
 * make it possible for malloc_jumpstart() to return an error if the application
 * was freezedried with an older version of malloc.
 */
#define MALLOC_FREEZEDRY_VERSION 4
typedef struct {
    unsigned version;
    unsigned nszones;
    szone_t *szones;
} malloc_frozen;

static void *
frozen_malloc(szone_t *zone, size_t new_size) {
    return malloc(new_size);
}

static void *
frozen_calloc(szone_t *zone, size_t num_items, size_t size) {
    return calloc(num_items, size);
}

static void *
frozen_valloc(szone_t *zone, size_t new_size) {
    return valloc(new_size);
}

static void *
frozen_realloc(szone_t *zone, void *ptr, size_t new_size) {
    size_t old_size = szone_size(zone, ptr);
    void *new_ptr;
    if (new_size <= old_size) {
        return ptr; // non-growing realloc: keep using the freezedried block
    }
    new_ptr = malloc(new_size);
    if (old_size > 0) {
        memcpy(new_ptr, ptr, old_size);
    }
    return new_ptr;
}

static void
frozen_free(szone_t *zone, void *ptr) {
    // freeing freezedried memory is a no-op
}

static void
frozen_destroy(szone_t *zone) {
    // destroying a freezedried zone is a no-op
}
/*********	Pseudo-private API for emacs unexec	************/

/*
 * malloc_freezedry() records all of the szones in use, so that they can be
 * partially reconstituted by malloc_jumpstart().  Due to the differences
 * between reconstituted memory regions and those created by the szone code,
 * care is taken not to reallocate from the freezedried memory, except in the
 * case of a non-growing realloc().
 *
 * Due to the flexibility provided by the zone registration mechanism, it is
 * impossible to implement generic freezedrying for any zone type.  This code
 * only handles applications that use the szone allocator, so malloc_freezedry()
 * returns 0 (error) if any non-szone zones are encountered.
 *
 * A usage sketch follows malloc_freezedry() below.
 */
int
malloc_freezedry(void) {
    extern unsigned malloc_num_zones;
    extern malloc_zone_t **malloc_zones;
    malloc_frozen *data;
    unsigned i;

    /* Allocate space in which to store the freezedry state. */
    data = (malloc_frozen *) malloc(sizeof(malloc_frozen));

    /* Set freezedry version number so that malloc_jumpstart() can check for compatibility. */
    data->version = MALLOC_FREEZEDRY_VERSION;

    /* Allocate the array of szone pointers. */
    data->nszones = malloc_num_zones;
    data->szones = (szone_t *) calloc(malloc_num_zones, sizeof(szone_t));

    /* Fill in the array of szone structures.  They are copied rather than
     * referenced, since the originals are likely to be clobbered during malloc
     * initialization. */
    for (i = 0; i < malloc_num_zones; i++) {
        if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) {
            /* Unknown zone type. */
            free(data->szones);
            free(data);
            return 0;
        }
        memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t));
    }

    return (int) data;
}
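
/*
 * Illustrative only: a minimal sketch of the freezedry round trip described
 * above, as an unexec-style dumper might drive it.  saved_cookie, before_dump
 * and after_restart are hypothetical names; how the cookie survives the dump
 * is up to the dumper.  The only contract used here is the one stated above:
 * freezedry before dumping, jumpstart with the same cookie after restart.
 */
#if 0
static int saved_cookie; /* must survive the dump, e.g. in the data segment */

static int
before_dump(void)
{
    saved_cookie = malloc_freezedry();
    return saved_cookie != 0; /* 0 means a non-szone zone was encountered */
}

static int
after_restart(void)
{
    /* Returns nonzero if the freezedry version does not match. */
    return malloc_jumpstart(saved_cookie);
}
#endif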
int
malloc_jumpstart(int cookie) {
    malloc_frozen *data = (malloc_frozen *) cookie;
    unsigned i;

    if (data->version != MALLOC_FREEZEDRY_VERSION) {
        /* Unsupported freezedry version. */
        return 1;
    }

    for (i = 0; i < data->nszones; i++) {
        /* Set function pointers.  Even the functions that stay the same must be
         * set, since there are no guarantees that they will be mapped to the
         * same addresses. */
        data->szones[i].basic_zone.size = (void *) szone_size;
        data->szones[i].basic_zone.malloc = (void *) frozen_malloc;
        data->szones[i].basic_zone.calloc = (void *) frozen_calloc;
        data->szones[i].basic_zone.valloc = (void *) frozen_valloc;
        data->szones[i].basic_zone.free = (void *) frozen_free;
        data->szones[i].basic_zone.realloc = (void *) frozen_realloc;
        data->szones[i].basic_zone.destroy = (void *) frozen_destroy;
        data->szones[i].basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;

        /* Register the freezedried zone. */
        malloc_zone_register(&data->szones[i].basic_zone);