/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Author: Bertrand Serlet, August 1999 */

#import "scalable_malloc.h"

#import <pthread_internals.h> // for spin lock

#include <mach/vm_statistics.h>

/********************* DEFINITIONS ************************/

static unsigned vm_page_shift = 0; // guaranteed to be initialized by zone creation
#define DEBUG_MALLOC 0 // set to one to debug malloc itself
#define DEBUG_CLIENT 0 // set to one to help debug a nasty memory smasher

#if DEBUG_MALLOC
#warning DEBUG ENABLED
#define INLINE
#else
#define INLINE inline
#endif
#define CHECK_REGIONS (1 << 31)

#define VM_COPY_THRESHOLD (40 * 1024)
    // When all memory is touched after a copy, vm_copy() is always a lose
    // But if the memory is only read, vm_copy() wins over memmove() at 3 or 4 pages (on a G3/300MHz)
    // This must be larger than LARGE_THRESHOLD

#define KILL_THRESHOLD (32 * 1024)

#define LARGE_THRESHOLD (3 * vm_page_size) // at or above this use "large"

#define SHIFT_QUANTUM 4 // Required for AltiVec
#define QUANTUM (1 << SHIFT_QUANTUM) // allocation quantum
#define MIN_BLOCK 1 // minimum size, in QUANTUM multiples

/* The header of a small block in use contains its size (expressed as multiples of QUANTUM, and header included), or 0;
If 0 then the block is either free (in which case the size is directly at the block itself), or the last block (indicated by either being beyond range, or having 0 in the block itself) */

#define PTR_HEADER_SIZE (sizeof(msize_t))
#define FOLLOWING_PTR(ptr,msize) (((char *)(ptr)) + ((msize) << SHIFT_QUANTUM))
#define PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-2]

#define THIS_FREE 0x8000 // indicates this block is free
#define PREV_FREE 0x4000 // indicates previous block is free
#define MSIZE_FLAGS_FOR_PTR(ptr) (((msize_t *)(ptr))[-1])
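/*
 * Worked example (illustrative, not part of the original source): a small in-use
 * block of 3 * QUANTUM = 48 bytes (header included) whose predecessor is free has
 * MSIZE_FLAGS_FOR_PTR(ptr) == PREV_FREE | 3 == 0x4003; once the block itself is
 * freed the header becomes THIS_FREE | 3 == 0x8003. The code recovers the msize
 * with "& ~PREV_FREE" for in-use blocks and "& ~THIS_FREE" for free blocks.
 */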
#define REGION_SIZE (1 << (16 - 2 + SHIFT_QUANTUM)) // since we only have 16 bits for msize_t, and 1 bit is taken by THIS_FREE, and 1 by PREV_FREE

#define INITIAL_NUM_REGIONS 8 // must always be at least 2 to always have 1 slot empty

#define CHECKSUM_MAGIC 0x357B

#define PROTECT_SMALL 0 // Should be 0: 1 is too slow for normal use

#define MAX_RECORDER_BUFFER 256
typedef unsigned short msize_t; // a size in multiples of QUANTUM (i.e. shifted by SHIFT_QUANTUM)
typedef struct {
    unsigned address_and_num_pages;
    // this type represents both an address and a number of pages
    // the low bits are the number of pages
    // the high bits are the address
    // note that the exact number of bits used for each depends on the page size
    // also, this cannot represent pointers larger than 1 << (vm_page_shift * 2)
} compact_range_t;
typedef vm_address_t region_t;

typedef compact_range_t large_entry_t;

typedef vm_range_t huge_entry_t;

typedef unsigned short grain_t;
typedef struct {
    malloc_zone_t basic_zone;
    pthread_lock_t lock;
    unsigned debug_flags;
    void *log_address;

    /* Regions for small objects */
    unsigned num_regions;
    region_t *regions;
    // this array is always created with 1 extra slot to be able to add a region without taking memory right away
    unsigned last_region_hit;
    free_list_t *free_list[MAX_GRAIN];
    unsigned num_bytes_free_in_last_region; // these bytes are cleared
    unsigned num_small_objects;
    unsigned num_bytes_in_small_objects;

    /* large objects: vm_page_shift <= log2(size) < 2 * vm_page_shift */
    unsigned num_large_objects_in_use;
    unsigned num_large_entries;
    unsigned num_bytes_in_large_objects;
    large_entry_t *large_entries;
    // large_entries are hashed by location
    // large_entries that are 0 should be discarded

    /* huge objects: log2(size) >= 2 * vm_page_shift */
    unsigned num_bytes_in_huge_objects;
    unsigned num_huge_entries;
    huge_entry_t *huge_entries;
} szone_t;
static void *szone_malloc(szone_t *szone, size_t size);
static void *szone_valloc(szone_t *szone, size_t size);
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested);
static void szone_free(szone_t *szone, void *ptr);
static size_t szone_good_size(szone_t *szone, size_t size);
static boolean_t szone_check_all(szone_t *szone, const char *function);
static void szone_print(szone_t *szone, boolean_t verbose);
static INLINE region_t *region_for_ptr_no_lock(szone_t *szone, const void *ptr);
static vm_range_t large_free_no_lock(szone_t *szone, large_entry_t *entry);

#define LOG(szone,ptr) (szone->log_address && (szone->num_small_objects > 8) && (((unsigned)szone->log_address == -1) || (szone->log_address == (void *)(ptr))))
/********************* ACCESSOR MACROS ************************/

#define SZONE_LOCK(szone) LOCK(szone->lock)
#define SZONE_UNLOCK(szone) UNLOCK(szone->lock)

#define CHECK(szone,fun) if ((szone)->debug_flags & CHECK_REGIONS) szone_check_all(szone, fun)

#define REGION_ADDRESS(region) (region)
#define REGION_END(region) (region + REGION_SIZE)

#define LARGE_ENTRY_ADDRESS(entry) (((entry).address_and_num_pages >> vm_page_shift) << vm_page_shift)
#define LARGE_ENTRY_NUM_PAGES(entry) ((entry).address_and_num_pages & ((1 << vm_page_shift) - 1))
#define LARGE_ENTRY_SIZE(entry) (LARGE_ENTRY_NUM_PAGES(entry) << vm_page_shift)
#define LARGE_ENTRY_MATCHES(entry,ptr) (!(((entry).address_and_num_pages - (unsigned)(ptr)) >> vm_page_shift))
#define LARGE_ENTRY_IS_EMPTY(entry) (!((entry).address_and_num_pages))
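/*
 * Worked example (illustrative, not part of the original source): with a 4KB page
 * (vm_page_shift == 12), a large entry covering 3 pages at address 0x00054000 is
 * stored as address_and_num_pages = 0x00054000 | 3 = 0x00054003.
 * LARGE_ENTRY_ADDRESS() then recovers 0x00054000, LARGE_ENTRY_NUM_PAGES() recovers
 * 3, and LARGE_ENTRY_SIZE() is 3 << 12 = 12288 bytes.
 */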
/********************* VERY LOW LEVEL UTILITIES ************************/

static void szone_error(szone_t *szone, const char *msg, const void *ptr) {
    if (szone) SZONE_UNLOCK(szone);
    if (ptr) {
        malloc_printf("*** malloc[%d]: error for object %p: %s\n", getpid(), ptr, msg);
#if DEBUG_MALLOC
        szone_print(szone, 1);
#endif
    } else {
        malloc_printf("*** malloc[%d]: error: %s\n", getpid(), msg);
    }
#if DEBUG_CLIENT
    malloc_printf("*** Sleeping to help debug\n");
    sleep(3600); // to help debug
#endif
}
static void protect(szone_t *szone, vm_address_t address, vm_size_t size, unsigned protection, unsigned debug_flags) {
    kern_return_t err;
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) {
        err = vm_protect(mach_task_self(), address - vm_page_size, vm_page_size, 0, protection);
        if (err) malloc_printf("*** malloc[%d]: Can't protect(%x) region for prelude guard page at 0x%x\n", getpid(), protection, address - vm_page_size);
    }
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) {
        err = vm_protect(mach_task_self(), (vm_address_t)(address + size), vm_page_size, 0, protection);
        if (err) malloc_printf("*** malloc[%d]: Can't protect(%x) region for postlude guard page at 0x%x\n", getpid(), protection, address + size);
    }
}
static vm_address_t allocate_pages(szone_t *szone, size_t size, unsigned debug_flags, int vm_page_label) {
    kern_return_t err;
    vm_address_t addr;
    boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    size_t allocation_size = round_page(size);
    if (!allocation_size) allocation_size = vm_page_size;
    if (add_guard_pages) allocation_size += 2 * vm_page_size;
    err = vm_allocate(mach_task_self(), &addr, allocation_size, vm_page_label | 1);
    if (err) {
        malloc_printf("*** malloc: vm_allocate(size=%d) failed with %d\n", size, err);
        szone_error(szone, "Can't allocate region", NULL);
        return NULL;
    }
    if (add_guard_pages) {
        addr += vm_page_size;
        protect(szone, addr, size, 0, debug_flags);
    }
    return addr;
}
static void deallocate_pages(szone_t *szone, vm_address_t addr, size_t size, unsigned debug_flags) {
    kern_return_t err;
    boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    if (add_guard_pages) {
        addr -= vm_page_size;
        size += 2 * vm_page_size;
    }
    err = vm_deallocate(mach_task_self(), addr, size);
    if (err) {
        szone_error(szone, "Can't deallocate_pages region", (void *)addr);
    }
}
static kern_return_t _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) {
    *ptr = (void *)address;
    return 0;
}
/********************* FREE LIST UTILITIES ************************/

static INLINE grain_t grain_for_msize(szone_t *szone, msize_t msize) {
    // assumes msize >= MIN_BLOCK
    if (msize < MIN_BLOCK) {
        szone_error(szone, "grain_for_msize: msize too small", NULL);
    }
    return (msize < MAX_GRAIN + MIN_BLOCK) ? msize - MIN_BLOCK : MAX_GRAIN - 1;
}
static INLINE msize_t msize_for_grain(szone_t *szone, grain_t grain) {
    // 0 if multiple sizes
    return (grain < MAX_GRAIN - 1) ? grain + MIN_BLOCK : 0;
}
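/*
 * Illustrative note, not from the original source: with MIN_BLOCK == 1, a block
 * of msize 1 lands in grain 0, msize 2 in grain 1, and so on; every msize of
 * MAX_GRAIN or more shares the last grain (MAX_GRAIN - 1), which is why
 * msize_for_grain() returns 0 ("multiple sizes") for that catch-all grain.
 */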
static INLINE void free_list_checksum(szone_t *szone, free_list_t *ptr) {
    // We always checksum, as testing whether to do it (based on szone->debug_flags) is as fast as doing it
    if (ptr->checksum != (((unsigned)ptr->previous) ^ ((unsigned)ptr->next) ^ CHECKSUM_MAGIC)) {
        szone_error(szone, "Incorrect checksum for freed object - object was probably modified after being freed; break at szone_error", ptr);
    }
}
static INLINE void free_list_set_checksum(szone_t *szone, free_list_t *ptr) {
    // We always set checksum, as testing whether to do it (based on szone->debug_flags) is slower than just doing it
    ptr->checksum = ((unsigned)ptr->previous) ^ ((unsigned)ptr->next) ^ CHECKSUM_MAGIC;
}
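/*
 * Worked example (illustrative, not part of the original source): the checksum is
 * just the XOR of the two link fields with CHECKSUM_MAGIC, so a free block whose
 * previous/next happen to be 0x1000 and 0x2000 stores 0x1000 ^ 0x2000 ^ 0x357B =
 * 0x057B. A stray write into a freed block that touches previous, next or checksum
 * (but not all three consistently) makes free_list_checksum() fire.
 */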
static void free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Adds an item to the proper free list
    // Also marks the header of the block properly
    grain_t grain = grain_for_msize(szone, msize);
    free_list_t *free_ptr = ptr;
    free_list_t *free_head = szone->free_list[grain];
    msize_t *follower = (msize_t *)FOLLOWING_PTR(ptr, msize);
    if (LOG(szone,ptr)) malloc_printf("In free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    if (((unsigned)ptr) & (QUANTUM - 1)) {
        szone_error(szone, "free_list_add_ptr: Unaligned ptr", ptr);
    }
    MSIZE_FLAGS_FOR_PTR(ptr) = msize | THIS_FREE;
    if (free_head) {
        free_list_checksum(szone, free_head);
        if (free_head->previous) {
            malloc_printf("ptr=%p grain=%d free_head=%p previous=%p\n", ptr, grain, free_head, free_head->previous);
            szone_error(szone, "free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
        }
        if (!(MSIZE_FLAGS_FOR_PTR(free_head) & THIS_FREE)) {
            malloc_printf("ptr=%p grain=%d free_head=%p\n", ptr, grain, free_head);
            szone_error(szone, "free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
        }
        if ((grain != MAX_GRAIN-1) && (MSIZE_FLAGS_FOR_PTR(free_head) != (THIS_FREE | msize))) {
            malloc_printf("ptr=%p grain=%d free_head=%p previous_msize=%d\n", ptr, grain, free_head, MSIZE_FLAGS_FOR_PTR(free_head));
            szone_error(szone, "free_list_add_ptr: Internal invariant broken (incorrect msize)", ptr);
        }
        free_head->previous = free_ptr;
        free_list_set_checksum(szone, free_head);
    }
    free_ptr->previous = NULL;
    free_ptr->next = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->free_list[grain] = free_ptr;
    // mark the end of this block
    PREVIOUS_MSIZE(follower) = msize;
    MSIZE_FLAGS_FOR_PTR(follower) |= PREV_FREE;
}
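/*
 * Illustrative layout note, not from the original source: after free_list_add_ptr()
 * a free block of msize N carries THIS_FREE | N in its own header, the two bytes
 * just before the following block's header (PREVIOUS_MSIZE of the follower) carry
 * N, and the follower's header has PREV_FREE set - which is exactly what
 * small_free_no_lock() relies on to coalesce backwards without scanning.
 */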
static void free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Removes item in the proper free list
    // msize could be read, but all callers have it so we pass it in
    grain_t grain = grain_for_msize(szone, msize);
    free_list_t *free_ptr = ptr;
    free_list_t *next = free_ptr->next;
    free_list_t *previous = free_ptr->previous;
    if (LOG(szone,ptr)) malloc_printf("In free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    free_list_checksum(szone, free_ptr);
    if (!previous) {
        if (szone->free_list[grain] != ptr) {
            malloc_printf("ptr=%p grain=%d msize=%d szone->free_list[grain]=%p\n", ptr, grain, msize, szone->free_list[grain]);
            szone_error(szone, "free_list_remove_ptr: Internal invariant broken (szone->free_list[grain])", ptr);
        }
        szone->free_list[grain] = next;
    } else {
        previous->next = next;
        free_list_set_checksum(szone, previous);
    }
    if (next) {
        next->previous = previous;
        free_list_set_checksum(szone, next);
    }
    MSIZE_FLAGS_FOR_PTR(FOLLOWING_PTR(ptr, msize)) &= ~ PREV_FREE;
}
static boolean_t free_list_check(szone_t *szone, grain_t grain) {
    unsigned count = 0;
    free_list_t *ptr = szone->free_list[grain];
    free_list_t *previous = NULL;
    while (ptr) {
        msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
        count++;
        if (!(msize_and_free & THIS_FREE)) {
            malloc_printf("*** malloc[%d]: In-use ptr in free list grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
            return 0;
        }
        if (((unsigned)ptr) & (QUANTUM - 1)) {
            malloc_printf("*** malloc[%d]: Unaligned ptr in free list grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
            return 0;
        }
        if (!region_for_ptr_no_lock(szone, ptr)) {
            malloc_printf("*** malloc[%d]: Ptr not in szone grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
            return 0;
        }
        free_list_checksum(szone, ptr);
        if (ptr->previous != previous) {
            malloc_printf("*** malloc[%d]: Previous incorrectly set grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
            return 0;
        }
        if ((grain != MAX_GRAIN-1) && (msize_and_free != (msize_for_grain(szone, grain) | THIS_FREE))) {
            malloc_printf("*** malloc[%d]: Incorrect msize for grain=%d count=%d ptr=%p msize=%d\n", getpid(), grain, count, ptr, msize_and_free);
            return 0;
        }
        previous = ptr;
        ptr = ptr->next;
    }
    return 1;
}
/********************* SMALL BLOCKS MANAGEMENT ************************/

static INLINE region_t *region_for_ptr_no_lock(szone_t *szone, const void *ptr) {
    region_t *first_region = szone->regions;
    region_t *region = first_region + szone->last_region_hit;
    region_t this = *region;
    if ((unsigned)ptr - (unsigned)REGION_ADDRESS(this) < (unsigned)REGION_SIZE) {
        return region;
    } else {
        // We iterate in reverse order because last regions are more likely
        region = first_region + szone->num_regions;
        while (region != first_region) {
            region--;
            this = *region;
            if ((unsigned)ptr - (unsigned)REGION_ADDRESS(this) < (unsigned)REGION_SIZE) {
                szone->last_region_hit = region - first_region;
                return region;
            }
        }
        return NULL;
    }
}
static INLINE void small_free_no_lock(szone_t *szone, region_t *region, void *ptr, msize_t msize_and_free) {
    msize_t msize = msize_and_free & ~ PREV_FREE;
    size_t original_size = msize << SHIFT_QUANTUM;
    void *next_block = ((char *)ptr + original_size);
    msize_t next_msize_and_free;
    if (LOG(szone,ptr)) malloc_printf("In small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    if (msize < MIN_BLOCK) {
        malloc_printf("In small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
        szone_error(szone, "Trying to free small block that is too small", ptr);
    }
    if (((vm_address_t)next_block < REGION_END(*region)) && ((next_msize_and_free = MSIZE_FLAGS_FOR_PTR(next_block)) & THIS_FREE)) {
        // If the next block is free, we coalesce
        msize_t next_msize = next_msize_and_free & ~THIS_FREE;
        if (LOG(szone,ptr)) malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", ptr, msize, next_block, next_msize);
        free_list_remove_ptr(szone, next_block, next_msize);
        msize += next_msize;
    }
    // Let's try to coalesce backwards now
    if (msize_and_free & PREV_FREE) {
        msize_t previous_msize = PREVIOUS_MSIZE(ptr);
        void *previous = ptr - (previous_msize << SHIFT_QUANTUM);
        if (LOG(szone,previous)) malloc_printf("In small_free_no_lock(), coalesced backwards for %p previous=%p, msize=%d\n", ptr, previous, previous_msize);
        if (!previous_msize || (previous_msize >= (((vm_address_t)ptr - REGION_ADDRESS(*region)) >> SHIFT_QUANTUM))) {
            szone_error(szone, "Invariant 1 broken when coalescing backwards", ptr);
        }
        if (MSIZE_FLAGS_FOR_PTR(previous) != (previous_msize | THIS_FREE)) {
            malloc_printf("previous=%p its_msize_and_free=0x%x previous_msize=%d\n", previous, MSIZE_FLAGS_FOR_PTR(previous), previous_msize);
            szone_error(szone, "Invariant 3 broken when coalescing backwards", ptr);
        }
        free_list_remove_ptr(szone, previous, previous_msize);
        ptr = previous;
        msize += previous_msize;
        if (msize & PREV_FREE) {
            malloc_printf("In small_free_no_lock(), after coalescing with previous ptr=%p, msize=%d previous_msize=%d\n", ptr, msize, previous_msize);
            szone_error(szone, "Incorrect coalescing", ptr);
        }
    }
    if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) {
        if (!msize) {
            szone_error(szone, "Incorrect size information - block header was damaged", ptr);
        } else {
            memset(ptr, 0x55, (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE);
        }
    }
    free_list_add_ptr(szone, ptr, msize);
    CHECK(szone, "small_free_no_lock: added to free list");
    szone->num_small_objects--;
    szone->num_bytes_in_small_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
}
static void *small_malloc_from_region_no_lock(szone_t *szone, msize_t msize) {
    // Allocates from the last region or a freshly allocated region
    region_t *last_region = szone->regions + szone->num_regions - 1;
    vm_address_t new_address;
    void *ptr;
    msize_t msize_and_free;
    unsigned region_capacity;
    ptr = (void *)(REGION_END(*last_region) - szone->num_bytes_free_in_last_region + PTR_HEADER_SIZE);
    if (((vm_address_t)ptr) & (QUANTUM - 1)) {
        szone_error(szone, "Invariant broken while using end of region", ptr);
    }
    msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
    if (msize_and_free != PREV_FREE && msize_and_free != 0) {
        malloc_printf("*** malloc[%d]: msize_and_free = %d\n", getpid(), msize_and_free);
        szone_error(szone, "Invariant broken when allocating at end of zone", ptr);
    }
    // In order to make sure we don't have 2 free pointers following themselves, if the last item is a free item, we combine it and clear it
    if (msize_and_free == PREV_FREE) {
        msize_t previous_msize = PREVIOUS_MSIZE(ptr);
        void *previous = ptr - (previous_msize << SHIFT_QUANTUM);
        if (LOG(szone, ptr)) malloc_printf("Combining last with free space at %p\n", ptr);
        if (!previous_msize || (previous_msize >= (((vm_address_t)ptr - REGION_ADDRESS(*last_region)) >> SHIFT_QUANTUM)) || (MSIZE_FLAGS_FOR_PTR(previous) != (previous_msize | THIS_FREE))) {
            szone_error(szone, "Invariant broken when coalescing backwards at end of zone", ptr);
        }
        free_list_remove_ptr(szone, previous, previous_msize);
        szone->num_bytes_free_in_last_region += previous_msize << SHIFT_QUANTUM;
        memset(previous, 0, previous_msize << SHIFT_QUANTUM);
        MSIZE_FLAGS_FOR_PTR(previous) = 0;
        ptr = previous;
    }
    // first try at the end of the last region
    CHECK(szone, __PRETTY_FUNCTION__);
    if (szone->num_bytes_free_in_last_region >= (msize << SHIFT_QUANTUM)) {
        szone->num_bytes_free_in_last_region -= (msize << SHIFT_QUANTUM);
        szone->num_small_objects++;
        szone->num_bytes_in_small_objects += msize << SHIFT_QUANTUM;
        MSIZE_FLAGS_FOR_PTR(ptr) = msize;
        return ptr;
    }
    // time to create a new region
    new_address = allocate_pages(szone, REGION_SIZE, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_SMALL));
    if (!new_address) return NULL;
    // let's prepare to free the remnants of last_region
    if (szone->num_bytes_free_in_last_region >= QUANTUM) {
        msize_t this_msize = szone->num_bytes_free_in_last_region >> SHIFT_QUANTUM;
        // malloc_printf("Entering last block %p size=%d\n", ptr, this_msize << SHIFT_QUANTUM);
        if (this_msize >= MIN_BLOCK) {
            free_list_add_ptr(szone, ptr, this_msize);
        } else {
            // malloc_printf("Leaking last block at %p\n", ptr);
        }
        szone->num_bytes_free_in_last_region -= this_msize << SHIFT_QUANTUM; // to avoid coming back here
    }
    last_region[1] = new_address;
    szone->num_regions++;
    szone->num_bytes_free_in_last_region = REGION_SIZE - QUANTUM + PTR_HEADER_SIZE - (msize << SHIFT_QUANTUM);
    ptr = (void *)(new_address + QUANTUM); // waste the first bytes
    region_capacity = (MSIZE_FLAGS_FOR_PTR(szone->regions) * QUANTUM - PTR_HEADER_SIZE) / sizeof(region_t);
    if (szone->num_regions >= region_capacity) {
        unsigned new_capacity = region_capacity * 2 + 1;
        msize_t new_msize = (new_capacity * sizeof(region_t) + PTR_HEADER_SIZE + QUANTUM - 1) / QUANTUM;
        region_t *new_regions = ptr;
        // malloc_printf("Now %d regions growing regions %p to %d\n", szone->num_regions, szone->regions, new_capacity);
        MSIZE_FLAGS_FOR_PTR(new_regions) = new_msize;
        szone->num_small_objects++;
        szone->num_bytes_in_small_objects += new_msize << SHIFT_QUANTUM;
        memcpy(new_regions, szone->regions, szone->num_regions * sizeof(region_t));
        // We intentionally leak the previous regions pointer to avoid multi-threading crashes if another thread was reading it (unlocked) while we are changing it
        // Given that in practice the number of regions is typically a handful, this should not be a big deal
        szone->regions = new_regions;
        ptr += (new_msize << SHIFT_QUANTUM);
        szone->num_bytes_free_in_last_region -= (new_msize << SHIFT_QUANTUM);
        // malloc_printf("Regions is now %p next ptr is %p\n", szone->regions, ptr);
    }
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += msize << SHIFT_QUANTUM;
    MSIZE_FLAGS_FOR_PTR(ptr) = msize;
    return ptr;
}
static boolean_t szone_check_region(szone_t *szone, region_t *region) {
    void *ptr = (void *)REGION_ADDRESS(*region) + QUANTUM;
    vm_address_t region_end = REGION_END(*region);
    int is_last_region = region == szone->regions + szone->num_regions - 1;
    msize_t prev_free = 0;
    while ((vm_address_t)ptr < region_end) {
        msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
        if (!(msize_and_free & THIS_FREE)) {
            msize_t msize = msize_and_free & ~PREV_FREE;
            if ((msize_and_free & PREV_FREE) != prev_free) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (prev_free=%d) this msize=%d\n", getpid(), ptr, prev_free, msize_and_free);
                return 0;
            }
            if (!msize) {
                int extra = (is_last_region) ? szone->num_bytes_free_in_last_region : QUANTUM;
                if (((unsigned)(ptr + extra)) < region_end) {
                    malloc_printf("*** malloc[%d]: invariant broken at region end: ptr=%p extra=%d index=%d num_regions=%d end=%p\n", getpid(), ptr, extra, region - szone->regions, szone->num_regions, (void *)region_end);
                    return 0;
                }
                break; // last encountered
            }
            if (msize > (LARGE_THRESHOLD / QUANTUM)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p this msize=%d - size is too large\n", getpid(), ptr, msize_and_free);
                return 0;
            }
            if ((msize < MIN_BLOCK) && ((unsigned)ptr != region_end - QUANTUM)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p this msize=%d - size is too small\n", getpid(), ptr, msize_and_free);
                return 0;
            }
            ptr += msize << SHIFT_QUANTUM;
            prev_free = 0;
            if (is_last_region && ((vm_address_t)ptr - PTR_HEADER_SIZE > region_end - szone->num_bytes_free_in_last_region)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p this msize=%d - block extends beyond allocated region\n", getpid(), ptr, msize_and_free);
                return 0;
            }
        } else {
            // free pointer
            msize_t msize = msize_and_free & ~THIS_FREE;
            free_list_t *free_head = ptr;
            msize_t *follower = (void *)FOLLOWING_PTR(ptr, msize);
            if ((msize_and_free & PREV_FREE) && !prev_free) {
                malloc_printf("*** malloc[%d]: invariant broken for free block %p this msize=%d: PREV_FREE set while previous block is in use\n", getpid(), ptr, msize);
                return 0;
            }
            if (msize < MIN_BLOCK) {
                malloc_printf("*** malloc[%d]: invariant broken for free block %p this msize=%d\n", getpid(), ptr, msize);
                return 0;
            }
            if (prev_free) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (2 free in a row)\n", getpid(), ptr);
                return 0;
            }
            free_list_checksum(szone, free_head);
            if (free_head->previous && !(MSIZE_FLAGS_FOR_PTR(free_head->previous) & THIS_FREE)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (previous %p is not a free pointer)\n", getpid(), ptr, free_head->previous);
                return 0;
            }
            if (free_head->next && !(MSIZE_FLAGS_FOR_PTR(free_head->next) & THIS_FREE)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (next is not a free pointer)\n", getpid(), ptr);
                return 0;
            }
            if (PREVIOUS_MSIZE(follower) != msize) {
                malloc_printf("*** malloc[%d]: invariant broken for free %p followed by %p in region [%x-%x] (end marker incorrect) should be %d; in fact %d\n", getpid(), ptr, follower, REGION_ADDRESS(*region), region_end, msize, PREVIOUS_MSIZE(follower));
                return 0;
            }
            ptr = follower;
            prev_free = PREV_FREE;
        }
    }
    return 1;
}
static kern_return_t small_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned num_regions, memory_reader_t reader, vm_range_recorder_t recorder) {
    region_t *regions;
    unsigned index = 0;
    unsigned count = 0;
    vm_range_t buffer[MAX_RECORDER_BUFFER];
    kern_return_t err;
    err = reader(task, region_address, sizeof(region_t) * num_regions, (void **)&regions);
    if (err) return err;
    while (index < num_regions) {
        region_t region = regions[index++];
        vm_range_t range = {REGION_ADDRESS(region), REGION_SIZE};
        vm_address_t start = range.address + QUANTUM;
        // malloc_printf("Enumerating small ptrs for Region starting at 0x%x\n", start);
        if (type_mask & MALLOC_PTR_REGION_RANGE_TYPE) recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &range, 1);
        if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) while (start < range.address + range.size) {
            void *previous;
            msize_t msize_and_free;
            err = reader(task, start - PTR_HEADER_SIZE, QUANTUM, (void **)&previous);
            if (err) return err;
            previous += PTR_HEADER_SIZE;
            msize_and_free = MSIZE_FLAGS_FOR_PTR(previous);
            if (!(msize_and_free & THIS_FREE)) {
                // block in use
                msize_t msize = msize_and_free & ~PREV_FREE;
                if (!msize) break; // last encountered
                buffer[count].address = start;
                buffer[count].size = (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE;
                count++;
                if (count >= MAX_RECORDER_BUFFER) {
                    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
                    count = 0;
                }
                start += msize << SHIFT_QUANTUM;
            } else {
                // free block
                msize_t msize = msize_and_free & ~THIS_FREE;
                start += msize << SHIFT_QUANTUM;
            }
        }
        // malloc_printf("End region - count=%d\n", count);
    }
    if (count) recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
    return 0;
}
static INLINE void *small_malloc_from_free_list(szone_t *szone, msize_t msize, boolean_t *locked) {
    void *ptr;
    msize_t this_msize;
    free_list_t **free_list;
    free_list_t **limit = szone->free_list + MAX_GRAIN - 1;
    // first try the small grains
    free_list = szone->free_list + grain_for_msize(szone, msize);
    while (free_list < limit) {
        ptr = *free_list;
        if (ptr) {
            if (!*locked) { *locked = 1; SZONE_LOCK(szone); CHECK(szone, __PRETTY_FUNCTION__); }
            ptr = *free_list;
            if (ptr) {
                // optimistic test worked
                free_list_t *next;
                next = ((free_list_t *)ptr)->next;
                if (next) {
                    next->previous = NULL;
                    free_list_set_checksum(szone, next);
                }
                *free_list = next;
                this_msize = MSIZE_FLAGS_FOR_PTR(ptr) & ~THIS_FREE;
                MSIZE_FLAGS_FOR_PTR(FOLLOWING_PTR(ptr, this_msize)) &= ~ PREV_FREE;
                goto add_leftover_and_proceed;
            }
        }
        free_list++;
    }
    // We now check the large grains for one that is big enough
    if (!*locked) { *locked = 1; SZONE_LOCK(szone); CHECK(szone, __PRETTY_FUNCTION__); }
    ptr = *free_list;
    while (ptr) {
        this_msize = MSIZE_FLAGS_FOR_PTR(ptr) & ~THIS_FREE;
        if (this_msize >= msize) {
            free_list_remove_ptr(szone, ptr, this_msize);
            goto add_leftover_and_proceed;
        }
        ptr = ((free_list_t *)ptr)->next;
    }
    return NULL;
add_leftover_and_proceed:
    if (this_msize >= msize + MIN_BLOCK) {
        if (LOG(szone,ptr)) malloc_printf("In small_malloc_should_clear(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
        free_list_add_ptr(szone, ptr + (msize << SHIFT_QUANTUM), this_msize - msize);
        this_msize = msize;
    }
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += this_msize << SHIFT_QUANTUM;
    if (LOG(szone,ptr)) malloc_printf("In small_malloc_should_clear(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
    MSIZE_FLAGS_FOR_PTR(ptr) = this_msize;
    return ptr;
}
static INLINE void *small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) {
    boolean_t locked = 0;
    void *ptr;
    if (! (msize & 0xffff)) {
        szone_error(szone, "Invariant broken (!msize) in allocation (region)", NULL);
    }
    if (msize < MIN_BLOCK) {
        szone_error(szone, "Invariant broken (msize too small) in allocation (region)", NULL);
    }
    ptr = small_malloc_from_free_list(szone, msize, &locked);
    if (ptr) {
        CHECK(szone, __PRETTY_FUNCTION__);
        SZONE_UNLOCK(szone);
        if (cleared_requested) memset(ptr, 0, (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE);
        return ptr;
    }
    if (!locked) SZONE_LOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    ptr = small_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
    CHECK(szone, __PRETTY_FUNCTION__);
    SZONE_UNLOCK(szone);
    return ptr;
}
static INLINE void *small_malloc_cleared_no_lock(szone_t *szone, msize_t msize) {
    // tries to allocate a small, cleared block
    boolean_t locked = 1;
    void *ptr;
    ptr = small_malloc_from_free_list(szone, msize, &locked);
    if (ptr) {
        memset(ptr, 0, (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE);
        return ptr;
    }
    ptr = small_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
    return ptr;
}
/********************* LARGE ENTRY UTILITIES ************************/

static void large_debug_print(szone_t *szone) {
    unsigned num_large_entries = szone->num_large_entries;
    unsigned index = num_large_entries;
    while (index--) {
        large_entry_t *range = szone->large_entries + index;
        large_entry_t entry = *range;
        if (!LARGE_ENTRY_IS_EMPTY(entry)) malloc_printf("%d: 0x%x(%dKB); ", index, LARGE_ENTRY_ADDRESS(entry), LARGE_ENTRY_SIZE(entry)/1024);
    }
    malloc_printf("\n");
}
static large_entry_t *large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) {
    // result only valid during a lock
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index;
    unsigned index;
    if (!num_large_entries) return NULL;
    hash_index = ((unsigned)ptr >> vm_page_shift) % num_large_entries;
    index = hash_index;
    do {
        large_entry_t *range = szone->large_entries + index;
        large_entry_t entry = *range;
        if (LARGE_ENTRY_MATCHES(entry, ptr)) return range;
        if (LARGE_ENTRY_IS_EMPTY(entry)) return NULL; // end of chain
        index++; if (index == num_large_entries) index = 0;
    } while (index != hash_index);
    return NULL;
}
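/*
 * Worked example (illustrative, not part of the original source): the table is an
 * open-addressing hash keyed on the page number. With num_large_entries == 15 and
 * vm_page_shift == 12, a pointer at 0x00054000 hashes to (0x54000 >> 12) % 15 =
 * 84 % 15 = 9; a lookup probes slots 9, 10, 11, ... wrapping at 15, and stops at
 * the first matching entry or at the first empty (zero) entry.
 */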
static void large_entry_insert_no_lock(szone_t *szone, large_entry_t range) {
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index = (range.address_and_num_pages >> vm_page_shift) % num_large_entries;
    unsigned index = hash_index;
    // malloc_printf("Before insertion of 0x%x\n", LARGE_ENTRY_ADDRESS(range));
    do {
        large_entry_t *entry = szone->large_entries + index;
        if (LARGE_ENTRY_IS_EMPTY(*entry)) {
            *entry = range;
            return; // end of chain
        }
        index++; if (index == num_large_entries) index = 0;
    } while (index != hash_index);
}
static INLINE void large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) {
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index = entry - szone->large_entries;
    unsigned index = hash_index;
    do {
        large_entry_t range;
        index++; if (index == num_large_entries) index = 0;
        range = szone->large_entries[index];
        if (LARGE_ENTRY_IS_EMPTY(range)) return;
        szone->large_entries[index].address_and_num_pages = 0;
        large_entry_insert_no_lock(szone, range); // this will reinsert in the proper place
    } while (index != hash_index);
}
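/*
 * Illustrative note, not from the original source: because the table uses linear
 * probing, simply zeroing a slot could break the probe chain for entries that had
 * collided past it; re-inserting every entry that follows the freed slot, as done
 * above, restores the invariant that a lookup may stop at the first empty slot.
 */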
static INLINE large_entry_t *large_entries_alloc_no_lock(szone_t *szone, unsigned num) {
    size_t size = num * sizeof(large_entry_t);
    boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;
    if (is_vm_allocation) {
        return (void *)allocate_pages(szone, round_page(size), 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE));
    } else {
        return small_malloc_cleared_no_lock(szone, (size + PTR_HEADER_SIZE + QUANTUM - 1) >> SHIFT_QUANTUM);
    }
}
static void large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num) {
    size_t size = num * sizeof(large_entry_t);
    boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;
    if (is_vm_allocation) {
        deallocate_pages(szone, (vm_address_t)entries, round_page(size), 0);
    } else {
        region_t *region = region_for_ptr_no_lock(szone, entries);
        msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(entries);
        if (msize_and_free & THIS_FREE) {
            szone_error(szone, "Object already freed being freed", entries);
            return;
        }
        small_free_no_lock(szone, region, entries, msize_and_free);
    }
}
static void large_entries_grow_no_lock(szone_t *szone) {
    unsigned old_num_entries = szone->num_large_entries;
    large_entry_t *old_entries = szone->large_entries;
    unsigned new_num_entries = (old_num_entries) ? old_num_entries * 2 + 1 : 15; // always an odd number for good hashing
    large_entry_t *new_entries = large_entries_alloc_no_lock(szone, new_num_entries);
    unsigned index = old_num_entries;
    szone->num_large_entries = new_num_entries;
    szone->large_entries = new_entries;
    // malloc_printf("_grow_large_entries old_num_entries=%d new_num_entries=%d\n", old_num_entries, new_num_entries);
    while (index--) {
        large_entry_t oldRange = old_entries[index];
        if (!LARGE_ENTRY_IS_EMPTY(oldRange)) large_entry_insert_no_lock(szone, oldRange);
    }
    if (old_entries) large_entries_free_no_lock(szone, old_entries, old_num_entries);
}
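/*
 * Illustrative note, not from the original source: the table sizes therefore
 * follow 15, 31, 63, 127, ... (2*n + 1 keeps them odd, which spreads the
 * page-number hash better), and large_and_huge_malloc() below triggers a grow
 * once the entries in use exceed roughly a quarter of the slots.
 */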
static vm_range_t large_free_no_lock(szone_t *szone, large_entry_t *entry) {
    // frees the specific entry in the size table
    // returns a range to truly deallocate
    vm_range_t range;
    range.address = LARGE_ENTRY_ADDRESS(*entry);
    range.size = LARGE_ENTRY_SIZE(*entry);
    szone->num_large_objects_in_use--;
    szone->num_bytes_in_large_objects -= range.size;
    if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
        protect(szone, range.address, range.size, VM_PROT_READ | VM_PROT_WRITE, szone->debug_flags);
        range.address -= vm_page_size;
        range.size += 2 * vm_page_size;
    }
    // printf("Entry is 0x%x=%d; cache is 0x%x ; found=0x%x\n", entry, entry-szone->large_entries, szone->large_entries, large_entry_for_pointer_no_lock(szone, (void *)range.address));
    entry->address_and_num_pages = 0;
    large_entries_rehash_after_entry_no_lock(szone, entry);
    if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) {
        malloc_printf("*** malloc[%d]: Freed entry 0x%x still in use; num_large_entries=%d\n", getpid(), range.address, szone->num_large_entries);
        large_debug_print(szone);
    }
    return range;
}
static kern_return_t large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) {
    unsigned index = num_entries;
    unsigned count = 0;
    vm_range_t buffer[MAX_RECORDER_BUFFER];
    large_entry_t *entries;
    kern_return_t err;
    err = reader(task, large_entries_address, sizeof(large_entry_t) * num_entries, (void **)&entries);
    if (err) return err;
    if ((type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) && (num_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD)) {
        vm_range_t range;
        range.address = large_entries_address;
        range.size = round_page(num_entries * sizeof(large_entry_t));
        recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &range, 1);
    }
    if (type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE)) while (index--) {
        large_entry_t entry = entries[index];
        if (!LARGE_ENTRY_IS_EMPTY(entry)) {
            vm_range_t range;
            range.address = LARGE_ENTRY_ADDRESS(entry);
            range.size = LARGE_ENTRY_SIZE(entry);
            buffer[count++] = range;
            if (count >= MAX_RECORDER_BUFFER) {
                recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count);
                count = 0;
            }
        }
    }
    if (count) recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count);
    return 0;
}
/********************* HUGE ENTRY UTILITIES ************************/

static huge_entry_t *huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) {
    unsigned index = szone->num_huge_entries;
    while (index--) {
        huge_entry_t *huge = szone->huge_entries + index;
        if (huge->address == (vm_address_t)ptr) return huge;
    }
    return NULL;
}
static boolean_t huge_entry_append(szone_t *szone, huge_entry_t huge) {
    // We do a little dance with locking because doing allocation (even in the default szone) may cause something to get freed in this szone, with a deadlock
    huge_entry_t *new_huge_entries = NULL;
    SZONE_LOCK(szone);
    while (1) {
        unsigned num_huge_entries;
        num_huge_entries = szone->num_huge_entries;
        SZONE_UNLOCK(szone);
        // malloc_printf("In huge_entry_append currentEntries=%d\n", num_huge_entries);
        if (new_huge_entries) szone_free(szone, new_huge_entries);
        new_huge_entries = szone_malloc(szone, (num_huge_entries + 1) * sizeof(huge_entry_t));
        if (new_huge_entries == NULL)
            return 1;
        SZONE_LOCK(szone);
        if (num_huge_entries == szone->num_huge_entries) {
            // No change - our malloc still applies
            huge_entry_t *old_huge_entries = szone->huge_entries;
            if (num_huge_entries) memcpy(new_huge_entries, old_huge_entries, num_huge_entries * sizeof(huge_entry_t));
            new_huge_entries[szone->num_huge_entries++] = huge;
            szone->huge_entries = new_huge_entries;
            SZONE_UNLOCK(szone);
            szone_free(szone, old_huge_entries);
            // malloc_printf("Done huge_entry_append now=%d\n", szone->num_huge_entries);
            return 0;
        }
        // try again
    }
}
static kern_return_t huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) {
    huge_entry_t *entries;
    kern_return_t err;
    err = reader(task, huge_entries_address, sizeof(huge_entry_t) * num_entries, (void **)&entries);
    if (err) return err;
    if (num_entries) recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, entries, num_entries);
    return 0;
}
static void *large_and_huge_malloc(szone_t *szone, unsigned num_pages) {
    vm_address_t addr = 0;
    if (!num_pages) num_pages = 1; // minimal allocation size for this szone
    // malloc_printf("In large_and_huge_malloc for %dKB\n", num_pages * vm_page_size / 1024);
    if (num_pages >= (1 << vm_page_shift)) {
        huge_entry_t huge;
        huge.size = num_pages << vm_page_shift;
        addr = allocate_pages(szone, huge.size, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_HUGE));
        if (!addr) return NULL;
        huge.address = addr;
        if (huge_entry_append(szone, huge))
            return NULL;
        SZONE_LOCK(szone);
        szone->num_bytes_in_huge_objects += huge.size;
    } else {
        vm_size_t size = num_pages << vm_page_shift;
        large_entry_t entry;
        addr = allocate_pages(szone, size, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE));
        if (LOG(szone, addr)) malloc_printf("In szone_malloc true large allocation at %p for %dKB\n", (void *)addr, size / 1024);
        SZONE_LOCK(szone);
        if (!addr) {
            SZONE_UNLOCK(szone);
            return NULL;
        }
        if (large_entry_for_pointer_no_lock(szone, (void *)addr)) {
            malloc_printf("Freshly allocated is already in use: 0x%x\n", addr);
            large_debug_print(szone);
        }
        if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) {
            // density of hash table too high; grow table
            // we do that under lock to avoid a race
            // malloc_printf("In szone_malloc growing hash table current=%d\n", szone->num_large_entries);
            large_entries_grow_no_lock(szone);
        }
        // malloc_printf("Inserting large entry (0x%x, %dKB)\n", addr, num_pages * vm_page_size / 1024);
        entry.address_and_num_pages = addr | num_pages;
        if (large_entry_for_pointer_no_lock(szone, (void *)addr)) {
            malloc_printf("Entry about to be added already in use: 0x%x\n", addr);
            large_debug_print(szone);
        }
        large_entry_insert_no_lock(szone, entry);
        if (!large_entry_for_pointer_no_lock(szone, (void *)addr)) {
            malloc_printf("Can't find entry just added\n");
            large_debug_print(szone);
        }
        // malloc_printf("Inserted large entry (0x%x, %d pages)\n", addr, num_pages);
        szone->num_large_objects_in_use++;
        szone->num_bytes_in_large_objects += size;
    }
    SZONE_UNLOCK(szone);
    return (void *)addr;
}
/********************* Zone call backs ************************/

static void szone_free(szone_t *szone, void *ptr) {
    region_t *region;
    large_entry_t *entry;
    vm_range_t vm_range_to_deallocate;
    huge_entry_t *huge;
    if (LOG(szone, ptr)) malloc_printf("In szone_free with %p\n", ptr);
    if (!ptr) return;
    if ((vm_address_t)ptr & (QUANTUM - 1)) {
        szone_error(szone, "Non-aligned pointer being freed", ptr);
        return;
    }
    // try a small pointer
    region = region_for_ptr_no_lock(szone, ptr);
    if (region) {
        // this is indeed a valid pointer
        msize_t msize_and_free;
        SZONE_LOCK(szone);
        msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
        if (msize_and_free & THIS_FREE) {
            szone_error(szone, "Object already freed being freed", ptr);
            return;
        }
        CHECK(szone, __PRETTY_FUNCTION__);
        small_free_no_lock(szone, region, ptr, msize_and_free);
        CHECK(szone, __PRETTY_FUNCTION__);
        SZONE_UNLOCK(szone);
        return;
    }
    if (((unsigned)ptr) & (vm_page_size - 1)) {
        szone_error(szone, "Non-page-aligned, non-allocated pointer being freed", ptr);
        return;
    }
    SZONE_LOCK(szone);
    entry = large_entry_for_pointer_no_lock(szone, ptr);
    if (entry) {
        // malloc_printf("Ready for deallocation [0x%x-%dKB]\n", LARGE_ENTRY_ADDRESS(*entry), LARGE_ENTRY_SIZE(*entry)/1024);
        if (KILL_THRESHOLD && (LARGE_ENTRY_SIZE(*entry) > KILL_THRESHOLD)) {
            // We indicate to the VM system that these pages contain garbage and therefore don't need to be swapped out
            vm_msync(mach_task_self(), LARGE_ENTRY_ADDRESS(*entry), LARGE_ENTRY_SIZE(*entry), VM_SYNC_KILLPAGES);
        }
        vm_range_to_deallocate = large_free_no_lock(szone, entry);
        if (large_entry_for_pointer_no_lock(szone, ptr)) {
            malloc_printf("*** malloc[%d]: Just after freeing 0x%x still in use num_large_entries=%d\n", getpid(), ptr, szone->num_large_entries);
            large_debug_print(szone);
        }
    } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) {
        vm_range_to_deallocate = *huge;
        *huge = szone->huge_entries[--szone->num_huge_entries]; // last entry fills that spot
        szone->num_bytes_in_huge_objects -= vm_range_to_deallocate.size;
    } else {
        large_debug_print(szone);
        szone_error(szone, "Pointer being freed was not allocated", ptr);
        return;
    }
    CHECK(szone, __PRETTY_FUNCTION__);
    SZONE_UNLOCK(szone); // we release the lock asap
    // we deallocate_pages, including guard pages
    if (vm_range_to_deallocate.address) {
        // malloc_printf("About to deallocate 0x%x size %dKB\n", vm_range_to_deallocate.address, vm_range_to_deallocate.size / 1024);
        if (large_entry_for_pointer_no_lock(szone, (void *)vm_range_to_deallocate.address)) {
            malloc_printf("*** malloc[%d]: Invariant broken: 0x%x still in use num_large_entries=%d\n", getpid(), vm_range_to_deallocate.address, szone->num_large_entries);
            large_debug_print(szone);
        }
        deallocate_pages(szone, vm_range_to_deallocate.address, vm_range_to_deallocate.size, 0);
    }
}
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested) {
    void *ptr;
    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
        // think small
        size_t msize = (size + PTR_HEADER_SIZE + QUANTUM - 1) >> SHIFT_QUANTUM;
        if (msize < MIN_BLOCK) msize = MIN_BLOCK;
        ptr = small_malloc_should_clear(szone, msize, cleared_requested);
        if ((MSIZE_FLAGS_FOR_PTR(ptr) & ~ PREV_FREE) < msize) {
            malloc_printf("ptr=%p this=%d msize=%d\n", ptr, MSIZE_FLAGS_FOR_PTR(ptr), (int)msize);
            szone_error(szone, "Pointer allocated has improper size (1)", ptr);
            return NULL;
        }
        if ((MSIZE_FLAGS_FOR_PTR(ptr) & ~ PREV_FREE) < MIN_BLOCK) {
            malloc_printf("ptr=%p this=%d msize=%d\n", ptr, MSIZE_FLAGS_FOR_PTR(ptr), (int)msize);
            szone_error(szone, "Pointer allocated has improper size (2)", ptr);
            return NULL;
        }
    } else {
        unsigned num_pages;
        num_pages = round_page(size) >> vm_page_shift;
        ptr = large_and_huge_malloc(szone, num_pages);
    }
    if (LOG(szone, ptr)) malloc_printf("szone_malloc returned %p\n", ptr);
    return ptr;
}
static void *szone_malloc(szone_t *szone, size_t size) {
    return szone_malloc_should_clear(szone, size, 0);
}

static void *szone_calloc(szone_t *szone, size_t num_items, size_t size) {
    return szone_malloc_should_clear(szone, num_items * size, 1);
}
static void *szone_valloc(szone_t *szone, size_t size) {
    void *ptr;
    unsigned num_pages;
    num_pages = round_page(size) >> vm_page_shift;
    ptr = large_and_huge_malloc(szone, num_pages);
    if (LOG(szone, ptr)) malloc_printf("szone_valloc returned %p\n", ptr);
    return ptr;
}
static size_t szone_size(szone_t *szone, const void *ptr) {
    size_t size = 0;
    region_t *region;
    large_entry_t *entry;
    huge_entry_t *huge;
    if (!ptr) return 0;
    if (LOG(szone, ptr)) malloc_printf("In szone_size for %p (szone=%p)\n", ptr, szone);
    if ((vm_address_t)ptr & (QUANTUM - 1)) return 0;
    if ((((unsigned)ptr) & (vm_page_size - 1)) && (MSIZE_FLAGS_FOR_PTR(ptr) & THIS_FREE)) {
        // not page aligned, but definitely not in use
        return 0;
    }
    // Try a small pointer
    region = region_for_ptr_no_lock(szone, ptr);
    // malloc_printf("FOUND REGION %p\n", region);
    if (region) {
        // this is indeed a valid pointer
        msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
        return (msize_and_free & THIS_FREE) ? 0 : ((msize_and_free & ~PREV_FREE) << SHIFT_QUANTUM) - PTR_HEADER_SIZE;
    }
    if (((unsigned)ptr) & (vm_page_size - 1)) {
        return 0;
    }
    SZONE_LOCK(szone);
    entry = large_entry_for_pointer_no_lock(szone, ptr);
    if (entry) {
        size = LARGE_ENTRY_SIZE(*entry);
    } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) {
        size = huge->size;
    }
    SZONE_UNLOCK(szone);
    // malloc_printf("szone_size for large/huge %p returned %d\n", ptr, (unsigned)size);
    if (LOG(szone, ptr)) malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)size);
    return size;
}
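/*
 * Worked example (illustrative, not part of the original source): for a small
 * in-use block the reported size is the whole allocation minus the 2-byte header,
 * e.g. msize 3 gives (3 << SHIFT_QUANTUM) - PTR_HEADER_SIZE = 48 - 2 = 46 usable
 * bytes; any request of 31..46 bytes ends up in such a block.
 */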
static INLINE int try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) {
    // returns 1 on success
    void *next_block = (char *)ptr + old_size + PTR_HEADER_SIZE;
    msize_t next_msize_and_free;
    msize_t next_msize;
    region_t region;
    msize_t coalesced_msize;
    msize_t leftover_msize;
    msize_t new_msize_and_free;
    void *following_ptr;
    SZONE_LOCK(szone);
    region = szone->regions[szone->num_regions - 1];
    if (((vm_address_t)ptr >= region) && ((vm_address_t)ptr < region + REGION_SIZE) && ((vm_address_t)next_block == REGION_END(region) - szone->num_bytes_free_in_last_region + PTR_HEADER_SIZE)) {
        // This could be optimized but it is so rare it's not worth it
        SZONE_UNLOCK(szone);
        return 0;
    }
    // If the next block is free, we coalesce
    next_msize_and_free = MSIZE_FLAGS_FOR_PTR(next_block);
    if ((vm_address_t)next_block & (QUANTUM - 1)) {
        szone_error(szone, "Internal invariant broken in realloc(next_block)", next_block);
    }
    if (next_msize_and_free & PREV_FREE) {
        malloc_printf("try_realloc_small_in_place: 0x%x=PREV_FREE|%d\n", next_msize_and_free, next_msize_and_free & ~PREV_FREE);
        SZONE_UNLOCK(szone);
        return 0;
    }
    next_msize = next_msize_and_free & ~THIS_FREE;
    if (!(next_msize_and_free & THIS_FREE) || !next_msize || (old_size + (next_msize << SHIFT_QUANTUM) < new_size)) {
        SZONE_UNLOCK(szone);
        return 0;
    }
    coalesced_msize = (new_size - old_size + QUANTUM - 1) >> SHIFT_QUANTUM;
    leftover_msize = next_msize - coalesced_msize;
    new_msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
    // malloc_printf("Realloc in place for %p; current msize=%d next_msize=%d wanted=%d\n", ptr, MSIZE_FLAGS_FOR_PTR(ptr), next_msize, new_size);
    free_list_remove_ptr(szone, next_block, next_msize);
    if ((leftover_msize < MIN_BLOCK) || (leftover_msize < coalesced_msize / 4)) {
        // don't bother splitting it off
        // malloc_printf("No leftover ");
        coalesced_msize = next_msize;
    } else {
        void *leftover = next_block + (coalesced_msize << SHIFT_QUANTUM);
        // malloc_printf("Leftover ");
        free_list_add_ptr(szone, leftover, leftover_msize);
    }
    new_msize_and_free += coalesced_msize;
    MSIZE_FLAGS_FOR_PTR(ptr) = new_msize_and_free;
    following_ptr = FOLLOWING_PTR(ptr, new_msize_and_free & ~PREV_FREE);
    MSIZE_FLAGS_FOR_PTR(following_ptr) &= ~ PREV_FREE;
#if DEBUG_MALLOC
    {
        msize_t ms = MSIZE_FLAGS_FOR_PTR(following_ptr);
        msize_t pms = PREVIOUS_MSIZE(FOLLOWING_PTR(following_ptr, ms & ~THIS_FREE));
        malloc_printf("Following ptr of coalesced (%p) has msize_and_free=0x%x=%s%d end_of_block_marker=%d\n", following_ptr, ms, (ms & THIS_FREE) ? "THIS_FREE|" : "", ms & ~THIS_FREE, pms);
    }
    if (LOG(szone,ptr)) malloc_printf("In szone_realloc(), ptr=%p, msize=%d\n", ptr, MSIZE_FLAGS_FOR_PTR(ptr));
#endif
    CHECK(szone, __PRETTY_FUNCTION__);
    szone->num_bytes_in_small_objects += coalesced_msize << SHIFT_QUANTUM;
    SZONE_UNLOCK(szone);
    // malloc_printf("Extended ptr %p for realloc old=%d desired=%d new=%d leftover=%d\n", ptr, (unsigned)old_size, (unsigned)new_size, (unsigned)szone_size(szone, ptr), leftover_msize << SHIFT_QUANTUM);
    return 1;
}
static INLINE int try_realloc_large_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) {
    vm_address_t addr = (vm_address_t)ptr + old_size;
    large_entry_t *entry;
    kern_return_t err;
    if (((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL)) return 0; // don't want to bother with the protected case
    if (old_size != ((old_size >> vm_page_shift) << vm_page_shift)) malloc_printf("*** old_size is %d\n", old_size);
    // malloc_printf("=== Trying (1) to extend %p from %d to %d\n", ptr, old_size, new_size);
    SZONE_LOCK(szone);
    entry = large_entry_for_pointer_no_lock(szone, (void *)addr);
    SZONE_UNLOCK(szone);
    if (entry) return 0; // large pointer already exist in table - extension is not going to work
    new_size = round_page(new_size);
    // malloc_printf("=== Trying (2) to extend %p from %d to %d\n", ptr, old_size, new_size);
    err = vm_allocate(mach_task_self(), &addr, new_size - old_size, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE)); // we ask for allocation specifically at addr
    if (err) return 0;
    // we can just extend the block
    SZONE_LOCK(szone);
    entry = large_entry_for_pointer_no_lock(szone, ptr);
    if (!entry) szone_error(szone, "large entry reallocated is not properly in table", ptr);
    // malloc_printf("=== Successfully reallocated at end of %p from %d to %d\n", ptr, old_size, new_size);
    entry->address_and_num_pages = (vm_address_t)ptr | (new_size >> vm_page_shift);
    szone->num_bytes_in_large_objects += new_size - old_size;
    SZONE_UNLOCK(szone); // we release the lock asap
    return 1;
}
static void *szone_realloc(szone_t *szone, void *ptr, size_t new_size) {
    size_t old_size = 0;
    void *new_ptr;
    if (LOG(szone, ptr)) malloc_printf("In szone_realloc for %p, %d\n", ptr, (unsigned)new_size);
    if (!ptr) return szone_malloc(szone, new_size);
    old_size = szone_size(szone, ptr);
    if (!old_size) {
        szone_error(szone, "Pointer being reallocated was not allocated", ptr);
        return NULL;
    }
    if (old_size >= new_size) return ptr;
    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && ((new_size + PTR_HEADER_SIZE + QUANTUM - 1) < LARGE_THRESHOLD)) {
        // We now try to realloc in place
        if (try_realloc_small_in_place(szone, ptr, old_size, new_size)) return ptr;
    }
    if ((old_size > VM_COPY_THRESHOLD) && ((new_size + vm_page_size - 1) < (1 << (vm_page_shift + vm_page_shift)))) {
        // we know it's a large block, and not a huge block (both for old and new)
        kern_return_t err = 0;
        unsigned num_pages;
        large_entry_t *entry;
        vm_range_t range;
        num_pages = round_page(new_size) >> vm_page_shift;
        if (try_realloc_large_in_place(szone, ptr, old_size, new_size)) return ptr;
        new_ptr = large_and_huge_malloc(szone, num_pages);
        err = vm_copy(mach_task_self(), (vm_address_t)ptr, old_size, (vm_address_t)new_ptr);
        if (err) {
            szone_error(szone, "Can't vm_copy region", ptr);
        }
        // We do not want the kernel to alias the old and the new, so we deallocate the old pointer right away and tear down the ptr-to-size data structure
        SZONE_LOCK(szone);
        entry = large_entry_for_pointer_no_lock(szone, ptr);
        if (!entry) {
            szone_error(szone, "Can't find entry for large copied block", ptr);
        }
        range = large_free_no_lock(szone, entry);
        SZONE_UNLOCK(szone); // we release the lock asap
        // we truly deallocate_pages, including guard pages
        deallocate_pages(szone, range.address, range.size, 0);
        if (LOG(szone, ptr)) malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size);
        return new_ptr;
    }
    new_ptr = szone_malloc(szone, new_size);
    if (new_ptr == NULL)
        return NULL;
    memcpy(new_ptr, ptr, old_size);
    szone_free(szone, ptr);
    if (LOG(szone, ptr)) malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size);
    return new_ptr;
}
static void szone_destroy(szone_t *szone) {
    unsigned index;
    index = szone->num_large_entries;
    while (index--) {
        large_entry_t *entry = szone->large_entries + index;
        if (!LARGE_ENTRY_IS_EMPTY(*entry)) {
            large_entry_t range;
            range = *entry;
            // we deallocate_pages, including guard pages
            deallocate_pages(szone, LARGE_ENTRY_ADDRESS(range), LARGE_ENTRY_SIZE(range), szone->debug_flags);
        }
    }
    if (szone->num_large_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD) large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries); // we do not free in the small chunk case
    index = szone->num_huge_entries;
    while (index--) {
        huge_entry_t *huge = szone->huge_entries + index;
        deallocate_pages(szone, huge->address, huge->size, szone->debug_flags);
    }
    // and now we free regions, with regions[0] as the last one (the final harakiri)
    index = szone->num_regions;
    while (index--) { // we skip the first region, that is the zone itself
        region_t region = szone->regions[index];
        deallocate_pages(szone, REGION_ADDRESS(region), REGION_SIZE, 0);
    }
}
static size_t szone_good_size(szone_t *szone, size_t size) {
    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
        // think small
        msize_t msize = (size + PTR_HEADER_SIZE + QUANTUM - 1) >> SHIFT_QUANTUM;
        if (msize < MIN_BLOCK) msize = MIN_BLOCK;
        return (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE;
    } else {
        unsigned num_pages;
        num_pages = round_page(size) >> vm_page_shift;
        if (!num_pages) num_pages = 1; // minimal allocation size for this szone
        return num_pages << vm_page_shift;
    }
}
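/*
 * Worked example (illustrative, not part of the original source): with 4KB pages
 * LARGE_THRESHOLD is 12288, so szone_good_size(szone, 20000) takes the large path
 * and rounds up to 5 pages = 20480 bytes, while szone_good_size(szone, 100) stays
 * small: msize = (100 + 2 + 15) >> 4 = 7, so it returns (7 << 4) - 2 = 110.
 */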
unsigned szone_check_counter = 0;
unsigned szone_check_start = 0;
unsigned szone_check_modulo = 1;
static boolean_t szone_check_all(szone_t *szone, const char *function) {
    unsigned index = 0;
    SZONE_LOCK(szone);
    while (index < szone->num_regions) {
        region_t *region = szone->regions + index++;
        if (!szone_check_region(szone, region)) {
            SZONE_UNLOCK(szone);
            szone->debug_flags &= ~ CHECK_REGIONS;
            malloc_printf("*** malloc[%d]: Region %d incorrect szone_check_all(%s) counter=%d\n", getpid(), index-1, function, szone_check_counter);
            szone_error(szone, "Check: region incorrect", NULL);
            return 0;
        }
    }
    index = 0;
    while (index < MAX_GRAIN) {
        if (! free_list_check(szone, index)) {
            SZONE_UNLOCK(szone);
            szone->debug_flags &= ~ CHECK_REGIONS;
            malloc_printf("*** malloc[%d]: Free list incorrect (grain=%d) szone_check_all(%s) counter=%d\n", getpid(), index, function, szone_check_counter);
            szone_error(szone, "Check: free list incorrect", NULL);
            return 0;
        }
        index++;
    }
    SZONE_UNLOCK(szone);
    return 1;
}
static boolean_t szone_check(szone_t *szone) {
    if (! (++szone_check_counter % 10000)) {
        malloc_printf("At szone_check counter=%d\n", szone_check_counter);
    }
    if (szone_check_counter < szone_check_start) return 1;
    if (szone_check_counter % szone_check_modulo) return 1;
    return szone_check_all(szone, "");
}
static kern_return_t szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder) {
    szone_t *szone;
    kern_return_t err;
    if (!reader) reader = _szone_default_reader;
    // malloc_printf("Enumerator for zone 0x%x\n", zone_address);
    err = reader(task, zone_address, sizeof(szone_t), (void **)&szone);
    if (err) return err;
    // malloc_printf("Small ptrs enumeration for zone 0x%x\n", zone_address);
    err = small_in_use_enumerator(task, context, type_mask, (vm_address_t)szone->regions, szone->num_regions, reader, recorder);
    if (err) return err;
    // malloc_printf("Large ptrs enumeration for zone 0x%x\n", zone_address);
    err = large_in_use_enumerator(task, context, type_mask, (vm_address_t)szone->large_entries, szone->num_large_entries, reader, recorder);
    if (err) return err;
    // malloc_printf("Huge ptrs enumeration for zone 0x%x\n", zone_address);
    err = huge_in_use_enumerator(task, context, type_mask, (vm_address_t)szone->huge_entries, szone->num_huge_entries, reader, recorder);
    return err;
}
static void szone_print_free_list(szone_t *szone) {
    grain_t grain = MAX_GRAIN;
    malloc_printf("Free Sizes: ");
    while (grain--) {
	free_list_t *ptr = szone->free_list[grain];
	if (ptr) {
	    unsigned count = 0;
	    while (ptr) {
		count++;
		// malloc_printf("%p ", ptr);
		ptr = ptr->next;
	    }
	    malloc_printf("%s%d[%d] ", (grain == MAX_GRAIN-1) ? ">=" : "", (grain+1)*QUANTUM, count);
	}
    }
    malloc_printf("\n");
}
static void szone_print(szone_t *szone, boolean_t verbose) {
    unsigned info[scalable_zone_info_count];
    unsigned index = 0;
    scalable_zone_info((void *)szone, info, scalable_zone_info_count);
    malloc_printf("Scalable zone %p: inUse=%d(%dKB) small=%d(%dKB) large=%d(%dKB) huge=%d(%dKB) guard_page=%d\n", szone, info[0], info[1] / 1024, info[2], info[3] / 1024, info[4], info[5] / 1024, info[6], info[7] / 1024, info[8]);
    malloc_printf("%d regions: \n", szone->num_regions);
    while (index < szone->num_regions) {
	region_t *region = szone->regions + index;
	unsigned counts[512];
	unsigned ci = 0;
	unsigned in_use = 0;
	vm_address_t start = REGION_ADDRESS(*region) + QUANTUM;
	memset(counts, 0, 512 * sizeof(unsigned));
	while (start < REGION_END(*region)) {
	    msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(start);
	    if (!(msize_and_free & THIS_FREE)) {
		msize_t msize = msize_and_free & ~PREV_FREE;
		if (!msize) break; // last encountered
		// this block is in use
		if (msize < 512) counts[msize]++;
		start += msize << SHIFT_QUANTUM;
		in_use++;
	    } else {
		msize_t msize = msize_and_free & ~THIS_FREE;
		// this block is free
		start += msize << SHIFT_QUANTUM;
	    }
	}
	malloc_printf("Region [0x%x-0x%x, %dKB] \tIn_use=%d ", REGION_ADDRESS(*region), REGION_END(*region), (int)REGION_SIZE / 1024, in_use);
	if (verbose) {
	    malloc_printf("\n\tSizes in use: ");
	    while (ci < 512) {
		if (counts[ci]) malloc_printf("%d[%d] ", ci << SHIFT_QUANTUM, counts[ci]);
		ci++;
	    }
	}
	malloc_printf("\n");
	index++;
    }
    if (verbose) szone_print_free_list(szone);
    malloc_printf("Free in last zone %d\n", szone->num_bytes_free_in_last_region);
}
static void szone_log(malloc_zone_t *zone, void *log_address) {
    szone_t *szone = (void *)zone;
    szone->log_address = log_address;
}
static void szone_force_lock(szone_t *szone) {
    // malloc_printf("szone_force_lock\n");
    SZONE_LOCK(szone);
}

static void szone_force_unlock(szone_t *szone) {
    // malloc_printf("szone_force_unlock\n");
    SZONE_UNLOCK(szone);
}
static struct malloc_introspection_t szone_introspect = {(void *)szone_ptr_in_use_enumerator, (void *)szone_good_size, (void *)szone_check, (void *)szone_print, szone_log, (void *)szone_force_lock, (void *)szone_force_unlock};
malloc_zone_t *create_scalable_zone(size_t initial_size, unsigned debug_flags) {
    szone_t *szone;
    vm_address_t addr;
    msize_t msize;
    size_t msize_used = 0;
    // malloc_printf("=== create_scalable_zone(%d,%d);\n", initial_size, debug_flags);
    if (!vm_page_shift) {
	unsigned page;
	vm_page_shift = 12; // the minimal for page sizes
	page = 1 << vm_page_shift;
	while (page != vm_page_size) { page += page; vm_page_shift++;};
	if (MIN_BLOCK * QUANTUM < sizeof(free_list_t) + PTR_HEADER_SIZE) {
	    malloc_printf("*** malloc[%d]: inconsistent parameters\n", getpid());
	}
    }
    addr = allocate_pages(NULL, REGION_SIZE, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC));
    if (!addr) return NULL;
    szone = (void *)(addr + QUANTUM);
    msize = (sizeof(szone_t) + PTR_HEADER_SIZE + QUANTUM-1) >> SHIFT_QUANTUM;
    MSIZE_FLAGS_FOR_PTR(szone) = msize;
    msize_used += msize; szone->num_small_objects++;
    szone->basic_zone.size = (void *)szone_size;
    szone->basic_zone.malloc = (void *)szone_malloc;
    szone->basic_zone.calloc = (void *)szone_calloc;
    szone->basic_zone.valloc = (void *)szone_valloc;
    szone->basic_zone.free = (void *)szone_free;
    szone->basic_zone.realloc = (void *)szone_realloc;
    szone->basic_zone.destroy = (void *)szone_destroy;
    szone->basic_zone.introspect = &szone_introspect;
    LOCK_INIT(szone->lock);
    szone->debug_flags = debug_flags;
    szone->regions = (void *)((char *)szone + (msize << SHIFT_QUANTUM));
    // we always reserve room for a few regions
    msize = (sizeof(region_t) * INITIAL_NUM_REGIONS + PTR_HEADER_SIZE + QUANTUM-1) >> SHIFT_QUANTUM;
    if (msize < MIN_BLOCK) msize = MIN_BLOCK;
    MSIZE_FLAGS_FOR_PTR(szone->regions) = msize;
    msize_used += msize; szone->num_small_objects++;
    szone->regions[0] = addr;
    szone->num_regions = 1;
    szone->num_bytes_free_in_last_region = REGION_SIZE - ((msize_used+1) << SHIFT_QUANTUM) + PTR_HEADER_SIZE;
    CHECK(szone, __PRETTY_FUNCTION__);
    return (malloc_zone_t *)szone;
}
/********* The following is private API for debug and perf tools ************/
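
/*
 * Summary of the info[] slots filled by scalable_zone_info() below (added for
 * clarity, derived from the assignments in the function itself):
 *   info[0] objects in use (small + large + huge)   info[1] bytes in use
 *   info[2] small objects      info[3] bytes in small objects
 *   info[4] large objects      info[5] bytes in large objects
 *   info[6] huge entries       info[7] bytes in huge objects
 *   info[8] debug_flags
 */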
void scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count) {
    szone_t *szone = (void *)zone;
    unsigned info[scalable_zone_info_count];
    // We do not lock to facilitate debug
    info[2] = szone->num_small_objects;
    info[3] = szone->num_bytes_in_small_objects;
    info[4] = szone->num_large_objects_in_use;
    info[5] = szone->num_bytes_in_large_objects;
    info[6] = szone->num_huge_entries;
    info[7] = szone->num_bytes_in_huge_objects;
    info[8] = szone->debug_flags;
    info[0] = info[2] + info[4] + info[6];
    info[1] = info[3] + info[5] + info[7];
    memcpy(info_to_fill, info, sizeof(unsigned)*count);
}
/********* Support code for emacs unexec ************/

/* History of freezedry version numbers:
 *
 * 1) Old malloc (before the scalable malloc implementation in this file
 *    existed).
 * 2) Original freezedrying code for scalable malloc.  This code was apparently
 *    based on the old freezedrying code and was fundamentally flawed in its
 *    assumption that tracking allocated memory regions was adequate to fake
 *    operations on freezedried memory.  This doesn't work, since scalable
 *    malloc does not store flags in front of large page-aligned allocations.
 * 3) Original szone-based freezedrying code.
 *
 * No version backward compatibility is provided, but the version number does
 * make it possible for malloc_jumpstart() to return an error if the application
 * was freezedried with an older version of malloc.
 */
#define MALLOC_FREEZEDRY_VERSION 3
static void *frozen_malloc(szone_t *zone, size_t new_size) {
    return malloc(new_size);
}

static void *frozen_calloc(szone_t *zone, size_t num_items, size_t size) {
    return calloc(num_items, size);
}

static void *frozen_valloc(szone_t *zone, size_t new_size) {
    return valloc(new_size);
}

static void *frozen_realloc(szone_t *zone, void *ptr, size_t new_size) {
    size_t old_size = szone_size(zone, ptr);
    void *new_ptr;

    if (new_size <= old_size) {
	return ptr;
    }

    new_ptr = malloc(new_size);

    if (old_size > 0) {
	memcpy(new_ptr, ptr, old_size);
    }

    return new_ptr;
}

static void frozen_free(szone_t *zone, void *ptr) {
}

static void frozen_destroy(szone_t *zone) {
}
/********* Pseudo-private API for emacs unexec ************/

/*
 * malloc_freezedry() records all of the szones in use, so that they can be
 * partially reconstituted by malloc_jumpstart().  Due to the differences
 * between reconstituted memory regions and those created by the szone code,
 * care is taken not to reallocate from the freezedried memory, except in the
 * case of a non-growing realloc().
 *
 * Due to the flexibility provided by the zone registration mechanism, it is
 * impossible to implement generic freezedrying for any zone type.  This code
 * only handles applications that use the szone allocator, so malloc_freezedry()
 * returns 0 (error) if any non-szone zones are encountered.
 */
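
/*
 * Illustrative sketch of the freezedry protocol (added for clarity, never
 * compiled; the helper name is hypothetical).  An unexec-style dumper calls
 * malloc_freezedry() before writing the image and embeds the returned cookie
 * in it; the restarted image passes that cookie to malloc_jumpstart() so the
 * recorded szones are re-registered with the frozen_* stubs installed.
 */
#if 0
static int example_freezedry_dump_and_restart(void) {
    int cookie = malloc_freezedry();	// 0 means a non-szone zone was found
    if (!cookie) return -1;
    /* ... write the executable image, embedding cookie in it ... */

    /* Later, early in the startup of the dumped image: */
    if (malloc_jumpstart(cookie)) {
	/* Freezedry version mismatch; the old heap cannot be revived. */
	return -1;
    }
    return 0;
}
#endif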
int malloc_freezedry(void) {
    extern unsigned malloc_num_zones;
    extern malloc_zone_t **malloc_zones;
    malloc_frozen *data;
    unsigned i;

    /* Allocate space in which to store the freezedry state. */
    data = (malloc_frozen *) malloc(sizeof(malloc_frozen));

    /* Set freezedry version number so that malloc_jumpstart() can check for
     * compatibility. */
    data->version = MALLOC_FREEZEDRY_VERSION;

    /* Allocate the array of szone pointers. */
    data->nszones = malloc_num_zones;
    data->szones = (szone_t *) calloc(malloc_num_zones, sizeof(szone_t));

    /* Fill in the array of szone structures.  They are copied rather than
     * referenced, since the originals are likely to be clobbered during malloc
     * initialization. */
    for (i = 0; i < malloc_num_zones; i++) {
	if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) {
	    /* Unknown zone type. */
	    free(data->szones);
	    free(data);
	    return 0;
	}
	memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t));
    }

    return (int) data;
}
int malloc_jumpstart(int cookie) {
    malloc_frozen *data = (malloc_frozen *) cookie;
    unsigned i;

    if (data->version != MALLOC_FREEZEDRY_VERSION) {
	/* Unsupported freezedry version. */
	return 1;
    }

    for (i = 0; i < data->nszones; i++) {
	/* Set function pointers.  Even the functions that stay the same must be
	 * set, since there are no guarantees that they will be mapped to the
	 * same addresses. */
	data->szones[i].basic_zone.size = (void *) szone_size;
	data->szones[i].basic_zone.malloc = (void *) frozen_malloc;
	data->szones[i].basic_zone.calloc = (void *) frozen_calloc;
	data->szones[i].basic_zone.valloc = (void *) frozen_valloc;
	data->szones[i].basic_zone.free = (void *) frozen_free;
	data->szones[i].basic_zone.realloc = (void *) frozen_realloc;
	data->szones[i].basic_zone.destroy = (void *) frozen_destroy;
	data->szones[i].basic_zone.introspect = &szone_introspect;

	/* Register the freezedried zone. */
	malloc_zone_register(&data->szones[i].basic_zone);
    }

    return 0;
}