/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Author: Bertrand Serlet, August 1999 */

#import "scalable_malloc.h"

#import <pthread_internals.h> // for spin lock

#include <mach/vm_statistics.h>

/********************* DEFINITIONS ************************/
static unsigned vm_page_shift = 0; // guaranteed to be initialized by zone creation

#define DEBUG_MALLOC 0 // set to one to debug malloc itself
#define DEBUG_CLIENT 0 // set to one to help debug a nasty memory smasher

#if DEBUG_MALLOC || DEBUG_CLIENT
#warning DEBUG ENABLED
#endif
#define CHECK_REGIONS (1 << 31)

#define VM_COPY_THRESHOLD (40 * 1024)
    // When all memory is touched after a copy, vm_copy() is always a loss
    // But if the memory is only read, vm_copy() wins over memmove() at 3 or 4 pages (on a G3/300MHz)
    // This must be larger than LARGE_THRESHOLD

#define KILL_THRESHOLD (32 * 1024)

#define LARGE_THRESHOLD (3 * vm_page_size) // at or above this size use "large"

#define SHIFT_QUANTUM 4 // required for AltiVec
#define QUANTUM (1 << SHIFT_QUANTUM) // allocation quantum
#define MIN_BLOCK 1 // minimum size, in QUANTUM multiples
/* The header of a small block in use contains its size (expressed as a multiple of QUANTUM, header included), or 0.
   If 0, the block is either free (in which case its size is stored at the block itself), or it is the last block
   (indicated by being beyond the region, or by the block itself containing 0). */
#define PTR_HEADER_SIZE (sizeof(msize_t))
#define FOLLOWING_PTR(ptr,msize) (((char *)(ptr)) + ((msize) << SHIFT_QUANTUM))
#define PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-2]
#define THIS_FREE 0x8000 // indicates this block is free
#define PREV_FREE 0x4000 // indicates the previous block is free
#define MSIZE_FLAGS_FOR_PTR(ptr) (((msize_t *)(ptr))[-1])
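// Illustrative sketch (not part of the allocator; the helper name is hypothetical):
// how the header macros above combine. For a block at ptr with msize 4 (4 * QUANTUM
// = 64 bytes, header included), MSIZE_FLAGS_FOR_PTR(ptr) reads the msize_t stored
// just before ptr, and FOLLOWING_PTR(ptr, 4) is ptr + 64, where the next block's
// header lives. Kept under #if 0 so it never compiles into the zone.
#if 0
static void example_small_header(void *ptr) {
    msize_t msize_and_flags = MSIZE_FLAGS_FOR_PTR(ptr);            // header of this block
    msize_t msize = msize_and_flags & ~(THIS_FREE | PREV_FREE);    // size in QUANTUM units
    void *next = FOLLOWING_PTR(ptr, msize);                        // start of the next block
    if (msize_and_flags & THIS_FREE) { /* block sits on a free list */ }
    if (msize_and_flags & PREV_FREE) { /* PREVIOUS_MSIZE(ptr) holds the previous block's msize */ }
    (void)next;
}
#endif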
#define REGION_SIZE (1 << (16 - 2 + SHIFT_QUANTUM)) // since we only have 16 bits for msize_t, and 1 bit is taken by THIS_FREE and 1 by PREV_FREE

#define INITIAL_NUM_REGIONS 8 // must always be at least 2 to always have 1 slot empty

#define CHECKSUM_MAGIC 0x357B

#define PROTECT_SMALL 0 // should be 0: 1 is too slow for normal use

#define MAX_RECORDER_BUFFER 256
typedef unsigned short msize_t; // a size in multiples of QUANTUM (i.e. bytes >> SHIFT_QUANTUM)
typedef struct {
    unsigned address_and_num_pages;
    // this type represents both an address and a number of pages
    // the low bits are the number of pages
    // the high bits are the address
    // note that the exact number of bits used depends on the page size
    // also, this cannot represent pointers larger than 1 << (vm_page_shift * 2)
} compact_range_t;
typedef vm_address_t region_t;

typedef compact_range_t large_entry_t;

typedef vm_range_t huge_entry_t;

typedef unsigned short grain_t;
typedef struct {
    malloc_zone_t basic_zone;
    pthread_lock_t lock;
    unsigned debug_flags;
    void *log_address;

    /* Regions for small objects */
    unsigned num_regions;
    region_t *regions;
        // this array is always created with 1 extra slot to be able to add a region without taking memory right away
    unsigned last_region_hit;
    free_list_t *free_list[MAX_GRAIN];
    unsigned num_bytes_free_in_last_region; // these bytes are cleared
    unsigned num_small_objects;
    unsigned num_bytes_in_small_objects;

    /* large objects: vm_page_shift <= log2(size) < 2 * vm_page_shift */
    unsigned num_large_objects_in_use;
    unsigned num_large_entries;
    unsigned num_bytes_in_large_objects;
    large_entry_t *large_entries;
        // large_entries are hashed by location
        // large_entries that are 0 should be discarded

    /* huge objects: log2(size) >= 2 * vm_page_shift */
    unsigned num_bytes_in_huge_objects;
    unsigned num_huge_entries;
    huge_entry_t *huge_entries;
} szone_t;
static void *szone_malloc(szone_t *szone, size_t size);
static void *szone_valloc(szone_t *szone, size_t size);
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested);
static void szone_free(szone_t *szone, void *ptr);
static size_t szone_good_size(szone_t *szone, size_t size);
static boolean_t szone_check_all(szone_t *szone, const char *function);
static void szone_print(szone_t *szone, boolean_t verbose);
static INLINE region_t *region_for_ptr_no_lock(szone_t *szone, const void *ptr);
static vm_range_t large_free_no_lock(szone_t *szone, large_entry_t *entry);
#define LOG(szone,ptr) (szone->log_address && (szone->num_small_objects > 8) && (((unsigned)szone->log_address == -1) || (szone->log_address == (void *)(ptr))))

/********************* ACCESSOR MACROS ************************/

#define SZONE_LOCK(szone) LOCK(szone->lock)
#define SZONE_UNLOCK(szone) UNLOCK(szone->lock)

#define CHECK(szone,fun) if ((szone)->debug_flags & CHECK_REGIONS) szone_check_all(szone, fun)

#define REGION_ADDRESS(region) (region)
#define REGION_END(region) (region+REGION_SIZE)
#define LARGE_ENTRY_ADDRESS(entry) (((entry).address_and_num_pages >> vm_page_shift) << vm_page_shift)
#define LARGE_ENTRY_NUM_PAGES(entry) ((entry).address_and_num_pages & ((1 << vm_page_shift) - 1))
#define LARGE_ENTRY_SIZE(entry) (LARGE_ENTRY_NUM_PAGES(entry) << vm_page_shift)
#define LARGE_ENTRY_MATCHES(entry,ptr) (!(((entry).address_and_num_pages - (unsigned)(ptr)) >> vm_page_shift))
#define LARGE_ENTRY_IS_EMPTY(entry) (!((entry).address_and_num_pages))
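// Illustrative sketch (hypothetical values, not part of the allocator): a large
// entry packs a page-aligned address and a page count into one word. Assuming
// 4KB pages (vm_page_shift == 12), a 3-page allocation at 0x5000000 is stored as
// 0x5000000 | 3, and the macros above recover both halves. Guarded by #if 0.
#if 0
static void example_large_entry(void) {
    large_entry_t entry;
    entry.address_and_num_pages = 0x5000000 | 3;      // address | num_pages
    vm_address_t addr = LARGE_ENTRY_ADDRESS(entry);    // 0x5000000 (low 12 bits cleared)
    vm_size_t size = LARGE_ENTRY_SIZE(entry);          // 3 << 12 == 12288 bytes
    // LARGE_ENTRY_MATCHES(entry, p) holds when p is the entry's page-aligned address:
    // the remaining difference is just the page count, which is smaller than a page.
    (void)addr; (void)size;
}
#endif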
/********************* VERY LOW LEVEL UTILITIES ************************/

static void szone_error(szone_t *szone, const char *msg, const void *ptr) {
    if (szone) SZONE_UNLOCK(szone);
    if (ptr) {
        malloc_printf("*** malloc[%d]: error for object %p: %s\n", getpid(), ptr, msg);
        szone_print(szone, 1);
    } else {
        malloc_printf("*** malloc[%d]: error: %s\n", getpid(), msg);
    }
#if DEBUG_CLIENT
    malloc_printf("*** Sleeping to help debug\n");
    sleep(3600); // to help debug
#endif
}
static void protect(szone_t *szone, vm_address_t address, vm_size_t size, unsigned protection, unsigned debug_flags) {
    kern_return_t err;
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) {
        err = vm_protect(mach_task_self(), address - vm_page_size, vm_page_size, 0, protection);
        if (err) malloc_printf("*** malloc[%d]: Can't protect(%x) region for prelude guard page at 0x%x\n", getpid(), protection, address - vm_page_size);
    }
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) {
        err = vm_protect(mach_task_self(), (vm_address_t)(address + size), vm_page_size, 0, protection);
        if (err) malloc_printf("*** malloc[%d]: Can't protect(%x) region for postlude guard page at 0x%x\n", getpid(), protection, address + size);
    }
}
static vm_address_t allocate_pages(szone_t *szone, size_t size, unsigned debug_flags, int vm_page_label) {
    kern_return_t err;
    vm_address_t addr;
    boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    size_t allocation_size = round_page(size);
    if (!allocation_size) allocation_size = vm_page_size;
    if (add_guard_pages) allocation_size += 2 * vm_page_size;
    err = vm_allocate(mach_task_self(), &addr, allocation_size, vm_page_label | 1);
    if (err) {
        malloc_printf("*** malloc: vm_allocate(size=%d) failed with %d\n", size, err);
        szone_error(szone, "Can't allocate region", NULL);
        return NULL;
    }
    if (add_guard_pages) {
        addr += vm_page_size;
        protect(szone, addr, size, 0, debug_flags);
    }
    return addr;
}
static void deallocate_pages(szone_t *szone, vm_address_t addr, size_t size, unsigned debug_flags) {
    kern_return_t err;
    boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    if (add_guard_pages) {
        addr -= vm_page_size;
        size += 2 * vm_page_size;
    }
    err = vm_deallocate(mach_task_self(), addr, size);
    if (err) {
        szone_error(szone, "Can't deallocate_pages region", (void *)addr);
    }
}
static kern_return_t _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) {
    *ptr = (void *)address;
    return 0;
}
/********************* FREE LIST UTILITIES ************************/

static INLINE grain_t grain_for_msize(szone_t *szone, msize_t msize) {
    // assumes msize >= MIN_BLOCK
    if (msize < MIN_BLOCK) {
        szone_error(szone, "grain_for_msize: msize too small", NULL);
    }
    return (msize < MAX_GRAIN + MIN_BLOCK) ? msize - MIN_BLOCK : MAX_GRAIN - 1;
}
static INLINE msize_t msize_for_grain(szone_t *szone, grain_t grain) {
    // returns 0 if the grain holds multiple sizes
    return (grain < MAX_GRAIN - 1) ? grain + MIN_BLOCK : 0;
}
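// Illustrative sketch (hypothetical helper, assuming MIN_BLOCK == 1 and MAX_GRAIN
// larger than the sizes shown): small msizes map one-to-one onto grains, and every
// larger msize shares the last grain, whose msize_for_grain() is 0 because that
// free list holds multiple sizes. Guarded by #if 0.
#if 0
static void example_grain_mapping(szone_t *szone) {
    grain_t g = grain_for_msize(szone, 5);        // 5 - MIN_BLOCK == 4
    msize_t m = msize_for_grain(szone, g);        // 4 + MIN_BLOCK == 5, exact again
    grain_t last = grain_for_msize(szone, 1000);  // clamped to MAX_GRAIN - 1
    // msize_for_grain(szone, last) == 0: the last free list is size-heterogeneous
    (void)m; (void)last;
}
#endif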
static INLINE void free_list_checksum(szone_t *szone, free_list_t *ptr) {
    // We always checksum, as testing whether to do it (based on szone->debug_flags) is as fast as doing it
    if (ptr->checksum != (((unsigned)ptr->previous) ^ ((unsigned)ptr->next) ^ CHECKSUM_MAGIC)) {
        szone_error(szone, "Incorrect checksum for freed object - object was probably modified after being freed; break at szone_error", ptr);
    }
}
static INLINE void free_list_set_checksum(szone_t *szone, free_list_t *ptr) {
    // We always set the checksum, as testing whether to do it (based on szone->debug_flags) is slower than just doing it
    ptr->checksum = ((unsigned)ptr->previous) ^ ((unsigned)ptr->next) ^ CHECKSUM_MAGIC;
}
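// Illustrative sketch (hypothetical helper, not part of the allocator): the
// free-list checksum is just the XOR of the two link pointers with a constant,
// so a stray write to either link (e.g. a use-after-free) is detected the next
// time the node is validated. Guarded by #if 0.
#if 0
static void example_free_list_checksum(free_list_t *node) {
    // setting, as free_list_set_checksum() above does:
    node->checksum = ((unsigned)node->previous) ^ ((unsigned)node->next) ^ CHECKSUM_MAGIC;
    // later, before trusting node->previous / node->next:
    if (node->checksum != (((unsigned)node->previous) ^ ((unsigned)node->next) ^ CHECKSUM_MAGIC)) {
        // object was modified after being freed
    }
}
#endif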
static void free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Adds an item to the proper free list
    // Also marks the header of the block properly
    grain_t grain = grain_for_msize(szone, msize);
    free_list_t *free_ptr = ptr;
    free_list_t *free_head = szone->free_list[grain];
    msize_t *follower = (msize_t *)FOLLOWING_PTR(ptr, msize);
    if (LOG(szone,ptr)) malloc_printf("In free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    if (((unsigned)ptr) & (QUANTUM - 1)) {
        szone_error(szone, "free_list_add_ptr: Unaligned ptr", ptr);
    }
    MSIZE_FLAGS_FOR_PTR(ptr) = msize | THIS_FREE;
    if (free_head) {
        free_list_checksum(szone, free_head);
        if (free_head->previous) {
            malloc_printf("ptr=%p grain=%d free_head=%p previous=%p\n", ptr, grain, free_head, free_head->previous);
            szone_error(szone, "free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
        }
        if (!(MSIZE_FLAGS_FOR_PTR(free_head) & THIS_FREE)) {
            malloc_printf("ptr=%p grain=%d free_head=%p\n", ptr, grain, free_head);
            szone_error(szone, "free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
        }
        if ((grain != MAX_GRAIN-1) && (MSIZE_FLAGS_FOR_PTR(free_head) != (THIS_FREE | msize))) {
            malloc_printf("ptr=%p grain=%d free_head=%p previous_msize=%d\n", ptr, grain, free_head, MSIZE_FLAGS_FOR_PTR(free_head));
            szone_error(szone, "free_list_add_ptr: Internal invariant broken (incorrect msize)", ptr);
        }
        free_head->previous = free_ptr;
        free_list_set_checksum(szone, free_head);
    }
    free_ptr->previous = NULL;
    free_ptr->next = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->free_list[grain] = free_ptr;
    // mark the end of this block
    PREVIOUS_MSIZE(follower) = msize;
    MSIZE_FLAGS_FOR_PTR(follower) |= PREV_FREE;
}
static void free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Removes the item from the proper free list
    // msize could be read, but all callers have it so we pass it in
    grain_t grain = grain_for_msize(szone, msize);
    free_list_t *free_ptr = ptr;
    free_list_t *next = free_ptr->next;
    free_list_t *previous = free_ptr->previous;
    if (LOG(szone,ptr)) malloc_printf("In free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    free_list_checksum(szone, free_ptr);
    if (!previous) {
        if (szone->free_list[grain] != ptr) {
            malloc_printf("ptr=%p grain=%d msize=%d szone->free_list[grain]=%p\n", ptr, grain, msize, szone->free_list[grain]);
            szone_error(szone, "free_list_remove_ptr: Internal invariant broken (szone->free_list[grain])", ptr);
            return;
        }
        szone->free_list[grain] = next;
    } else {
        previous->next = next;
        free_list_set_checksum(szone, previous);
    }
    if (next) {
        next->previous = previous;
        free_list_set_checksum(szone, next);
    }
    MSIZE_FLAGS_FOR_PTR(FOLLOWING_PTR(ptr, msize)) &= ~ PREV_FREE;
}
static boolean_t free_list_check(szone_t *szone, grain_t grain) {
    unsigned count = 0;
    free_list_t *ptr = szone->free_list[grain];
    free_list_t *previous = NULL;
    while (ptr) {
        msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
        count++;
        if (!(msize_and_free & THIS_FREE)) {
            malloc_printf("*** malloc[%d]: In-use ptr in free list grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
            return 0;
        }
        if (((unsigned)ptr) & (QUANTUM - 1)) {
            malloc_printf("*** malloc[%d]: Unaligned ptr in free list grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
            return 0;
        }
        if (!region_for_ptr_no_lock(szone, ptr)) {
            malloc_printf("*** malloc[%d]: Ptr not in szone grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
            return 0;
        }
        free_list_checksum(szone, ptr);
        if (ptr->previous != previous) {
            malloc_printf("*** malloc[%d]: Previous incorrectly set grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
            return 0;
        }
        if ((grain != MAX_GRAIN-1) && (msize_and_free != (msize_for_grain(szone, grain) | THIS_FREE))) {
            malloc_printf("*** malloc[%d]: Incorrect msize for grain=%d count=%d ptr=%p msize=%d\n", getpid(), grain, count, ptr, msize_and_free);
            return 0;
        }
        previous = ptr;
        ptr = ptr->next;
    }
    return 1;
}
/********************* SMALL BLOCKS MANAGEMENT ************************/

static INLINE region_t *region_for_ptr_no_lock(szone_t *szone, const void *ptr) {
    region_t *first_region = szone->regions;
    region_t *region = first_region + szone->last_region_hit;
    region_t this = *region;
    if ((unsigned)ptr - (unsigned)REGION_ADDRESS(this) < (unsigned)REGION_SIZE) {
        return region;
    }
    // We iterate in reverse order because the last regions are the most likely
    region = first_region + szone->num_regions;
    while (region != first_region) {
        this = *(--region);
        if ((unsigned)ptr - (unsigned)REGION_ADDRESS(this) < (unsigned)REGION_SIZE) {
            szone->last_region_hit = region - first_region;
            return region;
        }
    }
    return NULL;
}
static INLINE void small_free_no_lock(szone_t *szone, region_t *region, void *ptr, msize_t msize_and_free) {
    msize_t msize = msize_and_free & ~ PREV_FREE;
    size_t original_size = msize << SHIFT_QUANTUM;
    void *next_block = ((char *)ptr + original_size);
    msize_t next_msize_and_free;
    if (LOG(szone,ptr)) malloc_printf("In small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    if (msize < MIN_BLOCK) {
        malloc_printf("In small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
        szone_error(szone, "Trying to free small block that is too small", ptr);
    }
    if (((vm_address_t)next_block < REGION_END(*region)) && ((next_msize_and_free = MSIZE_FLAGS_FOR_PTR(next_block)) & THIS_FREE)) {
        // If the next block is free, we coalesce
        msize_t next_msize = next_msize_and_free & ~THIS_FREE;
        if (LOG(szone,ptr)) malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", ptr, msize, next_block, next_msize);
        free_list_remove_ptr(szone, next_block, next_msize);
        msize += next_msize;
    }
    // Let's try to coalesce backwards now
    if (msize_and_free & PREV_FREE) {
        msize_t previous_msize = PREVIOUS_MSIZE(ptr);
        void *previous = ptr - (previous_msize << SHIFT_QUANTUM);
        if (LOG(szone,previous)) malloc_printf("In small_free_no_lock(), coalesced backwards for %p previous=%p, msize=%d\n", ptr, previous, previous_msize);
        if (!previous_msize || (previous_msize >= (((vm_address_t)ptr - REGION_ADDRESS(*region)) >> SHIFT_QUANTUM))) {
            szone_error(szone, "Invariant 1 broken when coalescing backwards", ptr);
        }
        if (MSIZE_FLAGS_FOR_PTR(previous) != (previous_msize | THIS_FREE)) {
            malloc_printf("previous=%p its_msize_and_free=0x%x previous_msize=%d\n", previous, MSIZE_FLAGS_FOR_PTR(previous), previous_msize);
            szone_error(szone, "Invariant 3 broken when coalescing backwards", ptr);
        }
        free_list_remove_ptr(szone, previous, previous_msize);
        ptr = previous;
        msize += previous_msize;
        if (msize & PREV_FREE) {
            malloc_printf("In small_free_no_lock(), after coalescing with previous ptr=%p, msize=%d previous_msize=%d\n", ptr, msize, previous_msize);
            szone_error(szone, "Incorrect coalescing", ptr);
        }
    }
    if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) {
        if (!msize) {
            szone_error(szone, "Incorrect size information - block header was damaged", ptr);
        } else {
            memset(ptr, 0x55, (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE);
        }
    }
    free_list_add_ptr(szone, ptr, msize);
    CHECK(szone, "small_free_no_lock: added to free list");
    szone->num_small_objects--;
    szone->num_bytes_in_small_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
}
static void *small_malloc_from_region_no_lock(szone_t *szone, msize_t msize) {
    // Allocates from the last region or a freshly allocated region
    region_t *last_region = szone->regions + szone->num_regions - 1;
    vm_address_t new_address;
    void *ptr;
    msize_t msize_and_free;
    unsigned region_capacity;
    ptr = (void *)(REGION_END(*last_region) - szone->num_bytes_free_in_last_region + PTR_HEADER_SIZE);
    if (((vm_address_t)ptr) & (QUANTUM - 1)) {
        szone_error(szone, "Invariant broken while using end of region", ptr);
    }
    msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
    if (msize_and_free != PREV_FREE && msize_and_free != 0) {
        malloc_printf("*** malloc[%d]: msize_and_free = %d\n", getpid(), msize_and_free);
        szone_error(szone, "Invariant broken when allocating at end of zone", ptr);
    }
    // In order to make sure we don't have 2 free pointers following themselves, if the last item is a free item, we combine it and clear it
    if (msize_and_free == PREV_FREE) {
        msize_t previous_msize = PREVIOUS_MSIZE(ptr);
        void *previous = ptr - (previous_msize << SHIFT_QUANTUM);
        if (LOG(szone, ptr)) malloc_printf("Combining last with free space at %p\n", ptr);
        if (!previous_msize || (previous_msize >= (((vm_address_t)ptr - REGION_ADDRESS(*last_region)) >> SHIFT_QUANTUM)) || (MSIZE_FLAGS_FOR_PTR(previous) != (previous_msize | THIS_FREE))) {
            szone_error(szone, "Invariant broken when coalescing backwards at end of zone", ptr);
        }
        free_list_remove_ptr(szone, previous, previous_msize);
        szone->num_bytes_free_in_last_region += previous_msize << SHIFT_QUANTUM;
        memset(previous, 0, previous_msize << SHIFT_QUANTUM);
        MSIZE_FLAGS_FOR_PTR(previous) = 0;
        ptr = previous;
    }
    // first try at the end of the last region
    CHECK(szone, __PRETTY_FUNCTION__);
    if (szone->num_bytes_free_in_last_region >= (msize << SHIFT_QUANTUM)) {
        szone->num_bytes_free_in_last_region -= (msize << SHIFT_QUANTUM);
        szone->num_small_objects++;
        szone->num_bytes_in_small_objects += msize << SHIFT_QUANTUM;
        MSIZE_FLAGS_FOR_PTR(ptr) = msize;
        return ptr;
    }
    // time to create a new region
    new_address = allocate_pages(szone, REGION_SIZE, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_SMALL));
    if (!new_address) {
        return NULL;
    }
    // let's prepare to free the remnants of last_region
    if (szone->num_bytes_free_in_last_region >= QUANTUM) {
        msize_t this_msize = szone->num_bytes_free_in_last_region >> SHIFT_QUANTUM;
        // malloc_printf("Entering last block %p size=%d\n", ptr, this_msize << SHIFT_QUANTUM);
        if (this_msize >= MIN_BLOCK) {
            free_list_add_ptr(szone, ptr, this_msize);
        } else {
            // malloc_printf("Leaking last block at %p\n", ptr);
        }
        szone->num_bytes_free_in_last_region -= this_msize << SHIFT_QUANTUM; // to avoid coming back here
    }
    last_region[1] = new_address;
    szone->num_regions++;
    szone->num_bytes_free_in_last_region = REGION_SIZE - QUANTUM + PTR_HEADER_SIZE - (msize << SHIFT_QUANTUM);
    ptr = (void *)(new_address + QUANTUM); // waste the first bytes
    region_capacity = (MSIZE_FLAGS_FOR_PTR(szone->regions) * QUANTUM - PTR_HEADER_SIZE) / sizeof(region_t);
    if (szone->num_regions >= region_capacity) {
        unsigned new_capacity = region_capacity * 2 + 1;
        msize_t new_msize = (new_capacity * sizeof(region_t) + PTR_HEADER_SIZE + QUANTUM - 1) / QUANTUM;
        region_t *new_regions = ptr;
        // malloc_printf("Now %d regions growing regions %p to %d\n", szone->num_regions, szone->regions, new_capacity);
        MSIZE_FLAGS_FOR_PTR(new_regions) = new_msize;
        szone->num_small_objects++;
        szone->num_bytes_in_small_objects += new_msize << SHIFT_QUANTUM;
        memcpy(new_regions, szone->regions, szone->num_regions * sizeof(region_t));
        // We intentionally leak the previous regions pointer to avoid multi-threading crashes if another thread was reading it (unlocked) while we are changing it
        // Given that in practice the number of regions is typically a handful, this should not be a big deal
        szone->regions = new_regions;
        ptr += (new_msize << SHIFT_QUANTUM);
        szone->num_bytes_free_in_last_region -= (new_msize << SHIFT_QUANTUM);
        // malloc_printf("Regions is now %p next ptr is %p\n", szone->regions, ptr);
    }
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += msize << SHIFT_QUANTUM;
    MSIZE_FLAGS_FOR_PTR(ptr) = msize;
    return ptr;
}
static boolean_t szone_check_region(szone_t *szone, region_t *region) {
    void *ptr = (void *)REGION_ADDRESS(*region) + QUANTUM;
    vm_address_t region_end = REGION_END(*region);
    int is_last_region = region == szone->regions + szone->num_regions - 1;
    msize_t prev_free = 0;
    while ((vm_address_t)ptr < region_end) {
        msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
        if (!(msize_and_free & THIS_FREE)) {
            msize_t msize = msize_and_free & ~PREV_FREE;
            if ((msize_and_free & PREV_FREE) != prev_free) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (prev_free=%d) this msize=%d\n", getpid(), ptr, prev_free, msize_and_free);
                return 0;
            }
            if (!msize) {
                int extra = (is_last_region) ? szone->num_bytes_free_in_last_region : QUANTUM;
                if (((unsigned)(ptr + extra)) < region_end) {
                    malloc_printf("*** malloc[%d]: invariant broken at region end: ptr=%p extra=%d index=%d num_regions=%d end=%p\n", getpid(), ptr, extra, region - szone->regions, szone->num_regions, (void *)region_end);
                    return 0;
                }
                break; // last encountered
            }
            if (msize > (LARGE_THRESHOLD / QUANTUM)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p this msize=%d - size is too large\n", getpid(), ptr, msize_and_free);
                return 0;
            }
            if ((msize < MIN_BLOCK) && ((unsigned)ptr != region_end - QUANTUM)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p this msize=%d - size is too small\n", getpid(), ptr, msize_and_free);
                return 0;
            }
            ptr += msize << SHIFT_QUANTUM;
            prev_free = 0;
            if (is_last_region && ((vm_address_t)ptr - PTR_HEADER_SIZE > region_end - szone->num_bytes_free_in_last_region)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p this msize=%d - block extends beyond allocated region\n", getpid(), ptr, msize_and_free);
                return 0;
            }
        } else {
            // free pointer
            msize_t msize = msize_and_free & ~THIS_FREE;
            free_list_t *free_head = ptr;
            msize_t *follower = (void *)FOLLOWING_PTR(ptr, msize);
            if ((msize_and_free & PREV_FREE) && !prev_free) {
                malloc_printf("*** malloc[%d]: invariant broken for free block %p this msize=%d: PREV_FREE set while previous block is in use\n", getpid(), ptr, msize);
                return 0;
            }
            if (msize < MIN_BLOCK) {
                malloc_printf("*** malloc[%d]: invariant broken for free block %p this msize=%d\n", getpid(), ptr, msize);
                return 0;
            }
            if (prev_free) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (2 free in a row)\n", getpid(), ptr);
                return 0;
            }
            free_list_checksum(szone, free_head);
            if (free_head->previous && !(MSIZE_FLAGS_FOR_PTR(free_head->previous) & THIS_FREE)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (previous %p is not a free pointer)\n", getpid(), ptr, free_head->previous);
                return 0;
            }
            if (free_head->next && !(MSIZE_FLAGS_FOR_PTR(free_head->next) & THIS_FREE)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (next is not a free pointer)\n", getpid(), ptr);
                return 0;
            }
            if (PREVIOUS_MSIZE(follower) != msize) {
                malloc_printf("*** malloc[%d]: invariant broken for free %p followed by %p in region [%x-%x] (end marker incorrect) should be %d; in fact %d\n", getpid(), ptr, follower, REGION_ADDRESS(*region), region_end, msize, PREVIOUS_MSIZE(follower));
                return 0;
            }
            ptr = follower;
            prev_free = PREV_FREE;
        }
    }
    return 1;
}
static kern_return_t small_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned num_regions, memory_reader_t reader, vm_range_recorder_t recorder) {
    region_t *regions;
    unsigned index = 0;
    vm_range_t buffer[MAX_RECORDER_BUFFER];
    unsigned count = 0;
    kern_return_t err;
    err = reader(task, region_address, sizeof(region_t) * num_regions, (void **)&regions);
    if (err) return err;
    while (index < num_regions) {
        region_t region = regions[index++];
        vm_range_t range = {REGION_ADDRESS(region), REGION_SIZE};
        vm_address_t start = range.address + QUANTUM;
        // malloc_printf("Enumerating small ptrs for Region starting at 0x%x\n", start);
        if (type_mask & MALLOC_PTR_REGION_RANGE_TYPE) recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &range, 1);
        if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) while (start < range.address + range.size) {
            void *previous;
            msize_t msize_and_free;
            err = reader(task, start - PTR_HEADER_SIZE, QUANTUM, (void **)&previous);
            if (err) return err;
            previous += PTR_HEADER_SIZE;
            msize_and_free = MSIZE_FLAGS_FOR_PTR(previous);
            if (!(msize_and_free & THIS_FREE)) {
                // block in use
                msize_t msize = msize_and_free & ~PREV_FREE;
                if (!msize) break; // last encountered
                buffer[count].address = start;
                buffer[count].size = (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE;
                count++;
                if (count >= MAX_RECORDER_BUFFER) {
                    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
                    count = 0;
                }
                start += msize << SHIFT_QUANTUM;
            } else {
                // free block
                msize_t msize = msize_and_free & ~THIS_FREE;
                start += msize << SHIFT_QUANTUM;
            }
        }
        // malloc_printf("End region - count=%d\n", count);
    }
    if (count) recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
    return 0;
}
static INLINE void *small_malloc_from_free_list(szone_t *szone, msize_t msize, boolean_t *locked) {
    void *ptr;
    msize_t this_msize;
    free_list_t **free_list;
    free_list_t **limit = szone->free_list + MAX_GRAIN - 1;
    // first try the small grains
    free_list = szone->free_list + grain_for_msize(szone, msize);
    while (free_list < limit) {
        ptr = *free_list;
        if (ptr) {
            if (!*locked) { *locked = 1; SZONE_LOCK(szone); CHECK(szone, __PRETTY_FUNCTION__); }
            ptr = *free_list;
            if (ptr) {
                // optimistic test worked
                free_list_t *next;
                next = ((free_list_t *)ptr)->next;
                if (next) {
                    next->previous = NULL;
                    free_list_set_checksum(szone, next);
                }
                *free_list = next;
                this_msize = MSIZE_FLAGS_FOR_PTR(ptr) & ~THIS_FREE;
                MSIZE_FLAGS_FOR_PTR(FOLLOWING_PTR(ptr, this_msize)) &= ~ PREV_FREE;
                goto add_leftover_and_proceed;
            }
        }
        free_list++;
    }
    // We now check the large grains for one that is big enough
    if (!*locked) { *locked = 1; SZONE_LOCK(szone); CHECK(szone, __PRETTY_FUNCTION__); }
    ptr = *free_list;
    while (ptr) {
        this_msize = MSIZE_FLAGS_FOR_PTR(ptr) & ~THIS_FREE;
        if (this_msize >= msize) {
            free_list_remove_ptr(szone, ptr, this_msize);
            goto add_leftover_and_proceed;
        }
        ptr = ((free_list_t *)ptr)->next;
    }
    return NULL;
add_leftover_and_proceed:
    if (this_msize >= msize + MIN_BLOCK) {
        if (LOG(szone,ptr)) malloc_printf("In small_malloc_should_clear(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
        free_list_add_ptr(szone, ptr + (msize << SHIFT_QUANTUM), this_msize - msize);
        this_msize = msize;
    }
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += this_msize << SHIFT_QUANTUM;
    if (LOG(szone,ptr)) malloc_printf("In small_malloc_should_clear(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
    MSIZE_FLAGS_FOR_PTR(ptr) = this_msize;
    return ptr;
}
static INLINE void *small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) {
    boolean_t locked = 0;
    void *ptr;
    if (! (msize & 0xffff)) {
        szone_error(szone, "Invariant broken (!msize) in allocation (region)", NULL);
    }
    if (msize < MIN_BLOCK) {
        szone_error(szone, "Invariant broken (msize too small) in allocation (region)", NULL);
    }
    ptr = small_malloc_from_free_list(szone, msize, &locked);
    if (ptr) {
        CHECK(szone, __PRETTY_FUNCTION__);
        SZONE_UNLOCK(szone);
        if (cleared_requested) memset(ptr, 0, (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE);
        return ptr;
    }
    if (!locked) SZONE_LOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    ptr = small_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
    CHECK(szone, __PRETTY_FUNCTION__);
    SZONE_UNLOCK(szone);
    return ptr;
}
static INLINE void *small_malloc_cleared_no_lock(szone_t *szone, msize_t msize) {
    // tries to allocate a small, cleared block
    boolean_t locked = 1;
    void *ptr;
    ptr = small_malloc_from_free_list(szone, msize, &locked);
    if (ptr) {
        memset(ptr, 0, (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE);
        return ptr;
    }
    ptr = small_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
    return ptr;
}
/********************* LARGE ENTRY UTILITIES ************************/

static void large_debug_print(szone_t *szone) {
    unsigned num_large_entries = szone->num_large_entries;
    unsigned index = num_large_entries;
    while (index--) {
        large_entry_t *range = szone->large_entries + index;
        large_entry_t entry = *range;
        if (!LARGE_ENTRY_IS_EMPTY(entry)) malloc_printf("%d: 0x%x(%dKB); ", index, LARGE_ENTRY_ADDRESS(entry), LARGE_ENTRY_SIZE(entry)/1024);
    }
    malloc_printf("\n");
}
static large_entry_t *large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) {
    // result only valid during a lock
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index;
    unsigned index;
    if (!num_large_entries) return NULL;
    hash_index = ((unsigned)ptr >> vm_page_shift) % num_large_entries;
    index = hash_index;
    do {
        large_entry_t *range = szone->large_entries + index;
        large_entry_t entry = *range;
        if (LARGE_ENTRY_MATCHES(entry, ptr)) return range;
        if (LARGE_ENTRY_IS_EMPTY(entry)) return NULL; // end of chain
        index++; if (index == num_large_entries) index = 0;
    } while (index != hash_index);
    return NULL;
}
static void large_entry_insert_no_lock(szone_t *szone, large_entry_t range) {
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index = (range.address_and_num_pages >> vm_page_shift) % num_large_entries;
    unsigned index = hash_index;
    // malloc_printf("Before insertion of 0x%x\n", LARGE_ENTRY_ADDRESS(range));
    do {
        large_entry_t *entry = szone->large_entries + index;
        if (LARGE_ENTRY_IS_EMPTY(*entry)) {
            *entry = range;
            return; // end of chain
        }
        index++; if (index == num_large_entries) index = 0;
    } while (index != hash_index);
}
static INLINE void large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) {
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index = entry - szone->large_entries;
    unsigned index = hash_index;
    do {
        large_entry_t range;
        index++; if (index == num_large_entries) index = 0;
        range = szone->large_entries[index];
        if (LARGE_ENTRY_IS_EMPTY(range)) return;
        szone->large_entries[index].address_and_num_pages = 0;
        large_entry_insert_no_lock(szone, range); // this will reinsert in the proper place
    } while (index != hash_index);
}
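// Illustrative sketch (hypothetical helper, not part of the allocator): large
// entries live in an open-addressed hash table keyed by page number and probed
// linearly with wrap-around. Deletion zeroes a slot and reinserts the entries
// that follow it (the rehash above), so a lookup may safely stop at the first
// empty slot, as the real lookup above does. Guarded by #if 0.
#if 0
static large_entry_t *example_large_lookup(szone_t *szone, void *ptr) {
    unsigned num = szone->num_large_entries;
    unsigned index = ((unsigned)ptr >> vm_page_shift) % num;
    unsigned start = index;
    do {
        large_entry_t *slot = szone->large_entries + index;
        if (LARGE_ENTRY_MATCHES(*slot, ptr)) return slot;    // found it
        if (LARGE_ENTRY_IS_EMPTY(*slot)) return NULL;        // end of probe chain
        if (++index == num) index = 0;                       // wrap around
    } while (index != start);
    return NULL;
}
#endif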
static INLINE large_entry_t *large_entries_alloc_no_lock(szone_t *szone, unsigned num) {
    size_t size = num * sizeof(large_entry_t);
    boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;
    if (is_vm_allocation) {
        return (void *)allocate_pages(szone, round_page(size), 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE));
    } else {
        return small_malloc_cleared_no_lock(szone, (size + PTR_HEADER_SIZE + QUANTUM - 1) >> SHIFT_QUANTUM);
    }
}
static void large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num) {
    size_t size = num * sizeof(large_entry_t);
    boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;
    if (is_vm_allocation) {
        deallocate_pages(szone, (vm_address_t)entries, round_page(size), 0);
    } else {
        region_t *region = region_for_ptr_no_lock(szone, entries);
        msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(entries);
        if (msize_and_free & THIS_FREE) {
            szone_error(szone, "Object already freed being freed", entries);
            return;
        }
        small_free_no_lock(szone, region, entries, msize_and_free);
    }
}
static void large_entries_grow_no_lock(szone_t *szone) {
    unsigned old_num_entries = szone->num_large_entries;
    large_entry_t *old_entries = szone->large_entries;
    unsigned new_num_entries = (old_num_entries) ? old_num_entries * 2 + 1 : 15; // always an odd number for good hashing
    large_entry_t *new_entries = large_entries_alloc_no_lock(szone, new_num_entries);
    unsigned index = old_num_entries;
    szone->num_large_entries = new_num_entries;
    szone->large_entries = new_entries;
    // malloc_printf("_grow_large_entries old_num_entries=%d new_num_entries=%d\n", old_num_entries, new_num_entries);
    while (index--) {
        large_entry_t oldRange = old_entries[index];
        if (!LARGE_ENTRY_IS_EMPTY(oldRange)) large_entry_insert_no_lock(szone, oldRange);
    }
    if (old_entries) large_entries_free_no_lock(szone, old_entries, old_num_entries);
}
static vm_range_t large_free_no_lock(szone_t *szone, large_entry_t *entry) {
    // frees the specific entry in the size table
    // returns a range to truly deallocate
    vm_range_t range;
    range.address = LARGE_ENTRY_ADDRESS(*entry);
    range.size = LARGE_ENTRY_SIZE(*entry);
    szone->num_large_objects_in_use--;
    szone->num_bytes_in_large_objects -= range.size;
    if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
        protect(szone, range.address, range.size, VM_PROT_READ | VM_PROT_WRITE, szone->debug_flags);
        range.address -= vm_page_size;
        range.size += 2 * vm_page_size;
    }
    // printf("Entry is 0x%x=%d; cache is 0x%x ; found=0x%x\n", entry, entry-szone->large_entries, szone->large_entries, large_entry_for_pointer_no_lock(szone, (void *)range.address));
    entry->address_and_num_pages = 0;
    large_entries_rehash_after_entry_no_lock(szone, entry);
    if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) {
        malloc_printf("*** malloc[%d]: Freed entry 0x%x still in use; num_large_entries=%d\n", getpid(), range.address, szone->num_large_entries);
        large_debug_print(szone);
    }
    return range;
}
static kern_return_t large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) {
    unsigned index = 0;
    vm_range_t buffer[MAX_RECORDER_BUFFER];
    unsigned count = 0;
    large_entry_t *entries;
    kern_return_t err;
    err = reader(task, large_entries_address, sizeof(large_entry_t) * num_entries, (void **)&entries);
    if (err) return err;
    index = num_entries;
    if ((type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) && (num_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD)) {
        vm_range_t range;
        range.address = large_entries_address;
        range.size = round_page(num_entries * sizeof(large_entry_t));
        recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &range, 1);
    }
    if (type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE)) while (index--) {
        large_entry_t entry = entries[index];
        if (!LARGE_ENTRY_IS_EMPTY(entry)) {
            vm_range_t range;
            range.address = LARGE_ENTRY_ADDRESS(entry);
            range.size = LARGE_ENTRY_SIZE(entry);
            buffer[count++] = range;
            if (count >= MAX_RECORDER_BUFFER) {
                recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count);
                count = 0;
            }
        }
    }
    if (count) recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count);
    return 0;
}
/********************* HUGE ENTRY UTILITIES ************************/

static huge_entry_t *huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) {
    unsigned index = szone->num_huge_entries;
    while (index--) {
        huge_entry_t *huge = szone->huge_entries + index;
        if (huge->address == (vm_address_t)ptr) return huge;
    }
    return NULL;
}
static boolean_t huge_entry_append(szone_t *szone, huge_entry_t huge) {
    // We do a little dance with locking because doing allocation (even in the default szone) may cause something to get freed in this szone, with a deadlock
    huge_entry_t *new_huge_entries = NULL;
    SZONE_LOCK(szone);
    while (1) {
        unsigned num_huge_entries;
        num_huge_entries = szone->num_huge_entries;
        SZONE_UNLOCK(szone);
        // malloc_printf("In huge_entry_append currentEntries=%d\n", num_huge_entries);
        if (new_huge_entries) szone_free(szone, new_huge_entries);
        new_huge_entries = szone_malloc(szone, (num_huge_entries + 1) * sizeof(huge_entry_t));
        if (new_huge_entries == NULL)
            return 1;
        SZONE_LOCK(szone);
        if (num_huge_entries == szone->num_huge_entries) {
            // No change - our malloc still applies
            huge_entry_t *old_huge_entries = szone->huge_entries;
            if (num_huge_entries) memcpy(new_huge_entries, old_huge_entries, num_huge_entries * sizeof(huge_entry_t));
            new_huge_entries[szone->num_huge_entries++] = huge;
            szone->huge_entries = new_huge_entries;
            SZONE_UNLOCK(szone);
            szone_free(szone, old_huge_entries);
            // malloc_printf("Done huge_entry_append now=%d\n", szone->num_huge_entries);
            return 0;
        }
        // the entries changed underneath us; loop and try again
    }
}
static kern_return_t huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) {
    huge_entry_t *entries;
    kern_return_t err;
    err = reader(task, huge_entries_address, sizeof(huge_entry_t) * num_entries, (void **)&entries);
    if (err) return err;
    if (num_entries) recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, entries, num_entries);
    return 0;
}
*szone
, unsigned num_pages
) {
970 vm_address_t addr
= 0;
971 if (!num_pages
) num_pages
= 1; // minimal allocation size for this szone
972 // malloc_printf("In large_and_huge_malloc for %dKB\n", num_pages * vm_page_size / 1024);
973 if (num_pages
>= (1 << vm_page_shift
)) {
975 huge
.size
= num_pages
<< vm_page_shift
;
976 addr
= allocate_pages(szone
, huge
.size
, szone
->debug_flags
, VM_MAKE_TAG(VM_MEMORY_MALLOC_HUGE
));
977 if (!addr
) return NULL
;
979 if (huge_entry_append(szone
, huge
))
982 szone
->num_bytes_in_huge_objects
+= huge
.size
;
984 vm_size_t size
= num_pages
<< vm_page_shift
;
986 addr
= allocate_pages(szone
, size
, szone
->debug_flags
, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE
));
987 if (LOG(szone
, addr
)) malloc_printf("In szone_malloc true large allocation at %p for %dKB\n", (void *)addr
, size
/ 1024);
994 if (large_entry_for_pointer_no_lock(szone
, (void *)addr
)) {
995 malloc_printf("Freshly allocated is already in use: 0x%x\n", addr
);
996 large_debug_print(szone
);
1000 if ((szone
->num_large_objects_in_use
+ 1) * 4 > szone
->num_large_entries
) {
1001 // density of hash table too high; grow table
1002 // we do that under lock to avoid a race
1003 // malloc_printf("In szone_malloc growing hash table current=%d\n", szone->num_large_entries);
1004 large_entries_grow_no_lock(szone
);
1006 // malloc_printf("Inserting large entry (0x%x, %dKB)\n", addr, num_pages * vm_page_size / 1024);
1007 entry
.address_and_num_pages
= addr
| num_pages
;
1009 if (large_entry_for_pointer_no_lock(szone
, (void *)addr
)) {
1010 malloc_printf("Entry about to be added already in use: 0x%x\n", addr
);
1011 large_debug_print(szone
);
1015 large_entry_insert_no_lock(szone
, entry
);
1017 if (!large_entry_for_pointer_no_lock(szone
, (void *)addr
)) {
1018 malloc_printf("Can't find entry just added\n");
1019 large_debug_print(szone
);
1023 // malloc_printf("Inserted large entry (0x%x, %d pages)\n", addr, num_pages);
1024 szone
->num_large_objects_in_use
++;
1025 szone
->num_bytes_in_large_objects
+= size
;
1027 SZONE_UNLOCK(szone
);
1028 return (void *)addr
;
/********************* Zone call backs ************************/

static void szone_free(szone_t *szone, void *ptr) {
    region_t *region;
    large_entry_t *entry;
    vm_range_t vm_range_to_deallocate;
    huge_entry_t *huge;
    if (LOG(szone, ptr)) malloc_printf("In szone_free with %p\n", ptr);
    if (!ptr) return;
    if ((vm_address_t)ptr & (QUANTUM - 1)) {
        szone_error(szone, "Non-aligned pointer being freed", ptr);
        return;
    }
    // try a small pointer
    region = region_for_ptr_no_lock(szone, ptr);
    if (region) {
        // this is indeed a valid pointer
        msize_t msize_and_free;
        SZONE_LOCK(szone);
        msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
        if (msize_and_free & THIS_FREE) {
            szone_error(szone, "Object already freed being freed", ptr);
            return;
        }
        CHECK(szone, __PRETTY_FUNCTION__);
        small_free_no_lock(szone, region, ptr, msize_and_free);
        CHECK(szone, __PRETTY_FUNCTION__);
        SZONE_UNLOCK(szone);
        return;
    }
    if (((unsigned)ptr) & (vm_page_size - 1)) {
        szone_error(szone, "Non-page-aligned, non-allocated pointer being freed", ptr);
        return;
    }
    SZONE_LOCK(szone);
    entry = large_entry_for_pointer_no_lock(szone, ptr);
    if (entry) {
        // malloc_printf("Ready for deallocation [0x%x-%dKB]\n", LARGE_ENTRY_ADDRESS(*entry), LARGE_ENTRY_SIZE(*entry)/1024);
        if (KILL_THRESHOLD && (LARGE_ENTRY_SIZE(*entry) > KILL_THRESHOLD)) {
            // We indicate to the VM system that these pages contain garbage and therefore don't need to be swapped out
            vm_msync(mach_task_self(), LARGE_ENTRY_ADDRESS(*entry), LARGE_ENTRY_SIZE(*entry), VM_SYNC_KILLPAGES);
        }
        vm_range_to_deallocate = large_free_no_lock(szone, entry);
        if (large_entry_for_pointer_no_lock(szone, ptr)) {
            malloc_printf("*** malloc[%d]: Just after freeing 0x%x still in use num_large_entries=%d\n", getpid(), ptr, szone->num_large_entries);
            large_debug_print(szone);
        }
    } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) {
        vm_range_to_deallocate = *huge;
        *huge = szone->huge_entries[--szone->num_huge_entries]; // last entry fills that spot
        szone->num_bytes_in_huge_objects -= vm_range_to_deallocate.size;
    } else {
        large_debug_print(szone);
        szone_error(szone, "Pointer being freed was not allocated", ptr);
        return;
    }
    CHECK(szone, __PRETTY_FUNCTION__);
    SZONE_UNLOCK(szone); // we release the lock asap
    // we deallocate_pages, including guard pages
    if (vm_range_to_deallocate.address) {
        // malloc_printf("About to deallocate 0x%x size %dKB\n", vm_range_to_deallocate.address, vm_range_to_deallocate.size / 1024);
        if (large_entry_for_pointer_no_lock(szone, (void *)vm_range_to_deallocate.address)) {
            malloc_printf("*** malloc[%d]: Invariant broken: 0x%x still in use num_large_entries=%d\n", getpid(), vm_range_to_deallocate.address, szone->num_large_entries);
            large_debug_print(szone);
        }
        deallocate_pages(szone, vm_range_to_deallocate.address, vm_range_to_deallocate.size, 0);
    }
}
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested) {
    void *ptr;
    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
        // think small
        size_t msize = (size + PTR_HEADER_SIZE + QUANTUM - 1) >> SHIFT_QUANTUM;
        if (msize < MIN_BLOCK) msize = MIN_BLOCK;
        ptr = small_malloc_should_clear(szone, msize, cleared_requested);
        if (ptr && ((MSIZE_FLAGS_FOR_PTR(ptr) & ~ PREV_FREE) < msize)) {
            malloc_printf("ptr=%p this=%d msize=%d\n", ptr, MSIZE_FLAGS_FOR_PTR(ptr), (int)msize);
            szone_error(szone, "Pointer allocated has improper size (1)", ptr);
        }
        if (ptr && ((MSIZE_FLAGS_FOR_PTR(ptr) & ~ PREV_FREE) < MIN_BLOCK)) {
            malloc_printf("ptr=%p this=%d msize=%d\n", ptr, MSIZE_FLAGS_FOR_PTR(ptr), (int)msize);
            szone_error(szone, "Pointer allocated has improper size (2)", ptr);
        }
    } else {
        unsigned num_pages;
        num_pages = round_page(size) >> vm_page_shift;
        ptr = large_and_huge_malloc(szone, num_pages);
    }
    if (LOG(szone, ptr)) malloc_printf("szone_malloc returned %p\n", ptr);
    return ptr;
}
static void *szone_malloc(szone_t *szone, size_t size) {
    return szone_malloc_should_clear(szone, size, 0);
}

static void *szone_calloc(szone_t *szone, size_t num_items, size_t size) {
    return szone_malloc_should_clear(szone, num_items * size, 1);
}

static void *szone_valloc(szone_t *szone, size_t size) {
    void *ptr;
    unsigned num_pages;
    num_pages = round_page(size) >> vm_page_shift;
    ptr = large_and_huge_malloc(szone, num_pages);
    if (LOG(szone, ptr)) malloc_printf("szone_valloc returned %p\n", ptr);
    return ptr;
}
static size_t szone_size(szone_t *szone, const void *ptr) {
    size_t size = 0;
    region_t *region;
    large_entry_t *entry;
    huge_entry_t *huge;
    if (LOG(szone, ptr)) malloc_printf("In szone_size for %p (szone=%p)\n", ptr, szone);
    if ((vm_address_t)ptr & (QUANTUM - 1)) return 0;
    if ((((unsigned)ptr) & (vm_page_size - 1)) && (MSIZE_FLAGS_FOR_PTR(ptr) & THIS_FREE)) {
        // not page aligned, but definitely not in use
        return 0;
    }
    // Try a small pointer
    region = region_for_ptr_no_lock(szone, ptr);
    // malloc_printf("FOUND REGION %p\n", region);
    if (region) {
        // this is indeed a valid pointer
        msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
        return (msize_and_free & THIS_FREE) ? 0 : ((msize_and_free & ~PREV_FREE) << SHIFT_QUANTUM) - PTR_HEADER_SIZE;
    }
    if (((unsigned)ptr) & (vm_page_size - 1)) {
        return 0;
    }
    SZONE_LOCK(szone);
    entry = large_entry_for_pointer_no_lock(szone, ptr);
    if (entry) {
        size = LARGE_ENTRY_SIZE(*entry);
    } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) {
        size = huge->size;
    }
    SZONE_UNLOCK(szone);
    // malloc_printf("szone_size for large/huge %p returned %d\n", ptr, (unsigned)size);
    if (LOG(szone, ptr)) malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)size);
    return size;
}
static INLINE int try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) {
    // returns 1 on success
    void *next_block = (char *)ptr + old_size + PTR_HEADER_SIZE;
    msize_t next_msize_and_free;
    msize_t next_msize;
    region_t region;
    msize_t coalesced_msize;
    msize_t leftover_msize;
    msize_t new_msize_and_free;
    void *following_ptr;
    SZONE_LOCK(szone);
    region = szone->regions[szone->num_regions - 1];
    if (((vm_address_t)ptr >= region) && ((vm_address_t)ptr < region + REGION_SIZE) && ((vm_address_t)next_block == REGION_END(region) - szone->num_bytes_free_in_last_region + PTR_HEADER_SIZE)) {
        // This could be optimized but it is so rare it's not worth it
        SZONE_UNLOCK(szone);
        return 0;
    }
    // If the next block is free, we coalesce
    next_msize_and_free = MSIZE_FLAGS_FOR_PTR(next_block);
    if ((vm_address_t)next_block & (QUANTUM - 1)) {
        szone_error(szone, "Internal invariant broken in realloc(next_block)", next_block);
    }
    if (next_msize_and_free & PREV_FREE) {
        malloc_printf("try_realloc_small_in_place: 0x%x=PREV_FREE|%d\n", next_msize_and_free, next_msize_and_free & ~PREV_FREE);
        SZONE_UNLOCK(szone);
        return 0;
    }
    next_msize = next_msize_and_free & ~THIS_FREE;
    if (!(next_msize_and_free & THIS_FREE) || !next_msize || (old_size + (next_msize << SHIFT_QUANTUM) < new_size)) {
        SZONE_UNLOCK(szone);
        return 0;
    }
    coalesced_msize = (new_size - old_size + QUANTUM - 1) >> SHIFT_QUANTUM;
    leftover_msize = next_msize - coalesced_msize;
    new_msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
    // malloc_printf("Realloc in place for %p; current msize=%d next_msize=%d wanted=%d\n", ptr, MSIZE_FLAGS_FOR_PTR(ptr), next_msize, new_size);
    free_list_remove_ptr(szone, next_block, next_msize);
    if ((leftover_msize < MIN_BLOCK) || (leftover_msize < coalesced_msize / 4)) {
        // don't bother splitting it off
        // malloc_printf("No leftover ");
        coalesced_msize = next_msize;
    } else {
        void *leftover = next_block + (coalesced_msize << SHIFT_QUANTUM);
        // malloc_printf("Leftover ");
        free_list_add_ptr(szone, leftover, leftover_msize);
    }
    new_msize_and_free += coalesced_msize;
    MSIZE_FLAGS_FOR_PTR(ptr) = new_msize_and_free;
    following_ptr = FOLLOWING_PTR(ptr, new_msize_and_free & ~PREV_FREE);
    MSIZE_FLAGS_FOR_PTR(following_ptr) &= ~ PREV_FREE;
    {
        msize_t ms = MSIZE_FLAGS_FOR_PTR(following_ptr);
        msize_t pms = PREVIOUS_MSIZE(FOLLOWING_PTR(following_ptr, ms & ~THIS_FREE));
        malloc_printf("Following ptr of coalesced (%p) has msize_and_free=0x%x=%s%d end_of_block_marker=%d\n", following_ptr, ms, (ms & THIS_FREE) ? "THIS_FREE|" : "", ms & ~THIS_FREE, pms);
    }
    if (LOG(szone,ptr)) malloc_printf("In szone_realloc(), ptr=%p, msize=%d\n", ptr, MSIZE_FLAGS_FOR_PTR(ptr));
    CHECK(szone, __PRETTY_FUNCTION__);
    szone->num_bytes_in_small_objects += coalesced_msize << SHIFT_QUANTUM;
    SZONE_UNLOCK(szone);
    // malloc_printf("Extended ptr %p for realloc old=%d desired=%d new=%d leftover=%d\n", ptr, (unsigned)old_size, (unsigned)new_size, (unsigned)szone_size(szone, ptr), leftover_msize << SHIFT_QUANTUM);
    return 1;
}
static INLINE int try_realloc_large_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) {
    vm_address_t addr = (vm_address_t)ptr + old_size;
    large_entry_t *entry;
    kern_return_t err;
    if (((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL)) return 0; // don't want to bother with the protected case
    if (old_size != ((old_size >> vm_page_shift) << vm_page_shift)) malloc_printf("*** old_size is %d\n", old_size);
    // malloc_printf("=== Trying (1) to extend %p from %d to %d\n", ptr, old_size, new_size);
    SZONE_LOCK(szone);
    entry = large_entry_for_pointer_no_lock(szone, (void *)addr);
    SZONE_UNLOCK(szone);
    if (entry) return 0; // another large pointer already exists in the table - extension is not going to work
    new_size = round_page(new_size);
    // malloc_printf("=== Trying (2) to extend %p from %d to %d\n", ptr, old_size, new_size);
    err = vm_allocate(mach_task_self(), &addr, new_size - old_size, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE)); // we ask for allocation specifically at addr
    if (err) return 0;
    // we can just extend the block
    SZONE_LOCK(szone);
    entry = large_entry_for_pointer_no_lock(szone, ptr);
    if (!entry) szone_error(szone, "large entry reallocated is not properly in table", ptr);
    // malloc_printf("=== Successfully reallocated at end of %p from %d to %d\n", ptr, old_size, new_size);
    entry->address_and_num_pages = (vm_address_t)ptr | (new_size >> vm_page_shift);
    szone->num_bytes_in_large_objects += new_size - old_size;
    SZONE_UNLOCK(szone); // we release the lock asap
    return 1;
}
static void *szone_realloc(szone_t *szone, void *ptr, size_t new_size) {
    size_t old_size = 0;
    void *new_ptr;
    if (LOG(szone, ptr)) malloc_printf("In szone_realloc for %p, %d\n", ptr, (unsigned)new_size);
    if (!ptr) return szone_malloc(szone, new_size);
    old_size = szone_size(szone, ptr);
    if (!old_size) {
        szone_error(szone, "Pointer being reallocated was not allocated", ptr);
        return NULL;
    }
    if (old_size >= new_size) return ptr;
    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && ((new_size + PTR_HEADER_SIZE + QUANTUM - 1) < LARGE_THRESHOLD)) {
        // We now try to realloc in place
        if (try_realloc_small_in_place(szone, ptr, old_size, new_size)) return ptr;
    }
    if ((old_size > VM_COPY_THRESHOLD) && ((new_size + vm_page_size - 1) < (1 << (vm_page_shift + vm_page_shift)))) {
        // we know it's a large block, and not a huge block (both for old and new)
        kern_return_t err = 0;
        unsigned num_pages;
        large_entry_t *entry;
        vm_range_t range;
        num_pages = round_page(new_size) >> vm_page_shift;
        if (try_realloc_large_in_place(szone, ptr, old_size, new_size)) return ptr;
        new_ptr = large_and_huge_malloc(szone, num_pages);
        err = vm_copy(mach_task_self(), (vm_address_t)ptr, old_size, (vm_address_t)new_ptr);
        if (err) {
            szone_error(szone, "Can't vm_copy region", ptr);
        }
        // We do not want the kernel to alias the old and the new, so we deallocate the old pointer right away and tear down the ptr-to-size data structure
        SZONE_LOCK(szone);
        entry = large_entry_for_pointer_no_lock(szone, ptr);
        if (!entry) {
            szone_error(szone, "Can't find entry for large copied block", ptr);
        }
        range = large_free_no_lock(szone, entry);
        SZONE_UNLOCK(szone); // we release the lock asap
        // we truly deallocate_pages, including guard pages
        deallocate_pages(szone, range.address, range.size, 0);
        if (LOG(szone, ptr)) malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size);
        return new_ptr;
    }
    new_ptr = szone_malloc(szone, new_size);
    if (new_ptr == NULL)
        return NULL;
    memcpy(new_ptr, ptr, old_size);
    szone_free(szone, ptr);
    if (LOG(szone, ptr)) malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size);
    return new_ptr;
}
static void szone_destroy(szone_t *szone) {
    unsigned index;
    index = szone->num_large_entries;
    while (index--) {
        large_entry_t *entry = szone->large_entries + index;
        if (!LARGE_ENTRY_IS_EMPTY(*entry)) {
            large_entry_t range;
            range = *entry;
            // we deallocate_pages, including guard pages
            deallocate_pages(szone, LARGE_ENTRY_ADDRESS(range), LARGE_ENTRY_SIZE(range), szone->debug_flags);
        }
    }
    if (szone->num_large_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD) large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries); // we do not free in the small chunk case
    index = szone->num_huge_entries;
    while (index--) {
        huge_entry_t *huge = szone->huge_entries + index;
        deallocate_pages(szone, huge->address, huge->size, szone->debug_flags);
    }
    // and now we free regions, with regions[0] as the last one (the final harakiri)
    index = szone->num_regions;
    while (index--) { // we skip the first region, that is the zone itself
        region_t region = szone->regions[index];
        deallocate_pages(szone, REGION_ADDRESS(region), REGION_SIZE, 0);
    }
}
static size_t szone_good_size(szone_t *szone, size_t size) {
    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
        // think small
        msize_t msize = (size + PTR_HEADER_SIZE + QUANTUM - 1) >> SHIFT_QUANTUM;
        if (msize < MIN_BLOCK) msize = MIN_BLOCK;
        return (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE;
    } else {
        unsigned num_pages;
        num_pages = round_page(size) >> vm_page_shift;
        if (!num_pages) num_pages = 1; // minimal allocation size for this szone
        return num_pages << vm_page_shift;
    }
}
unsigned szone_check_counter = 0;
unsigned szone_check_start = 0;
unsigned szone_check_modulo = 1;
static boolean_t szone_check_all(szone_t *szone, const char *function) {
    unsigned index = 0;
    SZONE_LOCK(szone);
    while (index < szone->num_regions) {
        region_t *region = szone->regions + index++;
        if (!szone_check_region(szone, region)) {
            SZONE_UNLOCK(szone);
            szone->debug_flags &= ~ CHECK_REGIONS;
            malloc_printf("*** malloc[%d]: Region %d incorrect szone_check_all(%s) counter=%d\n", getpid(), index-1, function, szone_check_counter);
            szone_error(szone, "Check: region incorrect", NULL);
            return 0;
        }
    }
    index = 0;
    while (index < MAX_GRAIN) {
        if (! free_list_check(szone, index)) {
            SZONE_UNLOCK(szone);
            szone->debug_flags &= ~ CHECK_REGIONS;
            malloc_printf("*** malloc[%d]: Free list incorrect (grain=%d) szone_check_all(%s) counter=%d\n", getpid(), index, function, szone_check_counter);
            szone_error(szone, "Check: free list incorrect", NULL);
            return 0;
        }
        index++;
    }
    SZONE_UNLOCK(szone);
    return 1;
}
static boolean_t szone_check(szone_t *szone) {
    if (! (++szone_check_counter % 10000)) {
        malloc_printf("At szone_check counter=%d\n", szone_check_counter);
    }
    if (szone_check_counter < szone_check_start) return 1;
    if (szone_check_counter % szone_check_modulo) return 1;
    return szone_check_all(szone, "");
}
static kern_return_t szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder) {
    szone_t *szone;
    kern_return_t err;
    if (!reader) reader = _szone_default_reader;
    // malloc_printf("Enumerator for zone 0x%x\n", zone_address);
    err = reader(task, zone_address, sizeof(szone_t), (void **)&szone);
    if (err) return err;
    // malloc_printf("Small ptrs enumeration for zone 0x%x\n", zone_address);
    err = small_in_use_enumerator(task, context, type_mask, (vm_address_t)szone->regions, szone->num_regions, reader, recorder);
    if (err) return err;
    // malloc_printf("Large ptrs enumeration for zone 0x%x\n", zone_address);
    err = large_in_use_enumerator(task, context, type_mask, (vm_address_t)szone->large_entries, szone->num_large_entries, reader, recorder);
    if (err) return err;
    // malloc_printf("Huge ptrs enumeration for zone 0x%x\n", zone_address);
    err = huge_in_use_enumerator(task, context, type_mask, (vm_address_t)szone->huge_entries, szone->num_huge_entries, reader, recorder);
    return err;
}
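
// Prints, for each grain, the block size and the number of free blocks
// currently chained on that free list.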
static void szone_print_free_list(szone_t *szone) {
    grain_t grain = MAX_GRAIN;
    malloc_printf("Free Sizes: ");
    while (grain--) {
        free_list_t *ptr = szone->free_list[grain];
        if (ptr) {
            unsigned count = 0;
            while (ptr) {
                count++;
                // malloc_printf("%p ", ptr);
                ptr = ptr->next;
            }
            malloc_printf("%s%d[%d] ", (grain == MAX_GRAIN-1) ? ">=" : "", (grain+1)*QUANTUM, count);
        }
    }
    malloc_printf("\n");
}
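
// Prints a summary of the zone (counts and sizes from scalable_zone_info),
// then walks every region counting blocks in use; with 'verbose' it also
// dumps the per-size histogram and the free lists.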
static void szone_print(szone_t *szone, boolean_t verbose) {
    unsigned info[scalable_zone_info_count];
    unsigned index = 0;
    scalable_zone_info((void *)szone, info, scalable_zone_info_count);
    malloc_printf("Scalable zone %p: inUse=%d(%dKB) small=%d(%dKB) large=%d(%dKB) huge=%d(%dKB) guard_page=%d\n", szone, info[0], info[1] / 1024, info[2], info[3] / 1024, info[4], info[5] / 1024, info[6], info[7] / 1024, info[8]);
    malloc_printf("%d regions: \n", szone->num_regions);
    while (index < szone->num_regions) {
        region_t *region = szone->regions + index;
        unsigned counts[512];
        unsigned ci = 0;
        unsigned in_use = 0;
        vm_address_t start = REGION_ADDRESS(*region) + QUANTUM;
        memset(counts, 0, 512 * sizeof(unsigned));
        while (start < REGION_END(*region)) {
            msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(start);
            if (!(msize_and_free & THIS_FREE)) {
                msize_t msize = msize_and_free & ~PREV_FREE;
                if (!msize) break; // last encountered
                // block in use
                if (msize < 512) counts[msize]++;
                start += msize << SHIFT_QUANTUM;
                in_use++;
            } else {
                msize_t msize = msize_and_free & ~THIS_FREE;
                // free block
                start += msize << SHIFT_QUANTUM;
            }
        }
        malloc_printf("Region [0x%x-0x%x, %dKB] \tIn_use=%d ", REGION_ADDRESS(*region), REGION_END(*region), (int)REGION_SIZE / 1024, in_use);
        if (verbose) {
            malloc_printf("\n\tSizes in use: ");
            while (ci < 512) {
                if (counts[ci]) malloc_printf("%d[%d] ", ci << SHIFT_QUANTUM, counts[ci]);
                ci++;
            }
        }
        malloc_printf("\n");
        index++;
    }
    if (verbose) szone_print_free_list(szone);
    malloc_printf("Free in last zone %d\n", szone->num_bytes_free_in_last_region);
}
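
// Introspection hook: remembers the address whose allocation activity should
// be logged (stored in szone->log_address).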
static void szone_log(malloc_zone_t *zone, void *log_address) {
    szone_t *szone = (void *)zone;
    szone->log_address = log_address;
}
static void szone_force_lock(szone_t *szone) {
    // malloc_printf("szone_force_lock\n");
    SZONE_LOCK(szone);
}

static void szone_force_unlock(szone_t *szone) {
    // malloc_printf("szone_force_unlock\n");
    SZONE_UNLOCK(szone);
}
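
// Table of introspection entry points handed to the malloc layer; the
// (void *) casts paper over the szone_t */malloc_zone_t * mismatch in the
// function signatures.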
static struct malloc_introspection_t szone_introspect = {
    (void *)szone_ptr_in_use_enumerator, (void *)szone_good_size,
    (void *)szone_check, (void *)szone_print, szone_log,
    (void *)szone_force_lock, (void *)szone_force_unlock
};
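
/*
 * create_scalable_zone() below carves its first region out of freshly
 * allocated pages, places the szone_t itself at the start of that region,
 * and fills in the malloc_zone_t function table before returning it.
 *
 * Minimal usage sketch (not part of the original file; callers normally go
 * through malloc_create_zone() rather than calling this directly):
 *
 *     malloc_zone_t *zone = create_scalable_zone(0, 0);
 *     void *p = malloc_zone_malloc(zone, 100);
 *     malloc_zone_free(zone, p);
 *     malloc_zone_destroy(zone);
 */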
malloc_zone_t *create_scalable_zone(size_t initial_size, unsigned debug_flags) {
    szone_t *szone;
    vm_address_t addr;
    msize_t msize;
    size_t msize_used = 0;
    // malloc_printf("=== create_scalable_zone(%d,%d);\n", initial_size, debug_flags);
    if (!vm_page_shift) {
        unsigned page;
        vm_page_shift = 12; // the minimal for page sizes
        page = 1 << vm_page_shift;
        while (page != vm_page_size) { page += page; vm_page_shift++; }
        if (MIN_BLOCK * QUANTUM < sizeof(free_list_t) + PTR_HEADER_SIZE) {
            malloc_printf("*** malloc[%d]: inconsistent parameters\n", getpid());
            return NULL;
        }
    }
    addr = allocate_pages(NULL, REGION_SIZE, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC));
    if (!addr) return NULL;
    szone = (void *)(addr + QUANTUM);
    msize = (sizeof(szone_t) + PTR_HEADER_SIZE + QUANTUM-1) >> SHIFT_QUANTUM;
    MSIZE_FLAGS_FOR_PTR(szone) = msize;
    msize_used += msize; szone->num_small_objects++;
    szone->basic_zone.size = (void *)szone_size;
    szone->basic_zone.malloc = (void *)szone_malloc;
    szone->basic_zone.calloc = (void *)szone_calloc;
    szone->basic_zone.valloc = (void *)szone_valloc;
    szone->basic_zone.free = (void *)szone_free;
    szone->basic_zone.realloc = (void *)szone_realloc;
    szone->basic_zone.destroy = (void *)szone_destroy;
    szone->basic_zone.introspect = &szone_introspect;
    LOCK_INIT(szone->lock);
    szone->debug_flags = debug_flags;
    szone->regions = (void *)((char *)szone + (msize << SHIFT_QUANTUM));
    // we always reserve room for a few regions
    msize = (sizeof(region_t) * INITIAL_NUM_REGIONS + PTR_HEADER_SIZE + QUANTUM-1) >> SHIFT_QUANTUM;
    if (msize < MIN_BLOCK) msize = MIN_BLOCK;
    MSIZE_FLAGS_FOR_PTR(szone->regions) = msize;
    msize_used += msize; szone->num_small_objects++;
    szone->regions[0] = addr;
    szone->num_regions = 1;
    szone->num_bytes_free_in_last_region = REGION_SIZE - ((msize_used+1) << SHIFT_QUANTUM) + PTR_HEADER_SIZE;
    CHECK(szone, __PRETTY_FUNCTION__);
    return (malloc_zone_t *)szone;
}
/********* The following is private API for debug and perf tools ************/
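
// scalable_zone_info() fills info_to_fill with up to 'count' counters:
//   [0] objects in use    [1] bytes in use
//   [2] small objects     [3] small bytes
//   [4] large objects     [5] large bytes
//   [6] huge objects      [7] huge bytes
//   [8] debug flags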
void scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count) {
    szone_t *szone = (void *)zone;
    unsigned info[scalable_zone_info_count];
    // We do not lock to facilitate debug
    info[2] = szone->num_small_objects;
    info[3] = szone->num_bytes_in_small_objects;
    info[4] = szone->num_large_objects_in_use;
    info[5] = szone->num_bytes_in_large_objects;
    info[6] = szone->num_huge_entries;
    info[7] = szone->num_bytes_in_huge_objects;
    info[8] = szone->debug_flags;
    info[0] = info[2] + info[4] + info[6];
    info[1] = info[3] + info[5] + info[7];
    memcpy(info_to_fill, info, sizeof(unsigned)*count);
}
/********* Support code for emacs unexec ************/

/* History of freezedry version numbers:
 *
 * 1) Old malloc (before the scalable malloc implementation in this file
 *    existed).
 * 2) Original freezedrying code for scalable malloc. This code was apparently
 *    based on the old freezedrying code and was fundamentally flawed in its
 *    assumption that tracking allocated memory regions was adequate to fake
 *    operations on freezedried memory. This doesn't work, since scalable
 *    malloc does not store flags in front of large page-aligned allocations.
 * 3) Original szone-based freezedrying code.
 *
 * No version backward compatibility is provided, but the version number does
 * make it possible for malloc_jumpstart() to return an error if the application
 * was freezedried with an older version of malloc.
 */
#define MALLOC_FREEZEDRY_VERSION 3

typedef struct {
    unsigned version;
    unsigned nszones;
    szone_t *szones;
} malloc_frozen;
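
/* Stand-in zone functions installed by malloc_jumpstart(): allocations are
 * forwarded to the live default allocator, frees of freezedried memory are
 * silently ignored, and only a growing realloc() copies out of the frozen
 * region. */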
static void *frozen_malloc(szone_t *zone, size_t new_size) {
    return malloc(new_size);
}

static void *frozen_calloc(szone_t *zone, size_t num_items, size_t size) {
    return calloc(num_items, size);
}

static void *frozen_valloc(szone_t *zone, size_t new_size) {
    return valloc(new_size);
}

static void *frozen_realloc(szone_t *zone, void *ptr, size_t new_size) {
    size_t old_size = szone_size(zone, ptr);
    void *new_ptr;

    if (new_size <= old_size) {
        return ptr;
    }

    new_ptr = malloc(new_size);

    if (old_size > 0) {
        memcpy(new_ptr, ptr, old_size);
    }

    return new_ptr;
}

static void frozen_free(szone_t *zone, void *ptr) {
}

static void frozen_destroy(szone_t *zone) {
}
/********* Pseudo-private API for emacs unexec ************/

/*
 * malloc_freezedry() records all of the szones in use, so that they can be
 * partially reconstituted by malloc_jumpstart(). Due to the differences
 * between reconstituted memory regions and those created by the szone code,
 * care is taken not to reallocate from the freezedried memory, except in the
 * case of a non-growing realloc().
 *
 * Due to the flexibility provided by the zone registration mechanism, it is
 * impossible to implement generic freezedrying for any zone type. This code
 * only handles applications that use the szone allocator, so malloc_freezedry()
 * returns 0 (error) if any non-szone zones are encountered.
 */
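
/*
 * Rough usage sketch (not part of the original file, and assuming the
 * unexec-style dumper arranges to preserve the cookie across the dump):
 *
 *     int cookie = malloc_freezedry();      // before dumping the image
 *     if (cookie == 0) { .. a non-szone zone was found; bail out .. }
 *     // ...dump the image, keeping 'cookie' somewhere it survives...
 *
 *     // in the restarted image, before touching malloc'd state:
 *     if (malloc_jumpstart(cookie) != 0) { .. freezedry version mismatch .. }
 */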
int malloc_freezedry(void) {
    extern unsigned malloc_num_zones;
    extern malloc_zone_t **malloc_zones;
    malloc_frozen *data;
    unsigned i;

    /* Allocate space in which to store the freezedry state. */
    data = (malloc_frozen *) malloc(sizeof(malloc_frozen));

    /* Set freezedry version number so that malloc_jumpstart() can check for
     * compatibility. */
    data->version = MALLOC_FREEZEDRY_VERSION;

    /* Allocate the array of szone pointers. */
    data->nszones = malloc_num_zones;
    data->szones = (szone_t *) calloc(malloc_num_zones, sizeof(szone_t));

    /* Fill in the array of szone structures. They are copied rather than
     * referenced, since the originals are likely to be clobbered during malloc
     * initialization. */
    for (i = 0; i < malloc_num_zones; i++) {
        if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) {
            /* Unknown zone type. */
            free(data->szones);
            free(data);
            return 0;
        }
        memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t));
    }

    return (int) data;
}
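
/* Re-registers the freezedried szones recorded by malloc_freezedry(), after
 * repointing their zone functions at the frozen_* stand-ins above. Returns
 * 0 on success, nonzero if the freezedry version does not match. */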
int malloc_jumpstart(int cookie) {
    malloc_frozen *data = (malloc_frozen *) cookie;
    unsigned i;

    if (data->version != MALLOC_FREEZEDRY_VERSION) {
        /* Unsupported freezedry version. */
        return 1;
    }

    for (i = 0; i < data->nszones; i++) {
        /* Set function pointers. Even the functions that stay the same must be
         * set, since there are no guarantees that they will be mapped to the
         * same addresses. */
        data->szones[i].basic_zone.size = (void *) szone_size;
        data->szones[i].basic_zone.malloc = (void *) frozen_malloc;
        data->szones[i].basic_zone.calloc = (void *) frozen_calloc;
        data->szones[i].basic_zone.valloc = (void *) frozen_valloc;
        data->szones[i].basic_zone.free = (void *) frozen_free;
        data->szones[i].basic_zone.realloc = (void *) frozen_realloc;
        data->szones[i].basic_zone.destroy = (void *) frozen_destroy;
        data->szones[i].basic_zone.introspect = &szone_introspect;

        /* Register the freezedried zone. */
        malloc_zone_register(&data->szones[i].basic_zone);
    }

    return 0;
}