/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/* Author: Bertrand Serlet, August 1999 */
#import "scalable_malloc.h"

#import <pthread_internals.h> // for spin lock

#include <mach/vm_statistics.h>

/********************* DEFINITIONS ************************/
static unsigned vm_page_shift = 0; // guaranteed to be initialized by zone creation
#define DEBUG_MALLOC 0 // set to one to debug malloc itself
#define DEBUG_CLIENT 0 // set to one to help debug a nasty memory smasher

#if DEBUG_MALLOC
#warning DEBUG ENABLED
#endif
#define CHECK_REGIONS (1 << 31)

#define VM_COPY_THRESHOLD (40 * 1024)
    // When all memory is touched after a copy, vm_copy() is always a loss
    // But if the memory is only read, vm_copy() wins over memmove() at 3 or 4 pages (on a G3/300MHz)
#define KILL_THRESHOLD (32 * 1024)

#define LARGE_THRESHOLD (3 * vm_page_size) // at or above this use "large"

#define SHIFT_QUANTUM 4 // Required for AltiVec
#define QUANTUM (1 << SHIFT_QUANTUM) // allocation quantum
#define MIN_BLOCK 1 // minimum size, in QUANTUM multiples
/* The header of a small block in use contains its size (expressed as multiples of QUANTUM, and header included), or 0;
   If 0 then the block is either free (in which case the size is directly at the block itself), or the last block (indicated by either being beyond range, or having 0 in the block itself) */

#define PTR_HEADER_SIZE (sizeof(msize_t))
#define FOLLOWING_PTR(ptr,msize) (((char *)(ptr)) + ((msize) << SHIFT_QUANTUM))
#define PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-2]

#define THIS_FREE 0x8000 // indicates this block is free
#define PREV_FREE 0x4000 // indicates previous block is free
#define MSIZE_FLAGS_FOR_PTR(ptr) (((msize_t *)(ptr))[-1])
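/*
 * Illustrative note (added, not in the original source): with SHIFT_QUANTUM == 4
 * and PTR_HEADER_SIZE == sizeof(msize_t) == 2, a request for 100 bytes is rounded
 * up to (100 + 2 + 15) >> 4 == 7 quanta, i.e. a 112-byte block of which 110 bytes
 * are usable by the caller.  While the block is in use, MSIZE_FLAGS_FOR_PTR(ptr)
 * holds 7 (possibly | PREV_FREE); once the block is freed it holds 7 | THIS_FREE,
 * and the last 2 bytes of the block (read as PREVIOUS_MSIZE of the following
 * block) also record 7.
 */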
#define REGION_SIZE (1 << (16 - 2 + SHIFT_QUANTUM)) // i.e. 256KB, since we only have 16 bits for msize_t, and 1 bit is taken by THIS_FREE, and 1 by PREV_FREE

#define INITIAL_NUM_REGIONS 8 // must always be at least 2 to always have 1 slot empty

#define CHECKSUM_MAGIC 0x357B

#define PROTECT_SMALL 0 // Should be 0: 1 is too slow for normal use

#define LARGE_CACHE_SIZE 4 // defines hysteresis of large chunks

#define MAX_RECORDER_BUFFER 256
typedef unsigned short msize_t; // a size in multiples of QUANTUM

typedef struct {
    unsigned checksum;
    void *previous;
    void *next;
} free_list_t;
typedef struct {
    unsigned address_and_num_pages;
    // this type represents both an address and a number of pages
    // the low bits are the number of pages
    // the high bits are the address
    // note that the exact number of bits used depends on the page size
    // also, this cannot represent pointers larger than 1 << (vm_page_shift * 2)
} compact_range_t;
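/*
 * Illustrative example (added, not in the original source), assuming 4KB pages
 * (vm_page_shift == 12): a 3-page large block starting at 0x00654000 is stored
 * as address_and_num_pages == 0x00654000 | 3 == 0x00654003; the low 12 bits give
 * the page count and masking them off recovers the address.
 */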
typedef vm_address_t region_t;

typedef compact_range_t large_entry_t;

typedef vm_range_t huge_entry_t;

typedef unsigned short grain_t;
typedef struct {
    malloc_zone_t basic_zone;
    unsigned debug_flags;

    /* Regions for small objects */
    unsigned num_regions;
    region_t *regions; // this array is always created with 1 extra slot to be able to add a region without taking memory right away
    unsigned last_region_hit;
    free_list_t *free_list[MAX_GRAIN];
    unsigned num_bytes_free_in_last_region; // these bytes are cleared
    unsigned num_small_objects;
    unsigned num_bytes_in_small_objects;

    /* Cache of recently freed large object */
    vm_range_t large_to_deallocate[LARGE_CACHE_SIZE];
    // Entries that are 0 should be discarded

    /* large objects: vm_page_shift <= log2(size) < 2 * vm_page_shift */
    unsigned num_large_objects_in_use; // does not count the large entries in large_to_deallocate
    unsigned num_large_entries;
    unsigned num_bytes_in_large_objects;
    large_entry_t *large_entries;
    // large_entries are hashed by location
    // large_entries that are 0 should be discarded

    /* huge objects: log2(size) >= 2 * vm_page_shift */
    unsigned num_bytes_in_huge_objects;
    unsigned num_huge_entries;
    huge_entry_t *huge_entries;
} szone_t;
static void *szone_malloc(szone_t *szone, size_t size);
static void *szone_valloc(szone_t *szone, size_t size);
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested);
static void szone_free(szone_t *szone, void *ptr);
static size_t szone_good_size(szone_t *szone, size_t size);
static boolean_t szone_check_all(szone_t *szone, const char *function);
static void szone_print(szone_t *szone, boolean_t verbose);
static INLINE region_t *region_for_ptr_no_lock(szone_t *szone, const void *ptr);
#define LOG(szone,ptr) (szone->log_address && (szone->num_small_objects > 8) && (((unsigned)szone->log_address == -1) || (szone->log_address == (void *)(ptr))))
/********************* ACCESSOR MACROS ************************/

#define SZONE_LOCK(szone) LOCK(szone->lock)
#define SZONE_UNLOCK(szone) UNLOCK(szone->lock)

#define CHECK(szone,fun) if ((szone)->debug_flags & CHECK_REGIONS) szone_check_all(szone, fun)

#define REGION_ADDRESS(region) (region)
#define REGION_END(region) (region+REGION_SIZE)
#define LARGE_ENTRY_ADDRESS(entry) (((entry).address_and_num_pages >> vm_page_shift) << vm_page_shift)
#define LARGE_ENTRY_NUM_PAGES(entry) ((entry).address_and_num_pages & ((1 << vm_page_shift) - 1))
#define LARGE_ENTRY_SIZE(entry) (LARGE_ENTRY_NUM_PAGES(entry) << vm_page_shift)
#define LARGE_ENTRY_MATCHES(entry,ptr) (!(((entry).address_and_num_pages - (unsigned)(ptr)) >> vm_page_shift))
#define LARGE_ENTRY_IS_EMPTY(entry) (!((entry).address_and_num_pages))
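// Worked example (added for illustration, not in the original source), with
// vm_page_shift == 12 and entry.address_and_num_pages == 0x00654003:
//   LARGE_ENTRY_ADDRESS(entry)   == 0x00654000
//   LARGE_ENTRY_NUM_PAGES(entry) == 3
//   LARGE_ENTRY_SIZE(entry)      == 3 * 4096 == 12288 bytes
//   LARGE_ENTRY_MATCHES(entry, (void *)0x00654000) is true, because the
//   difference between the stored word and the pointer fits in the low 12 bits.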
/********************* VERY LOW LEVEL UTILITIES ************************/
static void szone_error(szone_t *szone, const char *msg, const void *ptr) {
    if (szone) SZONE_UNLOCK(szone);
    if (ptr) {
        malloc_printf("*** malloc[%d]: error for object %p: %s\n", getpid(), ptr, msg);
#if DEBUG_MALLOC
        szone_print(szone, 1);
#endif
    } else {
        malloc_printf("*** malloc[%d]: error: %s\n", getpid(), msg);
    }
#if DEBUG_CLIENT
    malloc_printf("*** Sleeping to help debug\n");
    sleep(3600); // to help debug
#endif
}
static void protect(szone_t *szone, vm_address_t address, vm_size_t size, unsigned protection, unsigned debug_flags) {
    kern_return_t err;
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) {
        err = vm_protect(mach_task_self(), address - vm_page_size, vm_page_size, 0, protection);
        if (err) malloc_printf("*** malloc[%d]: Can't protect(%x) region for prelude guard page at 0x%x\n", getpid(), protection, address - vm_page_size);
    }
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) {
        err = vm_protect(mach_task_self(), (vm_address_t)(address + size), vm_page_size, 0, protection);
        if (err) malloc_printf("*** malloc[%d]: Can't protect(%x) region for postlude guard page at 0x%x\n", getpid(), protection, address + size);
    }
}
static vm_address_t allocate_pages(szone_t *szone, size_t size, unsigned debug_flags, int vm_page_label) {
    kern_return_t err;
    vm_address_t addr;
    boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    size_t allocation_size = round_page(size);
    if (!allocation_size) allocation_size = vm_page_size;
    if (add_guard_pages) allocation_size += 2 * vm_page_size;
    err = vm_allocate(mach_task_self(), &addr, allocation_size, vm_page_label | 1);
    if (err) {
        szone_error(szone, "Can't allocate region", NULL);
        return NULL;
    }
    if (add_guard_pages) {
        addr += vm_page_size;
        protect(szone, addr, size, 0, debug_flags);
    }
    return addr;
}
static void deallocate_pages(szone_t *szone, vm_address_t addr, size_t size, unsigned debug_flags) {
    kern_return_t err;
    boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    if (add_guard_pages) {
        addr -= vm_page_size;
        size += 2 * vm_page_size;
    }
    err = vm_deallocate(mach_task_self(), addr, size);
    if (err) {
        szone_error(szone, "Can't deallocate_pages region", (void *)addr);
    }
}
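/*
 * Illustration (added, not in the original source): with 4KB pages and
 * SCALABLE_MALLOC_ADD_GUARD_PAGES set, allocate_pages() for N bytes reserves
 * round_page(N) plus two extra pages, returns an address one page into the
 * reservation, and protect()s the page just below the returned address and the
 * page just past the N bytes; deallocate_pages() undoes the same arithmetic by
 * moving the address back one page and widening the size by two pages.
 */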
static kern_return_t _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) {
    *ptr = (void *)address;
    return 0;
}

/********************* RANGE UTILITIES ************************/
static const vm_range_t zero_range = {0, 0};
static vm_range_t coalesce_range(vm_range_t *ranges, unsigned count, vm_range_t range) {
    // Given a sequence of ranges and a range, tries to find an abutting range
    // If no, returns original range
    // Else zeroes out coalesced range, and reapplies with coalesced range
    unsigned index = count;
    vm_range_t *current = ranges;
    while (index--) {
        vm_range_t this = *current++;
        if (!this.size) continue;
        if (this.address + this.size == range.address) {
            range.address = this.address;
            range.size += this.size;
            current[-1] = zero_range;
            return coalesce_range(ranges, count, range);
        }
        if (range.address + range.size == this.address) {
            range.size += this.size;
            current[-1] = zero_range;
            return coalesce_range(ranges, count, range);
        }
    }
    return range;
}
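/*
 * Example (added for illustration, not in the original source): if the
 * deallocation cache holds {0x100000, 0x4000} and the incoming range is
 * {0x104000, 0x8000}, the two abut, so the cached slot is zeroed and the call
 * recurses with the coalesced range {0x100000, 0xC000}.
 */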
static INLINE vm_range_t *first_zero_range(vm_range_t *ranges, unsigned count) {
    // Given a sequence of ranges, find the first empty slot
    while (count--) {
        if (!ranges->size) return ranges;
        ranges++;
    }
    return NULL;
}
static vm_range_t *largest_range(vm_range_t *ranges, unsigned count) {
    // Given a sequence of ranges, find the largest range
    // Returns NULL on empty arrays
    vm_range_t *largest_range;
    if (!count) return NULL;
    largest_range = ranges;
    while (count--) {
        if (ranges->size > largest_range->size) largest_range = ranges;
        ranges++;
    }
    return largest_range;
}
static vm_range_t *first_range_greater_or_equal(vm_range_t *ranges, unsigned count, vm_size_t size) {
    // Given a sequence of ranges, find the first range at least as large as size
    // Returns NULL when none found
    while (count--) {
        if (ranges->size >= size) return ranges;
        ranges++;
    }
    return NULL;
}

/********************* FREE LIST UTILITIES ************************/
static INLINE grain_t grain_for_msize(szone_t *szone, msize_t msize) {
    // assumes msize >= MIN_BLOCK
#if DEBUG_MALLOC
    if (msize < MIN_BLOCK) {
        szone_error(szone, "grain_for_msize: msize too small", NULL);
    }
#endif
    return (msize < MAX_GRAIN + MIN_BLOCK) ? msize - MIN_BLOCK : MAX_GRAIN - 1;
}

static INLINE msize_t msize_for_grain(szone_t *szone, grain_t grain) {
    // 0 if multiple sizes
    return (grain < MAX_GRAIN - 1) ? grain + MIN_BLOCK : 0;
}
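/*
 * Illustration (added, not in the original source): with MIN_BLOCK == 1,
 * msize 1 maps to grain 0, msize 2 to grain 1, and so on; any msize of
 * MAX_GRAIN + MIN_BLOCK - 1 or more falls into the last grain, MAX_GRAIN - 1,
 * for which msize_for_grain() returns 0 because that grain mixes several sizes.
 */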
static INLINE void free_list_checksum(szone_t *szone, free_list_t *ptr) {
    // We always checksum, as testing whether to do it (based on szone->debug_flags) is as fast as doing it
    if (ptr->checksum != (((unsigned)ptr->previous) ^ ((unsigned)ptr->next) ^ CHECKSUM_MAGIC)) {
        szone_error(szone, "Incorrect checksum for freed object - object was probably modified after being freed; break at szone_error", ptr);
    }
}

static INLINE void free_list_set_checksum(szone_t *szone, free_list_t *ptr) {
    // We always set checksum, as testing whether to do it (based on szone->debug_flags) is slower than just doing it
    ptr->checksum = ((unsigned)ptr->previous) ^ ((unsigned)ptr->next) ^ CHECKSUM_MAGIC;
}
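/*
 * Example (added for illustration, not in the original source): for a free
 * block whose previous link is NULL and whose next link is 0x1000, the stored
 * checksum is 0 ^ 0x1000 ^ CHECKSUM_MAGIC == 0x257B; free_list_checksum()
 * recomputes the same XOR and reports an error if a stray write changed any of
 * the three words.
 */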
static void free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Adds an item to the proper free list
    // Also marks the header of the block properly
    grain_t grain = grain_for_msize(szone, msize);
    free_list_t *free_ptr = ptr;
    free_list_t *free_head = szone->free_list[grain];
    msize_t *follower = (msize_t *)FOLLOWING_PTR(ptr, msize);
    if (LOG(szone,ptr)) malloc_printf("In free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    if (((unsigned)ptr) & (QUANTUM - 1)) {
        szone_error(szone, "free_list_add_ptr: Unaligned ptr", ptr);
    }
    MSIZE_FLAGS_FOR_PTR(ptr) = msize | THIS_FREE;
    if (free_head) {
        free_list_checksum(szone, free_head);
        if (free_head->previous) {
            malloc_printf("ptr=%p grain=%d free_head=%p previous=%p\n", ptr, grain, free_head, free_head->previous);
            szone_error(szone, "free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
        }
        if (!(MSIZE_FLAGS_FOR_PTR(free_head) & THIS_FREE)) {
            malloc_printf("ptr=%p grain=%d free_head=%p\n", ptr, grain, free_head);
            szone_error(szone, "free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
        }
        if ((grain != MAX_GRAIN-1) && (MSIZE_FLAGS_FOR_PTR(free_head) != (THIS_FREE | msize))) {
            malloc_printf("ptr=%p grain=%d free_head=%p previous_msize=%d\n", ptr, grain, free_head, MSIZE_FLAGS_FOR_PTR(free_head));
            szone_error(szone, "free_list_add_ptr: Internal invariant broken (incorrect msize)", ptr);
        }
        free_head->previous = free_ptr;
        free_list_set_checksum(szone, free_head);
    }
    free_ptr->previous = NULL;
    free_ptr->next = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->free_list[grain] = free_ptr;
    // mark the end of this block
    PREVIOUS_MSIZE(follower) = msize;
    MSIZE_FLAGS_FOR_PTR(follower) |= PREV_FREE;
}
static void free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize) {
    // Removes item in the proper free list
    // msize could be read, but all callers have it so we pass it in
    grain_t grain = grain_for_msize(szone, msize);
    free_list_t *free_ptr = ptr;
    free_list_t *next = free_ptr->next;
    free_list_t *previous = free_ptr->previous;
    if (LOG(szone,ptr)) malloc_printf("In free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    free_list_checksum(szone, free_ptr);
    if (!previous) {
        if (szone->free_list[grain] != ptr) {
            malloc_printf("ptr=%p grain=%d msize=%d szone->free_list[grain]=%p\n", ptr, grain, msize, szone->free_list[grain]);
            szone_error(szone, "free_list_remove_ptr: Internal invariant broken (szone->free_list[grain])", ptr);
        }
        szone->free_list[grain] = next;
    } else {
        previous->next = next;
        free_list_set_checksum(szone, previous);
    }
    if (next) {
        next->previous = previous;
        free_list_set_checksum(szone, next);
    }
    MSIZE_FLAGS_FOR_PTR(FOLLOWING_PTR(ptr, msize)) &= ~ PREV_FREE;
}
static boolean_t free_list_check(szone_t *szone, grain_t grain) {
    unsigned count = 0;
    free_list_t *ptr = szone->free_list[grain];
    free_list_t *previous = NULL;
    while (ptr) {
        msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
        count++;
        if (!(msize_and_free & THIS_FREE)) {
            malloc_printf("*** malloc[%d]: In-use ptr in free list grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
            return 0;
        }
        if (((unsigned)ptr) & (QUANTUM - 1)) {
            malloc_printf("*** malloc[%d]: Unaligned ptr in free list grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
            return 0;
        }
        if (!region_for_ptr_no_lock(szone, ptr)) {
            malloc_printf("*** malloc[%d]: Ptr not in szone grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
            return 0;
        }
        free_list_checksum(szone, ptr);
        if (ptr->previous != previous) {
            malloc_printf("*** malloc[%d]: Previous incorrectly set grain=%d count=%d ptr=%p\n", getpid(), grain, count, ptr);
            return 0;
        }
        if ((grain != MAX_GRAIN-1) && (msize_and_free != (msize_for_grain(szone, grain) | THIS_FREE))) {
            malloc_printf("*** malloc[%d]: Incorrect msize for grain=%d count=%d ptr=%p msize=%d\n", getpid(), grain, count, ptr, msize_and_free);
            return 0;
        }
        previous = ptr;
        ptr = ptr->next;
    }
    return 1;
}
/********************* SMALL BLOCKS MANAGEMENT ************************/
static INLINE region_t *region_for_ptr_no_lock(szone_t *szone, const void *ptr) {
    region_t *first_region = szone->regions;
    region_t *region = first_region + szone->last_region_hit;
    region_t this = *region;
    if ((unsigned)ptr - (unsigned)REGION_ADDRESS(this) < (unsigned)REGION_SIZE) {
        return region;
    } else {
        // We iterate in reverse order because last regions are more likely
        region = first_region + szone->num_regions;
        while (region != first_region) {
            this = *(--region);
            if ((unsigned)ptr - (unsigned)REGION_ADDRESS(this) < (unsigned)REGION_SIZE) {
                szone->last_region_hit = region - first_region;
                return region;
            }
        }
        return NULL;
    }
}
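/*
 * Illustration (added, not in the original source): REGION_SIZE is 1 << 18, so
 * a region recorded at 0x00300000 answers for every small pointer in
 * [0x00300000, 0x00340000).  The cached last_region_hit is tried first; on a
 * miss the region array is walked in reverse and the winning index is cached
 * for the next lookup.
 */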
static INLINE void small_free_no_lock(szone_t *szone, region_t *region, void *ptr, msize_t msize_and_free) {
    msize_t msize = msize_and_free & ~ PREV_FREE;
    size_t original_size = msize << SHIFT_QUANTUM;
    void *next_block = ((char *)ptr + original_size);
    msize_t next_msize_and_free;
    if (LOG(szone,ptr)) malloc_printf("In small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    if (msize < MIN_BLOCK) {
        malloc_printf("In small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
        szone_error(szone, "Trying to free small block that is too small", ptr);
    }
    if (((vm_address_t)next_block < REGION_END(*region)) && ((next_msize_and_free = MSIZE_FLAGS_FOR_PTR(next_block)) & THIS_FREE)) {
        // If the next block is free, we coalesce
        msize_t next_msize = next_msize_and_free & ~THIS_FREE;
        if (LOG(szone,ptr)) malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", ptr, msize, next_block, next_msize);
        free_list_remove_ptr(szone, next_block, next_msize);
        msize += next_msize;
    }
    // Let's try to coalesce backwards now
    if (msize_and_free & PREV_FREE) {
        msize_t previous_msize = PREVIOUS_MSIZE(ptr);
        void *previous = ptr - (previous_msize << SHIFT_QUANTUM);
        if (LOG(szone,previous)) malloc_printf("In small_free_no_lock(), coalesced backwards for %p previous=%p, msize=%d\n", ptr, previous, previous_msize);
        if (!previous_msize || (previous_msize >= (((vm_address_t)ptr - REGION_ADDRESS(*region)) >> SHIFT_QUANTUM))) {
            szone_error(szone, "Invariant 1 broken when coalescing backwards", ptr);
        }
        if (MSIZE_FLAGS_FOR_PTR(previous) != (previous_msize | THIS_FREE)) {
            malloc_printf("previous=%p its_msize_and_free=0x%x previous_msize=%d\n", previous, MSIZE_FLAGS_FOR_PTR(previous), previous_msize);
            szone_error(szone, "Invariant 3 broken when coalescing backwards", ptr);
        }
        free_list_remove_ptr(szone, previous, previous_msize);
        ptr = previous;
        msize += previous_msize;
        if (msize & PREV_FREE) {
            malloc_printf("In small_free_no_lock(), after coalescing with previous ptr=%p, msize=%d previous_msize=%d\n", ptr, msize, previous_msize);
            szone_error(szone, "Incorrect coalescing", ptr);
        }
    }
    if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) {
        if (!msize) {
            szone_error(szone, "Incorrect size information - block header was damaged", ptr);
        } else {
            memset(ptr, 0x55, (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE);
        }
    }
    free_list_add_ptr(szone, ptr, msize);
    CHECK(szone, "small_free_no_lock: added to free list");
    szone->num_small_objects--;
    szone->num_bytes_in_small_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
}
static void *small_malloc_from_region_no_lock(szone_t *szone, msize_t msize) {
    // Allocates from the last region or a freshly allocated region
    region_t *last_region = szone->regions + szone->num_regions - 1;
    vm_address_t new_address;
    void *ptr;
    msize_t msize_and_free;
    unsigned region_capacity;
    ptr = (void *)(REGION_END(*last_region) - szone->num_bytes_free_in_last_region + PTR_HEADER_SIZE);
    if (((vm_address_t)ptr) & (QUANTUM - 1)) {
        szone_error(szone, "Invariant broken while using end of region", ptr);
    }
    msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
    if (msize_and_free != PREV_FREE && msize_and_free != 0) {
        malloc_printf("*** malloc[%d]: msize_and_free = %d\n", getpid(), msize_and_free);
        szone_error(szone, "Invariant broken when allocating at end of zone", ptr);
    }
    // In order to make sure we don't have 2 free pointers following themselves, if the last item is a free item, we combine it and clear it
    if (msize_and_free == PREV_FREE) {
        msize_t previous_msize = PREVIOUS_MSIZE(ptr);
        void *previous = ptr - (previous_msize << SHIFT_QUANTUM);
        if (LOG(szone, ptr)) malloc_printf("Combining last with free space at %p\n", ptr);
        if (!previous_msize || (previous_msize >= (((vm_address_t)ptr - REGION_ADDRESS(*last_region)) >> SHIFT_QUANTUM)) || (MSIZE_FLAGS_FOR_PTR(previous) != (previous_msize | THIS_FREE))) {
            szone_error(szone, "Invariant broken when coalescing backwards at end of zone", ptr);
        }
        free_list_remove_ptr(szone, previous, previous_msize);
        szone->num_bytes_free_in_last_region += previous_msize << SHIFT_QUANTUM;
        memset(previous, 0, previous_msize << SHIFT_QUANTUM);
        MSIZE_FLAGS_FOR_PTR(previous) = 0;
        ptr = previous;
    }
    // first try at the end of the last region
    CHECK(szone, __PRETTY_FUNCTION__);
    if (szone->num_bytes_free_in_last_region >= (msize << SHIFT_QUANTUM)) {
        szone->num_bytes_free_in_last_region -= (msize << SHIFT_QUANTUM);
        szone->num_small_objects++;
        szone->num_bytes_in_small_objects += msize << SHIFT_QUANTUM;
        MSIZE_FLAGS_FOR_PTR(ptr) = msize;
        return ptr;
    }
    // time to create a new region
    new_address = allocate_pages(szone, REGION_SIZE, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_SMALL));
    if (!new_address) {
        // out of memory
        return NULL;
    }
    // let's prepare to free the remnants of last_region
    if (szone->num_bytes_free_in_last_region >= QUANTUM) {
        msize_t this_msize = szone->num_bytes_free_in_last_region >> SHIFT_QUANTUM;
        // malloc_printf("Entering last block %p size=%d\n", ptr, this_msize << SHIFT_QUANTUM);
        if (this_msize >= MIN_BLOCK) {
            free_list_add_ptr(szone, ptr, this_msize);
        } else {
            // malloc_printf("Leaking last block at %p\n", ptr);
        }
        szone->num_bytes_free_in_last_region -= this_msize << SHIFT_QUANTUM; // to avoid coming back here
    }
    last_region[1] = new_address;
    szone->num_regions++;
    szone->num_bytes_free_in_last_region = REGION_SIZE - QUANTUM + PTR_HEADER_SIZE - (msize << SHIFT_QUANTUM);
    ptr = (void *)(new_address + QUANTUM); // waste the first bytes
    region_capacity = (MSIZE_FLAGS_FOR_PTR(szone->regions) * QUANTUM - PTR_HEADER_SIZE) / sizeof(region_t);
    if (szone->num_regions >= region_capacity) {
        unsigned new_capacity = region_capacity * 2 + 1;
        msize_t new_msize = (new_capacity * sizeof(region_t) + PTR_HEADER_SIZE + QUANTUM - 1) / QUANTUM;
        region_t *new_regions = ptr;
        // malloc_printf("Now %d regions growing regions %p to %d\n", szone->num_regions, szone->regions, new_capacity);
        MSIZE_FLAGS_FOR_PTR(new_regions) = new_msize;
        szone->num_small_objects++;
        szone->num_bytes_in_small_objects += new_msize << SHIFT_QUANTUM;
        memcpy(new_regions, szone->regions, szone->num_regions * sizeof(region_t));
        // We intentionally leak the previous regions pointer to avoid multi-threading crashes if another thread was reading it (unlocked) while we are changing it
        // Given that in practice the number of regions is typically a handful, this should not be a big deal
        szone->regions = new_regions;
        ptr += (new_msize << SHIFT_QUANTUM);
        szone->num_bytes_free_in_last_region -= (new_msize << SHIFT_QUANTUM);
        // malloc_printf("Regions is now %p next ptr is %p\n", szone->regions, ptr);
    }
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += msize << SHIFT_QUANTUM;
    MSIZE_FLAGS_FOR_PTR(ptr) = msize;
    return ptr;
}
static boolean_t szone_check_region(szone_t *szone, region_t *region) {
    void *ptr = (void *)REGION_ADDRESS(*region) + QUANTUM;
    vm_address_t region_end = REGION_END(*region);
    int is_last_region = region == szone->regions + szone->num_regions - 1;
    msize_t prev_free = 0;
    while ((vm_address_t)ptr < region_end) {
        msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
        if (!(msize_and_free & THIS_FREE)) {
            msize_t msize = msize_and_free & ~PREV_FREE;
            if ((msize_and_free & PREV_FREE) != prev_free) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (prev_free=%d) this msize=%d\n", getpid(), ptr, prev_free, msize_and_free);
                return 0;
            }
            if (!msize) {
                int extra = (is_last_region) ? szone->num_bytes_free_in_last_region : QUANTUM;
                if (((unsigned)(ptr + extra)) < region_end) {
                    malloc_printf("*** malloc[%d]: invariant broken at region end: ptr=%p extra=%d index=%d num_regions=%d end=%p\n", getpid(), ptr, extra, region - szone->regions, szone->num_regions, (void *)region_end);
                    return 0;
                }
                break; // last encountered
            }
            if (msize > (LARGE_THRESHOLD / QUANTUM)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p this msize=%d - size is too large\n", getpid(), ptr, msize_and_free);
                return 0;
            }
            if ((msize < MIN_BLOCK) && ((unsigned)ptr != region_end - QUANTUM)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p this msize=%d - size is too small\n", getpid(), ptr, msize_and_free);
                return 0;
            }
            ptr += msize << SHIFT_QUANTUM;
            prev_free = 0;
            if (is_last_region && ((vm_address_t)ptr - PTR_HEADER_SIZE > region_end - szone->num_bytes_free_in_last_region)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p this msize=%d - block extends beyond allocated region\n", getpid(), ptr, msize_and_free);
                return 0;
            }
        } else {
            // free block
            msize_t msize = msize_and_free & ~THIS_FREE;
            free_list_t *free_head = ptr;
            msize_t *follower = (void *)FOLLOWING_PTR(ptr, msize);
            if ((msize_and_free & PREV_FREE) && !prev_free) {
                malloc_printf("*** malloc[%d]: invariant broken for free block %p this msize=%d: PREV_FREE set while previous block is in use\n", getpid(), ptr, msize);
                return 0;
            }
            if (msize < MIN_BLOCK) {
                malloc_printf("*** malloc[%d]: invariant broken for free block %p this msize=%d\n", getpid(), ptr, msize);
                return 0;
            }
            if (prev_free) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (2 free in a row)\n", getpid(), ptr);
                return 0;
            }
            free_list_checksum(szone, free_head);
            if (free_head->previous && !(MSIZE_FLAGS_FOR_PTR(free_head->previous) & THIS_FREE)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (previous %p is not a free pointer)\n", getpid(), ptr, free_head->previous);
                return 0;
            }
            if (free_head->next && !(MSIZE_FLAGS_FOR_PTR(free_head->next) & THIS_FREE)) {
                malloc_printf("*** malloc[%d]: invariant broken for %p (next is not a free pointer)\n", getpid(), ptr);
                return 0;
            }
            if (PREVIOUS_MSIZE(follower) != msize) {
                malloc_printf("*** malloc[%d]: invariant broken for free %p followed by %p in region [%x-%x] (end marker incorrect) should be %d; in fact %d\n", getpid(), ptr, follower, REGION_ADDRESS(*region), region_end, msize, PREVIOUS_MSIZE(follower));
                return 0;
            }
            ptr = follower;
            prev_free = PREV_FREE;
        }
    }
    return 1;
}
static kern_return_t small_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned num_regions, memory_reader_t reader, vm_range_recorder_t recorder) {
    region_t *regions;
    unsigned index = 0;
    vm_range_t buffer[MAX_RECORDER_BUFFER];
    unsigned count = 0;
    kern_return_t err;
    err = reader(task, region_address, sizeof(region_t) * num_regions, (void **)&regions);
    if (err) return err;
    while (index < num_regions) {
        region_t region = regions[index++];
        vm_range_t range = {REGION_ADDRESS(region), REGION_SIZE};
        vm_address_t start = range.address + QUANTUM;
        // malloc_printf("Enumerating small ptrs for Region starting at 0x%x\n", start);
        if (type_mask & MALLOC_PTR_REGION_RANGE_TYPE) recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &range, 1);
        if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) while (start < range.address + range.size) {
            void *previous;
            msize_t msize_and_free;
            err = reader(task, start - PTR_HEADER_SIZE, QUANTUM, (void **)&previous);
            if (err) return err;
            previous += PTR_HEADER_SIZE;
            msize_and_free = MSIZE_FLAGS_FOR_PTR(previous);
            if (!(msize_and_free & THIS_FREE)) {
                // block in use
                msize_t msize = msize_and_free & ~PREV_FREE;
                if (!msize) break; // last encountered
                buffer[count].address = start;
                buffer[count].size = (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE;
                count++;
                if (count >= MAX_RECORDER_BUFFER) {
                    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
                    count = 0;
                }
                start += msize << SHIFT_QUANTUM;
            } else {
                // free block
                msize_t msize = msize_and_free & ~THIS_FREE;
                start += msize << SHIFT_QUANTUM;
            }
        }
        // malloc_printf("End region - count=%d\n", count);
    }
    if (count) recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
    return 0;
}
static INLINE void *small_malloc_from_free_list(szone_t *szone, msize_t msize, boolean_t *locked) {
    void *ptr;
    msize_t this_msize;
    free_list_t **free_list;
    free_list_t **limit = szone->free_list + MAX_GRAIN - 1;
    // first try the small grains
    free_list = szone->free_list + grain_for_msize(szone, msize);
    while (free_list < limit) {
        ptr = *free_list;
        if (ptr) {
            if (!*locked) { *locked = 1; SZONE_LOCK(szone); CHECK(szone, __PRETTY_FUNCTION__); }
            ptr = *free_list;
            if (ptr) {
                // optimistic test worked
                free_list_t *next;
                next = ((free_list_t *)ptr)->next;
                if (next) {
                    next->previous = NULL;
                    free_list_set_checksum(szone, next);
                }
                *free_list = next;
                this_msize = MSIZE_FLAGS_FOR_PTR(ptr) & ~THIS_FREE;
                MSIZE_FLAGS_FOR_PTR(FOLLOWING_PTR(ptr, this_msize)) &= ~ PREV_FREE;
                goto add_leftover_and_proceed;
            }
        }
        free_list++;
    }
    // We now check the large grains for one that is big enough
    if (!*locked) { *locked = 1; SZONE_LOCK(szone); CHECK(szone, __PRETTY_FUNCTION__); }
    ptr = *free_list;
    while (ptr) {
        this_msize = MSIZE_FLAGS_FOR_PTR(ptr) & ~THIS_FREE;
        if (this_msize >= msize) {
            free_list_remove_ptr(szone, ptr, this_msize);
            goto add_leftover_and_proceed;
        }
        ptr = ((free_list_t *)ptr)->next;
    }
    return NULL;
add_leftover_and_proceed:
    if (this_msize >= msize + MIN_BLOCK) {
        if (LOG(szone,ptr)) malloc_printf("In small_malloc_should_clear(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
        free_list_add_ptr(szone, ptr + (msize << SHIFT_QUANTUM), this_msize - msize);
        this_msize = msize;
    }
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += this_msize << SHIFT_QUANTUM;
    if (LOG(szone,ptr)) malloc_printf("In small_malloc_should_clear(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
    MSIZE_FLAGS_FOR_PTR(ptr) = this_msize;
    return ptr;
}
static INLINE void *small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) {
    boolean_t locked = 0;
    void *ptr;
    if (! (msize & 0xffff)) {
        szone_error(szone, "Invariant broken (!msize) in allocation (region)", NULL);
    }
    if (msize < MIN_BLOCK) {
        szone_error(szone, "Invariant broken (msize too small) in allocation (region)", NULL);
    }
    ptr = small_malloc_from_free_list(szone, msize, &locked);
    if (ptr) {
        CHECK(szone, __PRETTY_FUNCTION__);
        SZONE_UNLOCK(szone);
        if (cleared_requested) memset(ptr, 0, (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE);
        return ptr;
    }
    if (!locked) SZONE_LOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    ptr = small_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
    CHECK(szone, __PRETTY_FUNCTION__);
    SZONE_UNLOCK(szone);
    return ptr;
}
static INLINE void *small_malloc_cleared_no_lock(szone_t *szone, msize_t msize) {
    // tries to allocate a small, cleared block
    boolean_t locked = 1;
    void *ptr;
    ptr = small_malloc_from_free_list(szone, msize, &locked);
    if (ptr) {
        memset(ptr, 0, (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE);
    } else {
        ptr = small_malloc_from_region_no_lock(szone, msize);
        // we don't clear because this freshly allocated space is pristine
    }
    return ptr;
}
/********************* LARGE ENTRY UTILITIES ************************/
static void large_cache_debug_print(szone_t *szone) {
    unsigned index = LARGE_CACHE_SIZE;
    malloc_printf("Cache to be deallocated: ");
    while (index--) {
        vm_range_t range = szone->large_to_deallocate[index];
        if (range.size) malloc_printf("0x%x(%dKB) ", range.address, range.size/1024);
    }
    malloc_printf("\n");
}
static void large_debug_print(szone_t *szone) {
    unsigned num_large_entries = szone->num_large_entries;
    unsigned index = num_large_entries;
    while (index--) {
        large_entry_t *range = szone->large_entries + index;
        large_entry_t entry = *range;
        if (!LARGE_ENTRY_IS_EMPTY(entry)) malloc_printf("%d: 0x%x(%dKB); ", index, LARGE_ENTRY_ADDRESS(entry), LARGE_ENTRY_SIZE(entry)/1024);
    }
    malloc_printf("\n");
}
static large_entry_t *large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) {
    // result only valid during a lock
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index;
    unsigned index;
    if (!num_large_entries) return NULL;
    hash_index = ((unsigned)ptr >> vm_page_shift) % num_large_entries;
    index = hash_index;
    do {
        large_entry_t *range = szone->large_entries + index;
        large_entry_t entry = *range;
        if (LARGE_ENTRY_MATCHES(entry, ptr)) return range;
        if (LARGE_ENTRY_IS_EMPTY(entry)) return NULL; // end of chain
        index++; if (index == num_large_entries) index = 0;
    } while (index != hash_index);
    return NULL;
}
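/*
 * Example of the open-addressing scheme (added for illustration, not in the
 * original source): with num_large_entries == 15 and 4KB pages, the pointer
 * 0x00654000 hashes to (0x00654000 >> 12) % 15 == 0x654 % 15 == 0.  Lookup
 * probes that slot and then walks forward one slot at a time, wrapping at the
 * end of the table, until it finds a matching entry, an empty entry (a miss),
 * or comes back to the starting slot.
 */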
static void large_entry_insert_no_lock(szone_t *szone, large_entry_t range) {
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index = (range.address_and_num_pages >> vm_page_shift) % num_large_entries;
    unsigned index = hash_index;
    // malloc_printf("Before insertion of 0x%x\n", LARGE_ENTRY_ADDRESS(range));
    do {
        large_entry_t *entry = szone->large_entries + index;
        if (LARGE_ENTRY_IS_EMPTY(*entry)) {
            *entry = range;
            return; // end of chain
        }
        index++; if (index == num_large_entries) index = 0;
    } while (index != hash_index);
}
static INLINE void large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) {
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index = entry - szone->large_entries;
    unsigned index = hash_index;
    large_entry_t range;
    do {
        index++; if (index == num_large_entries) index = 0;
        range = szone->large_entries[index];
        if (LARGE_ENTRY_IS_EMPTY(range)) return;
        szone->large_entries[index].address_and_num_pages = 0;
        large_entry_insert_no_lock(szone, range); // this will reinsert in the proper place
    } while (index != hash_index);
}
static INLINE large_entry_t *large_entries_alloc_no_lock(szone_t *szone, unsigned num) {
    size_t size = num * sizeof(large_entry_t);
    boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;
    if (is_vm_allocation) {
        return (void *)allocate_pages(szone, round_page(size), 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE));
    } else {
        return small_malloc_cleared_no_lock(szone, (size + PTR_HEADER_SIZE + QUANTUM - 1) >> SHIFT_QUANTUM);
    }
}
static void large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num) {
    size_t size = num * sizeof(large_entry_t);
    boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;
    if (is_vm_allocation) {
        deallocate_pages(szone, (vm_address_t)entries, round_page(size), 0);
    } else {
        region_t *region = region_for_ptr_no_lock(szone, entries);
        msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(entries);
        if (msize_and_free & THIS_FREE) {
            szone_error(szone, "Object already freed being freed", entries);
            return;
        }
        small_free_no_lock(szone, region, entries, msize_and_free);
    }
}
static void large_entries_grow_no_lock(szone_t *szone) {
    unsigned old_num_entries = szone->num_large_entries;
    large_entry_t *old_entries = szone->large_entries;
    unsigned new_num_entries = (old_num_entries) ? old_num_entries * 2 + 1 : 15; // always an odd number for good hashing
    large_entry_t *new_entries = large_entries_alloc_no_lock(szone, new_num_entries);
    unsigned index = old_num_entries;
    szone->num_large_entries = new_num_entries;
    szone->large_entries = new_entries;
    // malloc_printf("_grow_large_entries old_num_entries=%d new_num_entries=%d\n", old_num_entries, new_num_entries);
    while (index--) {
        large_entry_t oldRange = old_entries[index];
        if (!LARGE_ENTRY_IS_EMPTY(oldRange)) large_entry_insert_no_lock(szone, oldRange);
    }
    if (old_entries) large_entries_free_no_lock(szone, old_entries, old_num_entries);
}
static vm_range_t large_free_no_lock(szone_t *szone, large_entry_t *entry) {
    // enters the specified large entry into the cache of freed entries
    // returns a range to truly deallocate
    vm_range_t vm_range_to_deallocate;
    vm_range_t range;
    vm_range_t *range_to_use;
    range.address = LARGE_ENTRY_ADDRESS(*entry);
    range.size = LARGE_ENTRY_SIZE(*entry);
    szone->num_large_objects_in_use--;
    szone->num_bytes_in_large_objects -= range.size;
    if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
        protect(szone, range.address, range.size, VM_PROT_READ | VM_PROT_WRITE, szone->debug_flags);
        range.address -= vm_page_size;
        range.size += 2 * vm_page_size;
    }
    // printf("Entry is 0x%x=%d; cache is 0x%x ; found=0x%x\n", entry, entry-szone->large_entries, szone->large_entries, large_entry_for_pointer_no_lock(szone, (void *)range.address));
    entry->address_and_num_pages = 0;
    large_entries_rehash_after_entry_no_lock(szone, entry);
    if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) {
        malloc_printf("*** malloc[%d]: Freed entry 0x%x still in use; num_large_entries=%d\n", getpid(), range.address, szone->num_large_entries);
        large_cache_debug_print(szone);
        large_debug_print(szone);
    }
    range = coalesce_range(szone->large_to_deallocate, LARGE_CACHE_SIZE, range);
    range_to_use = first_zero_range(szone->large_to_deallocate, LARGE_CACHE_SIZE);
    if (range_to_use) {
        // we fill an empty slot
        *range_to_use = range;
        return zero_range;
    }
    // we always try to deallocate the largest chunk
    range_to_use = largest_range(szone->large_to_deallocate, LARGE_CACHE_SIZE);
    if (!range_to_use) return range;
    vm_range_to_deallocate = *range_to_use;
    *range_to_use = range;
    return vm_range_to_deallocate;
}
static kern_return_t large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) {
    unsigned index;
    vm_range_t buffer[MAX_RECORDER_BUFFER];
    unsigned count = 0;
    large_entry_t *entries;
    kern_return_t err;
    err = reader(task, large_entries_address, sizeof(large_entry_t) * num_entries, (void **)&entries);
    if (err) return err;
    index = num_entries;
    if ((type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) && (num_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD)) {
        vm_range_t range;
        range.address = large_entries_address;
        range.size = round_page(num_entries * sizeof(large_entry_t));
        recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &range, 1);
    }
    if (type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE)) while (index--) {
        large_entry_t entry = entries[index];
        if (!LARGE_ENTRY_IS_EMPTY(entry)) {
            vm_range_t range;
            range.address = LARGE_ENTRY_ADDRESS(entry);
            range.size = LARGE_ENTRY_SIZE(entry);
            buffer[count++] = range;
            if (count >= MAX_RECORDER_BUFFER) {
                recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count);
                count = 0;
            }
        }
    }
    if (count) recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count);
    return 0;
}
/********************* HUGE ENTRY UTILITIES ************************/
static huge_entry_t *huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) {
    unsigned index = szone->num_huge_entries;
    while (index--) {
        huge_entry_t *huge = szone->huge_entries + index;
        if (huge->address == (vm_address_t)ptr) return huge;
    }
    return NULL;
}
static void huge_entry_append(szone_t *szone, huge_entry_t huge) {
    // We do a little dance with locking because doing allocation (even in the default szone) may cause something to get freed in this szone, with a deadlock
    huge_entry_t *new_huge_entries = NULL;
    SZONE_LOCK(szone);
    while (1) {
        unsigned num_huge_entries;
        num_huge_entries = szone->num_huge_entries;
        SZONE_UNLOCK(szone);
        // malloc_printf("In huge_entry_append currentEntries=%d\n", num_huge_entries);
        if (new_huge_entries) szone_free(szone, new_huge_entries);
        new_huge_entries = szone_malloc(szone, (num_huge_entries + 1) * sizeof(huge_entry_t));
        SZONE_LOCK(szone);
        if (num_huge_entries == szone->num_huge_entries) {
            // No change - our malloc still applies
            huge_entry_t *old_huge_entries = szone->huge_entries;
            if (num_huge_entries) memcpy(new_huge_entries, old_huge_entries, num_huge_entries * sizeof(huge_entry_t));
            new_huge_entries[szone->num_huge_entries++] = huge;
            szone->huge_entries = new_huge_entries;
            SZONE_UNLOCK(szone);
            szone_free(szone, old_huge_entries);
            // malloc_printf("Done huge_entry_append now=%d\n", szone->num_huge_entries);
            return;
        }
        // another thread added an entry in the meantime; try again
    }
}
static kern_return_t huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) {
    huge_entry_t *entries;
    kern_return_t err;
    err = reader(task, huge_entries_address, sizeof(huge_entry_t) * num_entries, (void **)&entries);
    if (err) return err;
    if (num_entries) recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, entries, num_entries);
    return 0;
}
static void *large_and_huge_malloc(szone_t *szone, unsigned num_pages, boolean_t cleared_requested) {
    vm_address_t addr = 0;
    boolean_t cleared_needed = 0; // by default blocks will be freshly allocated and therefore no need to clean them
    if (!num_pages) num_pages = 1; // minimal allocation size for this szone
    // malloc_printf("In large_and_huge_malloc for %dKB\n", num_pages * vm_page_size / 1024);
    if (num_pages >= (1 << vm_page_shift)) {
        huge_entry_t huge;
        huge.size = num_pages << vm_page_shift;
        addr = allocate_pages(szone, huge.size, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_HUGE));
        if (!addr) return NULL;
        huge.address = addr;
        huge_entry_append(szone, huge);
        SZONE_LOCK(szone);
        szone->num_bytes_in_huge_objects += huge.size;
    } else {
        vm_size_t size = num_pages << vm_page_shift;
        large_entry_t entry;
        boolean_t add_guard_pages = szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
        vm_size_t guard_pages = (add_guard_pages) ? 2 * vm_page_size : 0;
        vm_range_t *range_to_use;
        cleared_needed = cleared_requested; // in the "large" case set by default
        SZONE_LOCK(szone);
        // First check in the list large_to_deallocate if we can reuse
        // malloc_printf("In szone_malloc checking recently deallocated\n");
        range_to_use = first_range_greater_or_equal(szone->large_to_deallocate, LARGE_CACHE_SIZE, size + guard_pages);
        if (range_to_use) {
            // that one will do!
            addr = range_to_use->address + ((add_guard_pages) ? vm_page_size : 0);
            if (add_guard_pages) protect(szone, addr, size, 0, szone->debug_flags);
            // malloc_printf("In szone_malloc found recently deallocated at 0x%x for %d pages\n", addr, num_pages);
            if (range_to_use->size == size + guard_pages) {
                *range_to_use = zero_range;
            } else {
                range_to_use->address += size + guard_pages;
                range_to_use->size -= size + guard_pages;
            }
#if DEBUG_MALLOC
            if (large_entry_for_pointer_no_lock(szone, (void *)addr)) {
                malloc_printf("Entry about to be reused already in use: 0x%x\n", addr);
                large_debug_print(szone);
            }
#endif
        }
        if (!addr) {
            // we need to really allocate_pages a new region
            SZONE_UNLOCK(szone);
            addr = allocate_pages(szone, size, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE));
            cleared_needed = 0; // since we allocated the pages, no need to clean them
            if (LOG(szone, addr)) malloc_printf("In szone_malloc true large allocation at %p for %dKB\n", (void *)addr, size / 1024);
            if (!addr) return NULL;
            SZONE_LOCK(szone);
#if DEBUG_MALLOC
            if (large_entry_for_pointer_no_lock(szone, (void *)addr)) {
                malloc_printf("Freshly allocated is already in use: 0x%x\n", addr);
                large_debug_print(szone);
            }
#endif
        }
        if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) {
            // density of hash table too high; grow table
            // we do that under lock to avoid a race
            // malloc_printf("In szone_malloc growing hash table current=%d\n", szone->num_large_entries);
            large_entries_grow_no_lock(szone);
        }
        // malloc_printf("Inserting large entry (0x%x, %dKB)\n", addr, num_pages * vm_page_size / 1024);
        entry.address_and_num_pages = addr | num_pages;
#if DEBUG_MALLOC
        if (large_entry_for_pointer_no_lock(szone, (void *)addr)) {
            malloc_printf("Entry about to be added already in use: 0x%x\n", addr);
            large_debug_print(szone);
        }
#endif
        large_entry_insert_no_lock(szone, entry);
#if DEBUG_MALLOC
        if (!large_entry_for_pointer_no_lock(szone, (void *)addr)) {
            malloc_printf("Can't find entry just added\n");
            large_debug_print(szone);
        }
#endif
        // malloc_printf("Inserted large entry (0x%x, %d pages)\n", addr, num_pages);
        szone->num_large_objects_in_use++;
        szone->num_bytes_in_large_objects += size;
    }
    SZONE_UNLOCK(szone);
    if (cleared_needed) memset((void *)addr, 0, num_pages << vm_page_shift);
    return (void *)addr;
}
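/*
 * Sizing note (added for illustration, not in the original source), assuming
 * 4KB pages (vm_page_shift == 12): LARGE_THRESHOLD is 12288 bytes, so requests
 * of 12KB and up reach large_and_huge_malloc(); a request needs num_pages >=
 * 1 << 12 == 4096 pages, i.e. 16MB and up, to be tracked as a huge entry,
 * matching the "log2(size) >= 2 * vm_page_shift" comment in szone_t.
 */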
/********************* Zone call backs ************************/
static void szone_free(szone_t *szone, void *ptr) {
    region_t *region;
    large_entry_t *entry;
    vm_range_t vm_range_to_deallocate;
    huge_entry_t *huge;
    if (LOG(szone, ptr)) malloc_printf("In szone_free with %p\n", ptr);
    if (!ptr) return;
    if ((vm_address_t)ptr & (QUANTUM - 1)) {
        szone_error(szone, "Non-aligned pointer being freed", ptr);
        return;
    }
    // try a small pointer
    region = region_for_ptr_no_lock(szone, ptr);
    if (region) {
        // this is indeed a valid pointer
        msize_t msize_and_free;
        SZONE_LOCK(szone);
        msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
        if (msize_and_free & THIS_FREE) {
            szone_error(szone, "Object already freed being freed", ptr);
            return;
        }
        CHECK(szone, __PRETTY_FUNCTION__);
        small_free_no_lock(szone, region, ptr, msize_and_free);
        CHECK(szone, __PRETTY_FUNCTION__);
        SZONE_UNLOCK(szone);
        return;
    }
    if (((unsigned)ptr) & (vm_page_size - 1)) {
        szone_error(szone, "Non-page-aligned, non-allocated pointer being freed", ptr);
        return;
    }
    SZONE_LOCK(szone);
    entry = large_entry_for_pointer_no_lock(szone, ptr);
    if (entry) {
        // malloc_printf("Ready for deallocation [0x%x-%dKB]\n", LARGE_ENTRY_ADDRESS(*entry), LARGE_ENTRY_SIZE(*entry)/1024);
        if (KILL_THRESHOLD && (LARGE_ENTRY_SIZE(*entry) > KILL_THRESHOLD)) {
            // We indicate to the VM system that these pages contain garbage and therefore don't need to be swapped out
            vm_msync(mach_task_self(), LARGE_ENTRY_ADDRESS(*entry), LARGE_ENTRY_SIZE(*entry), VM_SYNC_KILLPAGES);
        }
        vm_range_to_deallocate = large_free_no_lock(szone, entry);
#if DEBUG_MALLOC
        if (large_entry_for_pointer_no_lock(szone, ptr)) {
            malloc_printf("*** malloc[%d]: Just after freeing 0x%x still in use num_large_entries=%d\n", getpid(), ptr, szone->num_large_entries);
            large_cache_debug_print(szone);
            large_debug_print(szone);
        }
#endif
    } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) {
        vm_range_to_deallocate = *huge;
        *huge = szone->huge_entries[--szone->num_huge_entries]; // last entry fills that spot
        szone->num_bytes_in_huge_objects -= vm_range_to_deallocate.size;
    } else {
#if DEBUG_MALLOC
        large_debug_print(szone);
#endif
        szone_error(szone, "Pointer being freed was not allocated", ptr);
        return;
    }
    CHECK(szone, __PRETTY_FUNCTION__);
    SZONE_UNLOCK(szone); // we release the lock asap
    // we deallocate_pages, including guard pages
    if (vm_range_to_deallocate.address) {
        // malloc_printf("About to deallocate 0x%x size %dKB\n", vm_range_to_deallocate.address, vm_range_to_deallocate.size / 1024);
#if DEBUG_MALLOC
        if (large_entry_for_pointer_no_lock(szone, (void *)vm_range_to_deallocate.address)) {
            malloc_printf("*** malloc[%d]: Invariant broken: 0x%x still in use num_large_entries=%d\n", getpid(), vm_range_to_deallocate.address, szone->num_large_entries);
            large_cache_debug_print(szone);
            large_debug_print(szone);
        }
#endif
        deallocate_pages(szone, vm_range_to_deallocate.address, vm_range_to_deallocate.size, 0);
    }
}
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested) {
    void *ptr;
    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
        // think small
        size_t msize = (size + PTR_HEADER_SIZE + QUANTUM - 1) >> SHIFT_QUANTUM;
        if (msize < MIN_BLOCK) msize = MIN_BLOCK;
        ptr = small_malloc_should_clear(szone, msize, cleared_requested);
#if DEBUG_MALLOC
        if ((MSIZE_FLAGS_FOR_PTR(ptr) & ~ PREV_FREE) < msize) {
            malloc_printf("ptr=%p this=%d msize=%d\n", ptr, MSIZE_FLAGS_FOR_PTR(ptr), (int)msize);
            szone_error(szone, "Pointer allocated has improper size (1)", ptr);
        }
        if ((MSIZE_FLAGS_FOR_PTR(ptr) & ~ PREV_FREE) < MIN_BLOCK) {
            malloc_printf("ptr=%p this=%d msize=%d\n", ptr, MSIZE_FLAGS_FOR_PTR(ptr), (int)msize);
            szone_error(szone, "Pointer allocated has improper size (2)", ptr);
        }
#endif
    } else {
        unsigned num_pages;
        num_pages = round_page(size) >> vm_page_shift;
        ptr = large_and_huge_malloc(szone, num_pages, cleared_requested);
    }
    if (LOG(szone, ptr)) malloc_printf("szone_malloc returned %p\n", ptr);
    return ptr;
}
static void *szone_malloc(szone_t *szone, size_t size) {
    return szone_malloc_should_clear(szone, size, 0);
}

static void *szone_calloc(szone_t *szone, size_t num_items, size_t size) {
    return szone_malloc_should_clear(szone, num_items * size, 1);
}
static void *szone_valloc(szone_t *szone, size_t size) {
    void *ptr;
    unsigned num_pages;
    num_pages = round_page(size) >> vm_page_shift;
    ptr = large_and_huge_malloc(szone, num_pages, 1);
    if (LOG(szone, ptr)) malloc_printf("szone_valloc returned %p\n", ptr);
    return ptr;
}
static size_t szone_size(szone_t *szone, const void *ptr) {
    size_t size = 0;
    region_t *region;
    large_entry_t *entry;
    huge_entry_t *huge;
    if (LOG(szone, ptr)) malloc_printf("In szone_size for %p (szone=%p)\n", ptr, szone);
    if ((vm_address_t)ptr & (QUANTUM - 1)) return 0;
    if ((((unsigned)ptr) & (vm_page_size - 1)) && (MSIZE_FLAGS_FOR_PTR(ptr) & THIS_FREE)) {
        // not page aligned, but definitely not in use
        return 0;
    }
    // Try a small pointer
    region = region_for_ptr_no_lock(szone, ptr);
    // malloc_printf("FOUND REGION %p\n", region);
    if (region) {
        // this is indeed a valid pointer
        msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
        return (msize_and_free & THIS_FREE) ? 0 : ((msize_and_free & ~PREV_FREE) << SHIFT_QUANTUM) - PTR_HEADER_SIZE;
    }
    if (((unsigned)ptr) & (vm_page_size - 1)) {
        return 0;
    }
    SZONE_LOCK(szone);
    entry = large_entry_for_pointer_no_lock(szone, ptr);
    if (entry) {
        size = LARGE_ENTRY_SIZE(*entry);
    } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) {
        size = huge->size;
    }
    SZONE_UNLOCK(szone);
    // malloc_printf("szone_size for large/huge %p returned %d\n", ptr, (unsigned)size);
    if (LOG(szone, ptr)) malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)size);
    return size;
}
static INLINE int szone_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) {
    // returns 1 on success
    void *next_block = (char *)ptr + old_size + PTR_HEADER_SIZE;
    msize_t next_msize_and_free;
    msize_t next_msize;
    region_t region;
    msize_t coalesced_msize;
    msize_t leftover_msize;
    msize_t new_msize_and_free;
    void *following_ptr;
    SZONE_LOCK(szone);
    region = szone->regions[szone->num_regions - 1];
    if (((vm_address_t)ptr >= region) && ((vm_address_t)ptr < region + REGION_SIZE) && ((vm_address_t)next_block == REGION_END(region) - szone->num_bytes_free_in_last_region + PTR_HEADER_SIZE)) {
        // This could be optimized but it is so rare it's not worth it
        SZONE_UNLOCK(szone);
        return 0;
    }
    // If the next block is free, we coalesce
    next_msize_and_free = MSIZE_FLAGS_FOR_PTR(next_block);
#if DEBUG_MALLOC
    if ((vm_address_t)next_block & (QUANTUM - 1)) {
        szone_error(szone, "Internal invariant broken in realloc(next_block)", next_block);
    }
    if (next_msize_and_free & PREV_FREE) {
        malloc_printf("szone_try_realloc_in_place: 0x%x=PREV_FREE|%d\n", next_msize_and_free, next_msize_and_free & ~PREV_FREE);
        SZONE_UNLOCK(szone);
        return 0;
    }
#endif
    next_msize = next_msize_and_free & ~THIS_FREE;
    if (!(next_msize_and_free & THIS_FREE) || !next_msize || (old_size + (next_msize << SHIFT_QUANTUM) < new_size)) {
        SZONE_UNLOCK(szone);
        return 0;
    }
    coalesced_msize = (new_size - old_size + QUANTUM - 1) >> SHIFT_QUANTUM;
    leftover_msize = next_msize - coalesced_msize;
    new_msize_and_free = MSIZE_FLAGS_FOR_PTR(ptr);
    // malloc_printf("Realloc in place for %p; current msize=%d next_msize=%d wanted=%d\n", ptr, MSIZE_FLAGS_FOR_PTR(ptr), next_msize, new_size);
    free_list_remove_ptr(szone, next_block, next_msize);
    if ((leftover_msize < MIN_BLOCK) || (leftover_msize < coalesced_msize / 4)) {
        // don't bother splitting it off
        // malloc_printf("No leftover ");
        coalesced_msize = next_msize;
        leftover_msize = 0;
    } else {
        void *leftover = next_block + (coalesced_msize << SHIFT_QUANTUM);
        // malloc_printf("Leftover ");
        free_list_add_ptr(szone, leftover, leftover_msize);
    }
    new_msize_and_free += coalesced_msize;
    MSIZE_FLAGS_FOR_PTR(ptr) = new_msize_and_free;
    following_ptr = FOLLOWING_PTR(ptr, new_msize_and_free & ~PREV_FREE);
    MSIZE_FLAGS_FOR_PTR(following_ptr) &= ~ PREV_FREE;
#if DEBUG_MALLOC
    {
        msize_t ms = MSIZE_FLAGS_FOR_PTR(following_ptr);
        msize_t pms = PREVIOUS_MSIZE(FOLLOWING_PTR(following_ptr, ms & ~THIS_FREE));
        malloc_printf("Following ptr of coalesced (%p) has msize_and_free=0x%x=%s%d end_of_block_marker=%d\n", following_ptr, ms, (ms & THIS_FREE) ? "THIS_FREE|" : "", ms & ~THIS_FREE, pms);
    }
    if (LOG(szone,ptr)) malloc_printf("In szone_realloc(), ptr=%p, msize=%d\n", ptr, MSIZE_FLAGS_FOR_PTR(ptr));
#endif
    CHECK(szone, __PRETTY_FUNCTION__);
    szone->num_bytes_in_small_objects += coalesced_msize << SHIFT_QUANTUM;
    SZONE_UNLOCK(szone);
    // malloc_printf("Extended ptr %p for realloc old=%d desired=%d new=%d leftover=%d\n", ptr, (unsigned)old_size, (unsigned)new_size, (unsigned)szone_size(szone, ptr), leftover_msize << SHIFT_QUANTUM);
    return 1;
}
static void *szone_realloc(szone_t *szone, void *ptr, size_t new_size) {
    size_t old_size = 0;
    void *newPtr;
    if (LOG(szone, ptr)) malloc_printf("In szone_realloc for %p, %d\n", ptr, (unsigned)new_size);
    if (!ptr) return szone_malloc(szone, new_size);
    old_size = szone_size(szone, ptr);
    if (!old_size) {
        szone_error(szone, "Pointer being reallocated was not allocated", ptr);
        return NULL;
    }
    if (old_size >= new_size) return ptr;
    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (new_size < LARGE_THRESHOLD)) {
        // We now try to realloc in place
        if (szone_try_realloc_in_place(szone, ptr, old_size, new_size)) return ptr;
    }
    newPtr = szone_malloc(szone, new_size);
    if (old_size > VM_COPY_THRESHOLD) {
        kern_return_t err = 0;
        err = vm_copy(mach_task_self(), (vm_address_t)ptr, old_size, (vm_address_t)newPtr);
        if (err) {
            szone_error(szone, "Can't vm_copy region", ptr);
        }
    } else {
        memcpy(newPtr, ptr, old_size);
    }
    szone_free(szone, ptr);
    if (LOG(szone, ptr)) malloc_printf("szone_realloc returned %p for %d\n", newPtr, (unsigned)new_size);
    return newPtr;
}
static void szone_destroy(szone_t *szone) {
    unsigned index;
    index = szone->num_large_entries;
    while (index--) {
        large_entry_t *entry = szone->large_entries + index;
        if (!LARGE_ENTRY_IS_EMPTY(*entry)) {
            large_entry_t range;
            range = *entry;
            // we deallocate_pages, including guard pages
            deallocate_pages(szone, LARGE_ENTRY_ADDRESS(range), LARGE_ENTRY_SIZE(range), szone->debug_flags);
        }
    }
    if (szone->num_large_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD) large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries); // we do not free in the small chunk case
    index = LARGE_CACHE_SIZE;
    while (index--) {
        vm_range_t range = szone->large_to_deallocate[index];
        if (range.size) deallocate_pages(szone, range.address, range.size, 0);
    }
    index = szone->num_huge_entries;
    while (index--) {
        huge_entry_t *huge = szone->huge_entries + index;
        deallocate_pages(szone, huge->address, huge->size, szone->debug_flags);
    }
    // and now we free regions, with regions[0] as the last one (the final harakiri)
    index = szone->num_regions;
    while (index--) { // we skip the first region, that is the zone itself
        region_t region = szone->regions[index];
        deallocate_pages(szone, REGION_ADDRESS(region), REGION_SIZE, 0);
    }
}
static size_t szone_good_size(szone_t *szone, size_t size) {
    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
        // think small
        msize_t msize = (size + PTR_HEADER_SIZE + QUANTUM - 1) >> SHIFT_QUANTUM;
        if (msize < MIN_BLOCK) msize = MIN_BLOCK;
        return (msize << SHIFT_QUANTUM) - PTR_HEADER_SIZE;
    } else {
        unsigned num_pages;
        num_pages = round_page(size) >> vm_page_shift;
        if (!num_pages) num_pages = 1; // minimal allocation size for this
        return num_pages << vm_page_shift;
    }
}
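/*
 * Examples (added for illustration, not in the original source), assuming 4KB
 * pages: szone_good_size(szone, 100) == (7 << 4) - 2 == 110;
 * szone_good_size(szone, 10000) == (626 << 4) - 2 == 10014 (still a small
 * block, since 10000 < LARGE_THRESHOLD == 12288); szone_good_size(szone, 20000)
 * rounds to 5 pages == 20480.
 */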
unsigned szone_check_counter = 0;
unsigned szone_check_start = 0;
unsigned szone_check_modulo = 1;
static boolean_t szone_check_all(szone_t *szone, const char *function) {
    unsigned index = 0;
    SZONE_LOCK(szone);
    while (index < szone->num_regions) {
        region_t *region = szone->regions + index++;
        if (!szone_check_region(szone, region)) {
            SZONE_UNLOCK(szone);
            szone->debug_flags &= ~ CHECK_REGIONS;
            malloc_printf("*** malloc[%d]: Region %d incorrect szone_check_all(%s) counter=%d\n", getpid(), index-1, function, szone_check_counter);
            szone_error(szone, "Check: region incorrect", NULL);
            return 0;
        }
    }
    index = 0;
    while (index < MAX_GRAIN) {
        if (! free_list_check(szone, index)) {
            SZONE_UNLOCK(szone);
            szone->debug_flags &= ~ CHECK_REGIONS;
            malloc_printf("*** malloc[%d]: Free list incorrect (grain=%d) szone_check_all(%s) counter=%d\n", getpid(), index, function, szone_check_counter);
            szone_error(szone, "Check: free list incorrect", NULL);
            return 0;
        }
        index++;
    }
    SZONE_UNLOCK(szone);
    return 1;
}
static boolean_t szone_check(szone_t *szone) {
    if (! (++szone_check_counter % 10000)) {
        malloc_printf("At szone_check counter=%d\n", szone_check_counter);
    }
    if (szone_check_counter < szone_check_start) return 1;
    if (szone_check_counter % szone_check_modulo) return 1;
    return szone_check_all(szone, "");
}
static kern_return_t szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder) {
    szone_t *szone;
    kern_return_t err;
    if (!reader) reader = _szone_default_reader;
    // malloc_printf("Enumerator for zone 0x%x\n", zone_address);
    err = reader(task, zone_address, sizeof(szone_t), (void **)&szone);
    if (err) return err;
    // malloc_printf("Small ptrs enumeration for zone 0x%x\n", zone_address);
    err = small_in_use_enumerator(task, context, type_mask, (vm_address_t)szone->regions, szone->num_regions, reader, recorder);
    if (err) return err;
    // malloc_printf("Large ptrs enumeration for zone 0x%x\n", zone_address);
    err = large_in_use_enumerator(task, context, type_mask, (vm_address_t)szone->large_entries, szone->num_large_entries, reader, recorder);
    if (err) return err;
    // malloc_printf("Huge ptrs enumeration for zone 0x%x\n", zone_address);
    err = huge_in_use_enumerator(task, context, type_mask, (vm_address_t)szone->huge_entries, szone->num_huge_entries, reader, recorder);
    return err;
}
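/*
 * Sketch of a trivial memory_reader_t for the in-process case (hypothetical helper; the
 * enumerator above normally falls back to _szone_default_reader).  When task is the
 * calling task, the "remote" memory is directly addressable, so the reader can simply
 * hand the address back without copying:
 *
 *     static kern_return_t local_reader(task_t task, vm_address_t address,
 *                                       vm_size_t size, void **ptr) {
 *         *ptr = (void *)address;   // same address space: no copy needed
 *         return KERN_SUCCESS;
 *     }
 */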
static void szone_print_free_list(szone_t *szone) {
    grain_t grain = MAX_GRAIN;
    malloc_printf("Free Sizes: ");
    while (grain--) {
        free_list_t *ptr = szone->free_list[grain];
        if (ptr) {
            unsigned count = 0;
            while (ptr) {
                count++;
                // malloc_printf("%p ", ptr);
                ptr = ptr->next;
            }
            malloc_printf("%s%d[%d] ", (grain == MAX_GRAIN-1) ? ">=" : "", (grain+1)*QUANTUM, count);
        }
    }
    malloc_printf("\n");
}
static void szone_print(szone_t *szone, boolean_t verbose) {
    unsigned info[scalable_zone_info_count];
    unsigned index;
    unsigned num = 0;
    index = LARGE_CACHE_SIZE;
    while (index--) if (szone->large_to_deallocate[index].size) num++;
    index = 0;
    scalable_zone_info((void *)szone, info, scalable_zone_info_count);
    malloc_printf("Scalable zone %p: inUse=%d(%dKB) small=%d(%dKB) large=%d(%dKB) to_be_deallocated=%d huge=%d(%dKB) guard_page=%d\n", szone, info[0], info[1] / 1024, info[2], info[3] / 1024, info[4], info[5] / 1024, num, info[6], info[7] / 1024, info[8]);
    malloc_printf("%d regions: \n", szone->num_regions);
    while (index < szone->num_regions) {
        region_t *region = szone->regions + index;
        unsigned counts[512];
        unsigned ci = 0;
        unsigned in_use = 0;
        vm_address_t start = REGION_ADDRESS(*region) + QUANTUM;
        memset(counts, 0, 512 * sizeof(unsigned));
        while (start < REGION_END(*region)) {
            msize_t msize_and_free = MSIZE_FLAGS_FOR_PTR(start);
            if (!(msize_and_free & THIS_FREE)) {
                // block in use
                msize_t msize = msize_and_free & ~PREV_FREE;
                if (!msize) break; // last encountered
                if (msize < 512) counts[msize]++;
                start += msize << SHIFT_QUANTUM;
                in_use++;
            } else {
                // free block
                msize_t msize = msize_and_free & ~THIS_FREE;
                start += msize << SHIFT_QUANTUM;
            }
        }
        malloc_printf("Region [0x%x-0x%x, %dKB] \tIn_use=%d ", REGION_ADDRESS(*region), REGION_END(*region), (int)REGION_SIZE / 1024, in_use);
        if (verbose) {
            malloc_printf("\n\tSizes in use: ");
            while (ci < 512) {
                if (counts[ci]) malloc_printf("%d[%d] ", ci << SHIFT_QUANTUM, counts[ci]);
                ci++;
            }
        }
        malloc_printf("\n");
        index++;
    }
    if (verbose) szone_print_free_list(szone);
    malloc_printf("Free in last zone %d\n", szone->num_bytes_free_in_last_region);
}
static void szone_log(malloc_zone_t *zone, void *log_address) {
    szone_t *szone = (void *)zone;
    szone->log_address = log_address;
}
static void szone_force_lock(szone_t *szone) {
    // malloc_printf("szone_force_lock\n");
    SZONE_LOCK(szone);
}

static void szone_force_unlock(szone_t *szone) {
    // malloc_printf("szone_force_unlock\n");
    SZONE_UNLOCK(szone);
}
static struct malloc_introspection_t szone_introspect = {(void *)szone_ptr_in_use_enumerator, (void *)szone_good_size, (void *)szone_check, (void *)szone_print, szone_log, (void *)szone_force_lock, (void *)szone_force_unlock};
malloc_zone_t *create_scalable_zone(size_t initial_size, unsigned debug_flags) {
    szone_t *szone;
    vm_address_t addr;
    size_t msize;
    size_t msize_used = 0;
    if (!vm_page_shift) {
        unsigned page;
        vm_page_shift = 12; // the minimal for page sizes
        page = 1 << vm_page_shift;
        while (page != vm_page_size) { page += page; vm_page_shift++; };
        if (MIN_BLOCK * QUANTUM < sizeof(free_list_t) + PTR_HEADER_SIZE) {
            malloc_printf("*** malloc[%d]: inconsistent parameters\n", getpid());
            return NULL;
        }
    }
    addr = allocate_pages(NULL, REGION_SIZE, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC));
    if (!addr) return NULL;
    szone = (void *)(addr + QUANTUM);
    msize = (sizeof(szone_t) + PTR_HEADER_SIZE + QUANTUM - 1) >> SHIFT_QUANTUM;
    MSIZE_FLAGS_FOR_PTR(szone) = msize;
    msize_used += msize; szone->num_small_objects++;
    szone->basic_zone.size = (void *)szone_size;
    szone->basic_zone.malloc = (void *)szone_malloc;
    szone->basic_zone.calloc = (void *)szone_calloc;
    szone->basic_zone.valloc = (void *)szone_valloc;
    szone->basic_zone.free = (void *)szone_free;
    szone->basic_zone.realloc = (void *)szone_realloc;
    szone->basic_zone.destroy = (void *)szone_destroy;
    szone->basic_zone.introspect = &szone_introspect;
    LOCK_INIT(szone->lock);
    szone->debug_flags = debug_flags;
    szone->regions = (void *)((char *)szone + (msize << SHIFT_QUANTUM));
    // we always reserve room for a few regions
    msize = (sizeof(region_t) * INITIAL_NUM_REGIONS + PTR_HEADER_SIZE + QUANTUM - 1) >> SHIFT_QUANTUM;
    if (msize < MIN_BLOCK) msize = MIN_BLOCK;
    MSIZE_FLAGS_FOR_PTR(szone->regions) = msize;
    msize_used += msize; szone->num_small_objects++;
    szone->regions[0] = addr;
    szone->num_regions = 1;
    szone->num_bytes_free_in_last_region = REGION_SIZE - ((msize_used+1) << SHIFT_QUANTUM) + PTR_HEADER_SIZE;
    CHECK(szone, __PRETTY_FUNCTION__);
    return (malloc_zone_t *)szone;
}
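/*
 * Usage sketch (illustrative, not part of this file): a client creates the zone and then
 * dispatches through the malloc_zone_t function pointers installed above.
 *
 *     malloc_zone_t *zone = create_scalable_zone(0, 0);   // no debug flags
 *     void *p = zone->malloc(zone, 100);                   // szone_malloc
 *     p = zone->realloc(zone, p, 8192);                    // szone_realloc
 *     zone->free(zone, p);                                 // szone_free
 *     zone->destroy(zone);                                 // szone_destroy
 */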
/********* The following is private API for debug and perf tools ************/
void scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count) {
    szone_t *szone = (void *)zone;
    unsigned info[scalable_zone_info_count];
    // We do not lock to facilitate debug
    info[2] = szone->num_small_objects;
    info[3] = szone->num_bytes_in_small_objects;
    info[4] = szone->num_large_objects_in_use;
    info[5] = szone->num_bytes_in_large_objects;
    info[6] = szone->num_huge_entries;
    info[7] = szone->num_bytes_in_huge_objects;
    info[8] = szone->debug_flags;
    info[0] = info[2] + info[4] + info[6];
    info[1] = info[3] + info[5] + info[7];
    memcpy(info_to_fill, info, sizeof(unsigned)*count);