/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/* Author: Bertrand Serlet, August 1999 */
#include "scalable_malloc.h"

#include <pthread_internals.h>

#include <mach/vm_statistics.h>
#include <mach/mach_init.h>
/********************* DEFINITIONS ************************/

#define DEBUG_MALLOC    0   // set to one to debug malloc itself

#define DEBUG_CLIENT    0   // set to one to help debug a nasty memory smasher

#if DEBUG_MALLOC
#warning DEBUG_MALLOC ENABLED
# define INLINE
# define CHECK_LOCKED(szone, fun) \
    do { \
        if (__is_threaded && TRY_LOCK(szone->lock)) { \
            malloc_printf("*** lock was not set %p in %s\n", szone->lock, fun); \
        } \
    } while (0)
#else
# define INLINE __inline__
# define CHECK_LOCKED(szone, fun) {}
#endif
/*
 * Access to global variables is slow, so optimise our handling of vm_page_size
 * and vm_page_shift.
 */
#define _vm_page_size   vm_page_size    /* to get to the originals */
#define _vm_page_shift  vm_page_shift
#define vm_page_size    4096            /* our normal working sizes */
#define vm_page_shift   12
typedef unsigned short msize_t; // a size in multiples of SHIFT_SMALL_QUANTUM or SHIFT_TINY_QUANTUM

/*
 * Note that in the LP64 case, this is 24 bytes, necessitating the 32-byte tiny grain size.
 */
typedef struct {
    uintptr_t   checksum;
    void        *previous;
    void        *next;
} free_list_t;

typedef struct {
    uintptr_t   address_and_num_pages;
    // this type represents both an address and a number of pages
    // the low bits are the number of pages; the high bits are the address
    // note that the exact number of bits used depends on the page size
    // also, this cannot represent pointers larger than 1 << (vm_page_shift * 2)
} compact_range_t;

typedef unsigned char grain_t;

#define CHECK_REGIONS           (1 << 31)

#ifdef __LP64__
# define CHECKSUM_MAGIC         0xdeadbeef0badc0de
#else
# define CHECKSUM_MAGIC         0x357B
#endif

#define MAX_RECORDER_BUFFER     256
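/*
 * Illustrative sketch (not compiled, not part of the allocator): how the free-list
 * checksum above is meant to catch corruption of a freed block.  This assumes the
 * free_list_t layout reconstructed above; the real checks live in
 * free_list_set_checksum() / free_list_checksum() further down.  The helper name
 * is hypothetical.
 */
#if 0
static int
free_list_node_looks_sane(const free_list_t *node)
{
    // A node is intact when its stored checksum equals the XOR of its link
    // pointers with CHECKSUM_MAGIC, exactly as free_list_set_checksum() records
    // it whenever the node is put on a free list.
    return node->checksum == (((uintptr_t)node->previous) ^ ((uintptr_t)node->next) ^ CHECKSUM_MAGIC);
}
#endif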
/********************* DEFINITIONS for tiny ************************/

/*
 * Memory in the Tiny range is allocated from regions (heaps) pointed to by the szone's tiny_regions
 * pointer.
 *
 * Each region is laid out as a heap (1MB in 32-bit mode, 2MB in 64-bit mode), followed by a header
 * block.  The header block is arranged:
 *
 *      header bits
 *      0xffffffff pad word
 *      in-use bits
 *      pad word (not written)
 *
 * Each bitfield comprises NUM_TINY_BLOCKS bits, and refers to the corresponding TINY_QUANTUM block
 * within the heap.
 *
 * The bitfields are used to encode the state of memory within the heap.  The header bit indicates
 * that the corresponding quantum is the first quantum in a block (either in use or free).  The
 * in-use bit is set for the header if the block has been handed out (allocated).  If the header
 * bit is not set, the in-use bit is invalid.
 *
 * The szone maintains an array of 32 freelists, each of which is used to hold free objects
 * of the corresponding quantum size.
 *
 * A free block is laid out as (offsets differ between 32-bit and 64-bit mode):
 *
 *      checksum
 *      previous
 *      next
 *      size (in quantum counts)
 *      ...
 *      size (in quantum counts)        <- trailing copy at the very end of the block
 *
 * All fields are pointer-sized, except for the size which is an unsigned short.
 */
#ifdef __LP64__
# define SHIFT_TINY_QUANTUM     5       // Required to fit free_list_t
#else
# define SHIFT_TINY_QUANTUM     4       // Required for AltiVec
#endif

#define TINY_QUANTUM            (1 << SHIFT_TINY_QUANTUM)

#define FOLLOWING_TINY_PTR(ptr,msize)   (((unsigned char *)(ptr)) + ((msize) << SHIFT_TINY_QUANTUM))

#define NUM_TINY_SLOTS          32      // number of slots for free-lists

#define SHIFT_NUM_TINY_BLOCKS   16
#define NUM_TINY_BLOCKS         (1 << SHIFT_NUM_TINY_BLOCKS)
#define TINY_BLOCKS_ALIGN       (SHIFT_NUM_TINY_BLOCKS + SHIFT_TINY_QUANTUM)

/*
 * Enough room for the data, followed by the bit arrays (2-bits per block) plus 2 words of padding
 * as our bitmap operators overflow, plus rounding to the nearest page.
 */
#define TINY_REGION_SIZE        ((NUM_TINY_BLOCKS * TINY_QUANTUM + (NUM_TINY_BLOCKS >> 2) + 8 + vm_page_size - 1) & ~ (vm_page_size - 1))
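/*
 * Worked example (illustrative), assuming the 32-bit configuration above
 * (SHIFT_TINY_QUANTUM = 4, vm_page_size = 4096):
 *
 *   data:    NUM_TINY_BLOCKS * TINY_QUANTUM = 65536 * 16 = 1,048,576 bytes (1MB heap)
 *   bitmaps: NUM_TINY_BLOCKS >> 2           = 16,384 bytes (2 bits per block)
 *   padding: 8 bytes for the bitmap operators that read past the end
 *
 *   1,048,576 + 16,384 + 8 = 1,064,968, rounded up to the next 4096-byte page = 1,069,056.
 *
 * In the LP64 configuration the quantum is 32 bytes, so the heap portion is 2MB instead.
 */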
/*
 * Obtain the size of a free tiny block (in msize_t units).
 */
#ifdef __LP64__
# define TINY_FREE_SIZE(ptr)            (((msize_t *)(ptr))[14])
#else
# define TINY_FREE_SIZE(ptr)            (((msize_t *)(ptr))[6])
#endif
/*
 * The size is also kept at the very end of a free block.
 */
#define TINY_PREVIOUS_MSIZE(ptr)        ((msize_t *)(ptr))[-1]

/*
 * Beginning and end pointers for a region's heap.
 */
#define TINY_REGION_ADDRESS(region)     ((void *)(region))
#define TINY_REGION_END(region)         (TINY_REGION_ADDRESS(region) + (1 << TINY_BLOCKS_ALIGN))

/*
 * Locate the heap base for a pointer known to be within a tiny region.
 */
#define TINY_REGION_FOR_PTR(_p)         ((void *)((uintptr_t)(_p) & ~((1 << TINY_BLOCKS_ALIGN) - 1)))

/*
 * Convert between byte and msize units.
 */
#define TINY_BYTES_FOR_MSIZE(_m)        ((_m) << SHIFT_TINY_QUANTUM)
#define TINY_MSIZE_FOR_BYTES(_b)        ((_b) >> SHIFT_TINY_QUANTUM)

/*
 * Locate the block header for a pointer known to be within a tiny region.
 */
#define TINY_BLOCK_HEADER_FOR_PTR(_p)   ((void *)(((((uintptr_t)(_p)) >> TINY_BLOCKS_ALIGN) + 1) << TINY_BLOCKS_ALIGN))

/*
 * Locate the inuse map for a given block header pointer.
 */
#define TINY_INUSE_FOR_HEADER(_h)       ((void *)((uintptr_t)(_h) + (NUM_TINY_BLOCKS >> 3) + 4))

/*
 * Compute the bitmap index for a pointer known to be within a tiny region.
 */
#define TINY_INDEX_FOR_PTR(_p)          (((uintptr_t)(_p) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1))
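/*
 * Illustrative sketch (not compiled): how the macros above decompose a pointer
 * into a tiny region.  The helper name is hypothetical; the real users are
 * get_tiny_meta_header() and friends below.
 */
#if 0
static void
tiny_ptr_breakdown_example(const void *ptr)
{
    void          *region       = TINY_REGION_FOR_PTR(ptr);            // start of the 1MB/2MB heap
    unsigned char *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);      // header bitmap right after the heap
    unsigned char *in_use       = TINY_INUSE_FOR_HEADER(block_header); // in-use bitmap after the header + pad
    msize_t        index        = TINY_INDEX_FOR_PTR(ptr);             // which TINY_QUANTUM within the region

    malloc_printf("ptr=%p region=%p header=%p in_use=%p index=%d\n",
        ptr, region, block_header, in_use, index);
}
#endif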
typedef void *tiny_region_t;

#define INITIAL_NUM_TINY_REGIONS 24     // must be even for szone to be aligned

#define TINY_CACHE 1    // This governs a last-free cache of 1 that bypasses the free-list

#if !TINY_CACHE
#warning TINY_CACHE turned off
#endif
/********************* DEFINITIONS for small ************************/

/*
 * Memory in the Small range is allocated from regions (heaps) pointed to by the szone's small_regions
 * pointer.
 *
 * Each region is laid out as a heap (8MB in 32-bit and 64-bit mode), followed by the metadata array.
 * The array is arranged as an array of shorts, one for each SMALL_QUANTUM in the heap.
 *
 * The MSB of each short is set for the first quantum in a free block.  The low 15 bits encode the
 * block size (in SMALL_QUANTUM units), or are zero if the quantum is not the first in a block.
 *
 * The szone maintains an array of 32 freelists, each of which is used to hold free objects
 * of the corresponding quantum size.
 *
 * A free block is laid out as (offsets differ between 32-bit and 64-bit mode):
 *
 *      checksum
 *      previous
 *      next
 *      size (in quantum counts)
 *      ...
 *      size (in quantum counts)        <- trailing copy at the very end of the block
 *
 * All fields are pointer-sized, except for the size which is an unsigned short.
 */
#define SMALL_IS_FREE           (1 << 15)

#define SHIFT_SMALL_QUANTUM     (SHIFT_TINY_QUANTUM + 5)        // 9
#define SMALL_QUANTUM           (1 << SHIFT_SMALL_QUANTUM)      // 512 bytes

#define FOLLOWING_SMALL_PTR(ptr,msize)  (((unsigned char *)(ptr)) + ((msize) << SHIFT_SMALL_QUANTUM))

#define NUM_SMALL_SLOTS         32      // number of slots for free-lists

/*
 * We can only represent up to 1<<15 for msize; but we choose to stay even below that to avoid the
 * convention msize=0 => msize = (1<<15)
 */
#define SHIFT_NUM_SMALL_BLOCKS  14
#define NUM_SMALL_BLOCKS        (1 << SHIFT_NUM_SMALL_BLOCKS)
#define SMALL_BLOCKS_ALIGN      (SHIFT_NUM_SMALL_BLOCKS + SHIFT_SMALL_QUANTUM) // 23
#define SMALL_REGION_SIZE       (NUM_SMALL_BLOCKS * SMALL_QUANTUM + NUM_SMALL_BLOCKS * 2) // data + meta data
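/*
 * Worked example (illustrative), with SHIFT_SMALL_QUANTUM = 9 (the 32-bit case):
 *
 *   data:     NUM_SMALL_BLOCKS * SMALL_QUANTUM = 16384 * 512 = 8,388,608 bytes (8MB heap)
 *   metadata: NUM_SMALL_BLOCKS * 2             = 16384 * 2   =    32,768 bytes (one short per quantum)
 *
 * so SMALL_REGION_SIZE = 8MB + 32KB.  Because both factors are powers of two the
 * total is already page-aligned, so no rounding term is needed here.
 */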
#define SMALL_PREVIOUS_MSIZE(ptr)       ((msize_t *)(ptr))[-1]

/*
 * Convert between byte and msize units.
 */
#define SMALL_BYTES_FOR_MSIZE(_m)       ((_m) << SHIFT_SMALL_QUANTUM)
#define SMALL_MSIZE_FOR_BYTES(_b)       ((_b) >> SHIFT_SMALL_QUANTUM)

#define SMALL_REGION_ADDRESS(region)    ((unsigned char *)region)
#define SMALL_REGION_END(region)        (SMALL_REGION_ADDRESS(region) + (1 << SMALL_BLOCKS_ALIGN))

/*
 * Locate the heap base for a pointer known to be within a small region.
 */
#define SMALL_REGION_FOR_PTR(_p)        ((void *)((uintptr_t)(_p) & ~((1 << SMALL_BLOCKS_ALIGN) - 1)))

/*
 * Locate the metadata base for a pointer known to be within a small region.
 */
#define SMALL_META_HEADER_FOR_PTR(_p)   ((msize_t *)(((((uintptr_t)(_p)) >> SMALL_BLOCKS_ALIGN) + 1) << SMALL_BLOCKS_ALIGN))

/*
 * Compute the metadata index for a pointer known to be within a small region.
 */
#define SMALL_META_INDEX_FOR_PTR(_p)    (((uintptr_t)(_p) >> SHIFT_SMALL_QUANTUM) & (NUM_SMALL_BLOCKS - 1))

/*
 * Find the metadata word for a pointer known to be within a small region.
 */
#define SMALL_METADATA_FOR_PTR(_p)      (SMALL_META_HEADER_FOR_PTR(_p) + SMALL_META_INDEX_FOR_PTR(_p))

/*
 * Determine whether a pointer known to be within a small region points to memory which is free.
 */
#define SMALL_PTR_IS_FREE(_p)           (*SMALL_METADATA_FOR_PTR(_p) & SMALL_IS_FREE)

/*
 * Extract the msize value for a pointer known to be within a small region.
 */
#define SMALL_PTR_SIZE(_p)              (*SMALL_METADATA_FOR_PTR(_p) & ~SMALL_IS_FREE)
typedef void *small_region_t;

#define INITIAL_NUM_SMALL_REGIONS 6     // must be even for szone to be aligned

#define PROTECT_SMALL   0       // Should be 0: 1 is too slow for normal use

#define SMALL_CACHE     1
#if !SMALL_CACHE
#warning SMALL_CACHE turned off
#endif
/********************* DEFINITIONS for large ************************/

#define LARGE_THRESHOLD         (15 * 1024)     // at or above this use "large"

#if (LARGE_THRESHOLD > NUM_SMALL_SLOTS * SMALL_QUANTUM)
#error LARGE_THRESHOLD should always be less than NUM_SMALL_SLOTS * SMALL_QUANTUM
#endif

#define VM_COPY_THRESHOLD       (40 * 1024)
        // When all memory is touched after a copy, vm_copy() is always a lose
        // But if the memory is only read, vm_copy() wins over memmove() at 3 or 4 pages (on a G3/300MHz)
        // This must be larger than LARGE_THRESHOLD
/*
 * Given a large_entry, return the address of the allocated block.
 */
#define LARGE_ENTRY_ADDRESS(entry) \
    (void *)(((entry).address_and_num_pages >> vm_page_shift) << vm_page_shift)

/*
 * Given a large entry, return the number of pages or bytes in the allocated block.
 */
#define LARGE_ENTRY_NUM_PAGES(entry) \
    ((entry).address_and_num_pages & (vm_page_size - 1))
#define LARGE_ENTRY_SIZE(entry) \
    (LARGE_ENTRY_NUM_PAGES(entry) << vm_page_shift)

/*
 * Compare a pointer with a large entry.
 */
#define LARGE_ENTRY_MATCHES(entry,ptr) \
    ((((entry).address_and_num_pages - (uintptr_t)(ptr)) >> vm_page_shift) == 0)

#define LARGE_ENTRY_IS_EMPTY(entry)     (((entry).address_and_num_pages) == 0)

typedef compact_range_t large_entry_t;
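/*
 * Illustrative sketch (not compiled): how address_and_num_pages packs a
 * page-aligned address together with a page count, and how the macros above
 * recover them.  The helper name is hypothetical; the real entries are built by
 * the large-allocation path (see the large_* routines declared below).
 */
#if 0
static large_entry_t
make_large_entry_example(void *addr, unsigned num_pages)
{
    large_entry_t entry;

    // addr is page-aligned, so its low vm_page_shift bits are zero and can carry
    // num_pages (which must stay below vm_page_size for this encoding to work).
    entry.address_and_num_pages = (uintptr_t)addr | num_pages;
    // LARGE_ENTRY_ADDRESS(entry)   == addr
    // LARGE_ENTRY_NUM_PAGES(entry) == num_pages
    // LARGE_ENTRY_SIZE(entry)      == num_pages << vm_page_shift
    return entry;
}
#endif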
/********************* DEFINITIONS for huge ************************/

typedef vm_range_t huge_entry_t;

/********************* zone itself ************************/

typedef struct {
    malloc_zone_t       basic_zone;
    pthread_lock_t      lock;
    unsigned            debug_flags;
    void                *log_address;

    /* Regions for tiny objects */
    unsigned            num_tiny_regions;
    tiny_region_t       *tiny_regions;
    void                *last_tiny_free;        // low SHIFT_TINY_QUANTUM bits indicate the msize
    unsigned            tiny_bitmap;            // cache of the 32 free lists
    free_list_t         *tiny_free_list[NUM_TINY_SLOTS];   // 31 free lists for 1*TINY_QUANTUM to 31*TINY_QUANTUM plus 1 for larger than 32*TINY_QUANTUM
    size_t              tiny_bytes_free_at_end; // the last free region in the last block is treated as a big block in use that is not accounted for
    unsigned            num_tiny_objects;
    size_t              num_bytes_in_tiny_objects;

    /* Regions for small objects */
    unsigned            num_small_regions;
    small_region_t      *small_regions;
    void                *last_small_free;       // low SHIFT_SMALL_QUANTUM bits indicate the msize
    unsigned            small_bitmap;           // cache of the free list
    free_list_t         *small_free_list[NUM_SMALL_SLOTS];
    size_t              small_bytes_free_at_end; // the last free region in the last block is treated as a big block in use that is not accounted for
    unsigned            num_small_objects;
    size_t              num_bytes_in_small_objects;

    /* large objects: vm_page_shift <= log2(size) < 2 * vm_page_shift */
    unsigned            num_large_objects_in_use;
    unsigned            num_large_entries;
    large_entry_t       *large_entries;         // hashed by location; null entries don't count
    size_t              num_bytes_in_large_objects;

    /* huge objects: log2(size) >= 2 * vm_page_shift */
    unsigned            num_huge_entries;
    huge_entry_t        *huge_entries;
    size_t              num_bytes_in_huge_objects;

    /* Initial region list */
    tiny_region_t       initial_tiny_regions[INITIAL_NUM_TINY_REGIONS];
    small_region_t      initial_small_regions[INITIAL_NUM_SMALL_REGIONS];
} szone_t;
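/*
 * Illustrative sketch (not compiled): how a request size maps onto the four
 * allocator tiers described above.  The thresholds mirror the definitions and
 * comments earlier in this file; the real routing (which also considers debug
 * flags) happens in szone_malloc_should_clear().  The helper is hypothetical.
 */
#if 0
static const char *
size_class_example(size_t size)
{
    if (size <= 31 * TINY_QUANTUM)
        return "tiny";      // quantum-sized blocks carved from a tiny region
    if (size < LARGE_THRESHOLD)
        return "small";     // SMALL_QUANTUM blocks carved from a small region
    if (size < ((size_t)1 << (2 * vm_page_shift)))
        return "large";     // whole pages, tracked in the large_entries hash
    return "huge";          // whole pages, tracked in the huge_entries array
}
#endif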
#if DEBUG_MALLOC || DEBUG_CLIENT
static void szone_sleep(void);
#endif

static void szone_error(szone_t *szone, const char *msg, const void *ptr);
static void protect(szone_t *szone, void *address, size_t size, unsigned protection, unsigned debug_flags);
static void *allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label);
static void deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags);
static kern_return_t _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr);

static INLINE void free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg);
static INLINE void free_list_set_checksum(szone_t *szone, free_list_t *ptr);
static unsigned free_list_count(const free_list_t *ptr);

static INLINE msize_t get_tiny_meta_header(const void *ptr, boolean_t *is_free);
static INLINE void set_tiny_meta_header_in_use(const void *ptr, msize_t msize);
static INLINE void set_tiny_meta_header_middle(const void *ptr);
static INLINE void set_tiny_meta_header_free(const void *ptr, msize_t msize);
static INLINE boolean_t tiny_meta_header_is_free(const void *ptr);
static INLINE void *tiny_previous_preceding_free(void *ptr, msize_t *prev_msize);
static INLINE void tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize);
static INLINE void tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize);
static INLINE tiny_region_t *tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr);
static INLINE void tiny_free_no_lock(szone_t *szone, tiny_region_t *region, void *ptr, msize_t msize);
static void *tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
static INLINE boolean_t try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size);
static boolean_t tiny_check_region(szone_t *szone, tiny_region_t *region);
static kern_return_t tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t tiny_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder);
static INLINE void *tiny_malloc_from_free_list(szone_t *szone, msize_t msize);
static INLINE void *tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested);
static INLINE void free_tiny(szone_t *szone, void *ptr, tiny_region_t *tiny_region);
static void print_tiny_free_list(szone_t *szone);
static void print_tiny_region(boolean_t verbose, tiny_region_t region, size_t bytes_at_end);
static boolean_t tiny_free_list_check(szone_t *szone, grain_t slot);

static INLINE void small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize);
static INLINE void small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize);
static INLINE void small_meta_header_set_middle(msize_t *meta_headers, msize_t index);
static void small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize);
static void small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize);
static INLINE small_region_t *small_region_for_ptr_no_lock(szone_t *szone, const void *ptr);
static INLINE void small_free_no_lock(szone_t *szone, small_region_t *region, void *ptr, msize_t msize);
static void *small_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
static INLINE boolean_t try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size);
static boolean_t szone_check_small_region(szone_t *szone, small_region_t *region);
static kern_return_t small_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t small_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder);
static INLINE void *small_malloc_from_free_list(szone_t *szone, msize_t msize);
static INLINE void *small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested);
static INLINE void *small_malloc_cleared_no_lock(szone_t *szone, msize_t msize);
static INLINE void free_small(szone_t *szone, void *ptr, small_region_t *small_region);
static void print_small_free_list(szone_t *szone);
static void print_small_region(szone_t *szone, boolean_t verbose, small_region_t *region, size_t bytes_at_end);
static boolean_t small_free_list_check(szone_t *szone, grain_t grain);

static void large_debug_print(szone_t *szone);
static large_entry_t *large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
static void large_entry_insert_no_lock(szone_t *szone, large_entry_t range);
static INLINE void large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry);
static INLINE large_entry_t *large_entries_alloc_no_lock(szone_t *szone, unsigned num);
static void large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate);
static void large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate);
static vm_range_t large_free_no_lock(szone_t *szone, large_entry_t *entry);
static kern_return_t large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder);
static huge_entry_t *huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
static boolean_t huge_entry_append(szone_t *szone, huge_entry_t huge);
static kern_return_t huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder);
static void *large_and_huge_malloc(szone_t *szone, unsigned num_pages);
static INLINE void free_large_or_huge(szone_t *szone, void *ptr);
static INLINE int try_realloc_large_or_huge_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size);

static void szone_free(szone_t *szone, void *ptr);
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested);
static void *szone_malloc(szone_t *szone, size_t size);
static void *szone_calloc(szone_t *szone, size_t num_items, size_t size);
static void *szone_valloc(szone_t *szone, size_t size);
static size_t szone_size(szone_t *szone, const void *ptr);
static void *szone_realloc(szone_t *szone, void *ptr, size_t new_size);
static unsigned szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count);
static void szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count);
static void szone_destroy(szone_t *szone);
static size_t szone_good_size(szone_t *szone, size_t size);

static boolean_t szone_check_all(szone_t *szone, const char *function);
static boolean_t szone_check(szone_t *szone);
static kern_return_t szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder);
static void szone_print(szone_t *szone, boolean_t verbose);
static void szone_log(malloc_zone_t *zone, void *log_address);
static void szone_force_lock(szone_t *szone);
static void szone_force_unlock(szone_t *szone);

static void szone_statistics(szone_t *szone, malloc_statistics_t *stats);

static void *frozen_malloc(szone_t *zone, size_t new_size);
static void *frozen_calloc(szone_t *zone, size_t num_items, size_t size);
static void *frozen_valloc(szone_t *zone, size_t new_size);
static void *frozen_realloc(szone_t *zone, void *ptr, size_t new_size);
static void frozen_free(szone_t *zone, void *ptr);
static void frozen_destroy(szone_t *zone);
#if DEBUG_MALLOC
# define LOG(szone,ptr) \
    (szone->log_address && (((uintptr_t)szone->log_address == -1) || (szone->log_address == (void *)(ptr))))
#else
# define LOG(szone,ptr)         0
#endif

#define SZONE_LOCK(szone) \
    do { \
        LOCK(szone->lock); \
    } while (0)

#define SZONE_UNLOCK(szone) \
    do { \
        UNLOCK(szone->lock); \
    } while (0)

#define LOCK_AND_NOTE_LOCKED(szone,locked) \
    do { \
        CHECK(szone, __PRETTY_FUNCTION__); \
        locked = 1; SZONE_LOCK(szone); \
    } while (0)

#if DEBUG_MALLOC || DEBUG_CLIENT
# define CHECK(szone,fun) \
    if ((szone)->debug_flags & CHECK_REGIONS) szone_check_all(szone, fun)
#else
# define CHECK(szone,fun)       do {} while (0)
#endif
/********************* VERY LOW LEVEL UTILITIES ************************/

#if DEBUG_MALLOC || DEBUG_CLIENT
static void
szone_sleep(void)
{
    if (getenv("MallocErrorSleep")) {
        malloc_printf("*** sleeping to help debug\n");
        sleep(3600);    // to help debug
    }
}
#endif

static void
szone_error(szone_t *szone, const char *msg, const void *ptr)
{
    if (szone) SZONE_UNLOCK(szone);
    if (ptr) {
        malloc_printf("*** error for object %p: %s\n", ptr, msg);
    } else {
        malloc_printf("*** error: %s\n", msg);
    }
#if DEBUG_MALLOC
    szone_print(szone, 1);
    szone_sleep();
#endif
#if DEBUG_CLIENT
    szone_sleep();
#endif
}
static void
protect(szone_t *szone, void *address, size_t size, unsigned protection, unsigned debug_flags)
{
    kern_return_t err;

    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) {
        err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address - vm_page_size, vm_page_size, 0, protection);
        if (err) {
            malloc_printf("*** can't protect(%p) region for prelude guard page at %p\n",
                protection, address - (1 << vm_page_shift));
        }
    }
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) {
        err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address + size, vm_page_size, 0, protection);
        if (err) {
            malloc_printf("*** can't protect(%p) region for postlude guard page at %p\n",
                protection, address + size);
        }
    }
}
static void *
allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label)
{
    // align specifies a desired alignment (as a log) or 0 if no alignment requested
    kern_return_t   err;
    vm_address_t    vm_addr;
    uintptr_t       addr, aligned_address;
    boolean_t       add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    size_t          allocation_size = round_page(size);
    size_t          delta;

    if (align) add_guard_pages = 0; // too cumbersome to deal with that
    if (!allocation_size) allocation_size = 1 << vm_page_shift;
    if (add_guard_pages) allocation_size += 2 * (1 << vm_page_shift);
    if (align) allocation_size += (size_t)1 << align;
    err = vm_allocate(mach_task_self(), &vm_addr, allocation_size, vm_page_label | 1);
    if (err) {
        malloc_printf("*** vm_allocate(size=%lld) failed (error code=%d)\n", (long long)size, err);
        szone_error(szone, "can't allocate region", NULL);
        return NULL;
    }
    addr = (uintptr_t)vm_addr;
    if (align) {
        aligned_address = (addr + ((uintptr_t)1 << align) - 1) & ~ (((uintptr_t)1 << align) - 1);
        if (aligned_address != addr) {
            delta = aligned_address - addr;
            err = vm_deallocate(mach_task_self(), (vm_address_t)addr, delta);
            if (err)
                malloc_printf("*** freeing unaligned header failed with %d\n", err);
            addr = aligned_address;
            allocation_size -= delta;
        }
        if (allocation_size > size) {
            err = vm_deallocate(mach_task_self(), (vm_address_t)addr + size, allocation_size - size);
            if (err)
                malloc_printf("*** freeing unaligned footer failed with %d\n", err);
        }
    }
    if (add_guard_pages) {
        addr += (uintptr_t)1 << vm_page_shift;
        protect(szone, (void *)addr, size, 0, debug_flags);
    }
    return (void *)addr;
}
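/*
 * Worked example (illustrative) of the over-allocate-and-trim strategy above for
 * aligned requests: to get a TINY_REGION_SIZE block aligned to 1 << TINY_BLOCKS_ALIGN
 * (1MB in 32-bit mode), tiny_malloc_from_region_no_lock() calls allocate_pages()
 * with align = TINY_BLOCKS_ALIGN.  The routine vm_allocate()s size + 1MB, rounds the
 * start up to the next 1MB boundary, then vm_deallocate()s the unaligned head and
 * the unused tail, leaving exactly the aligned region that was requested.
 */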
static void
deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags)
{
    kern_return_t   err;
    boolean_t       add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;

    if (add_guard_pages) {
        addr -= 1 << vm_page_shift;
        size += 2 * (1 << vm_page_shift);
    }
    err = vm_deallocate(mach_task_self(), (vm_address_t)addr, size);
    if (err && szone)
        szone_error(szone, "Can't deallocate_pages region", addr);
}

static kern_return_t
_szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr)
{
    *ptr = (void *)address;
    return 0;
}
static INLINE void
free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg)
{
    // We always checksum, as testing whether to do it (based on szone->debug_flags) is as fast as
    // doing it
    // XXX not necessarily true for LP64 case
    if (ptr->checksum != (((uintptr_t)ptr->previous) ^ ((uintptr_t)ptr->next) ^ CHECKSUM_MAGIC)) {
#if DEBUG_MALLOC
        malloc_printf("*** incorrect checksum: %s\n", msg);
#endif
        szone_error(szone, "incorrect checksum for freed object "
            "- object was probably modified after being freed, break at szone_error to debug", ptr);
    }
}

static INLINE void
free_list_set_checksum(szone_t *szone, free_list_t *ptr)
{
    // We always set checksum, as testing whether to do it (based on
    // szone->debug_flags) is slower than just doing it
    // XXX not necessarily true for LP64 case
    ptr->checksum = ((uintptr_t)ptr->previous) ^ ((uintptr_t)ptr->next) ^ CHECKSUM_MAGIC;
}

static unsigned
free_list_count(const free_list_t *ptr)
{
    unsigned count = 0;

    while (ptr) {
        count++;
        ptr = ptr->next;
    }
    return count;
}
/* XXX inconsistent use of BITMAP32 and BITARRAY operations could be cleaned up */

#define BITMAP32_SET(bitmap,bit)        (bitmap |= 1 << (bit))
#define BITMAP32_CLR(bitmap,bit)        (bitmap &= ~ (1 << (bit)))
#define BITMAP32_BIT(bitmap,bit)        ((bitmap >> (bit)) & 1)

#define BITMAP32_FFS(bitmap)            (ffs(bitmap))
    // returns bit # of first bit that's one, starting at 1 (returns 0 if !bitmap)
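/*
 * Illustrative sketch (not compiled): the idiom used by tiny_malloc_from_free_list()
 * below to find the first non-empty free list at or above a given slot, using the
 * bitmap cache and BITMAP32_FFS().  The helper name is hypothetical.
 */
#if 0
static int
first_nonempty_slot_example(unsigned bitmap, grain_t slot)
{
    // Mask off the slots below 'slot', then find the lowest set bit.
    unsigned remaining = bitmap & ~((1 << slot) - 1);

    if (!remaining)
        return -1;                      // no free list at or above 'slot' has entries
    return BITMAP32_FFS(remaining) - 1; // ffs() is 1-based, slots are 0-based
}
#endif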
/********************* TINY FREE LIST UTILITIES ************************/

// We encode the meta-headers as follows:
// Each quantum has an associated set of 2 bits:
// block_header when 1 says this block is the beginning of a block
// in_use when 1 says this block is in use
// so a block in use of size 3 is 1-1 0-X 0-X
// for a free block TINY_FREE_SIZE(ptr) carries the size and the bits are 1-0 X-X X-X
// for a block middle the bits are 0-0
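// Worked example (illustrative): for a 3-quantum allocation starting at index i,
//   block_header bits:  i=1  i+1=0  i+2=0   (only the first quantum starts a block)
//   in_use bits:        i=1  (don't care)   (only meaningful where block_header is 1)
// If the same block is later freed, block_header[i] stays 1, in_use[i] drops to 0,
// and the size (3) is stored via TINY_FREE_SIZE(ptr) plus a trailing copy reachable
// through TINY_PREVIOUS_MSIZE() from the following block.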
// Attention double evaluation for these
#define BITARRAY_SET(bits,index)        (bits[index>>3] |= (1 << (index & 7)))
#define BITARRAY_CLR(bits,index)        (bits[index>>3] &= ~(1 << (index & 7)))
#define BITARRAY_BIT(bits,index)        (((bits[index>>3]) >> (index & 7)) & 1)

// Following is for start<8 and end<=start+32
// (the switch cases fall through intentionally, clearing one byte at a time)
#define BITARRAY_MCLR_LESS_32(bits,start,end) \
    do { \
        unsigned char *_bits = (bits); \
        unsigned _end = (end); \
        switch (_end >> 3) { \
            case 4: _bits[4] &= ~ ((1 << (_end - 32)) - 1); _end = 32; \
            case 3: _bits[3] &= ~ ((1 << (_end - 24)) - 1); _end = 24; \
            case 2: _bits[2] &= ~ ((1 << (_end - 16)) - 1); _end = 16; \
            case 1: _bits[1] &= ~ ((1 << (_end - 8)) - 1); _end = 8; \
            case 0: _bits[0] &= ~ ((1 << _end) - (1 << (start))); \
        } \
    } while (0)

#if 0 // Simple but slow version
#warning Slow version in effect
#define BITARRAY_MCLR(bits,index,num) \
    do { \
        unsigned _ctr = (num); \
        unsigned _cur = (index); \
        while (_ctr--) { BITARRAY_CLR(bits,_cur); _cur++; } \
    } while (0)
#else
// Following is for num <= 32
#define BITARRAY_MCLR(bits,index,num) \
    do { \
        unsigned _index = (index); \
        unsigned char *_rebased = (bits) + (_index >> 3); \
        _index &= 7; \
        BITARRAY_MCLR_LESS_32(_rebased, _index, _index + (num)); \
    } while (0)
#endif
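/*
 * Illustrative sketch (not compiled): exercising the bit-array macros above on a
 * small local array.  Note the "double evaluation" warning: the index argument is
 * expanded more than once, so it must not have side effects.
 */
#if 0
static void
bitarray_example(void)
{
    unsigned char bits[8] = { 0 };

    BITARRAY_SET(bits, 10);     // sets bit 2 of byte 1
    BITARRAY_SET(bits, 11);
    BITARRAY_SET(bits, 12);
    BITARRAY_MCLR(bits, 10, 2); // clears bits 10 and 11, leaves bit 12 set
    malloc_printf("bit 12 = %d\n", BITARRAY_BIT(bits, 12));     // prints 1
}
#endif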
static INLINE msize_t
get_tiny_meta_header(const void *ptr, boolean_t *is_free)
{
    // returns msize and is_free
    // may return 0 for the msize component (meaning 65536)
    unsigned char   *block_header;
    unsigned char   *in_use;
    msize_t         index;
    unsigned        byte_index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    index = TINY_INDEX_FOR_PTR(ptr);
    byte_index = index >> 3;

    block_header += byte_index;
    index &= 7;
    *is_free = 0;
    if (!BITMAP32_BIT(*block_header, index))
        return 0;
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    if (!BITMAP32_BIT(*in_use, index)) {
        *is_free = 1;
        return TINY_FREE_SIZE(ptr);
    }
    uint32_t    *addr = (uint32_t *)((uintptr_t)block_header & ~3);
    uint32_t    word0 = OSReadLittleInt32(addr, 0) >> index;
    uint32_t    word1 = OSReadLittleInt32(addr, 4) << (8 - index);
    uint32_t    bits = (((uintptr_t)block_header & 3) * 8);    // precision loss on LP64 OK here
    uint32_t    word = (word0 >> bits) | (word1 << (24 - bits));
    uint32_t    result = ffs(word >> 1);
    return result;
}
static INLINE void
set_tiny_meta_header_in_use(const void *ptr, msize_t msize)
{
    unsigned char   *block_header;
    unsigned char   *in_use;
    msize_t         index;
    unsigned        byte_index;
    msize_t         clr_msize;
    unsigned        end_bit;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    index = TINY_INDEX_FOR_PTR(ptr);
    byte_index = index >> 3;

#if DEBUG_MALLOC
    if (msize >= 32)
        malloc_printf("set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize);
    if ((unsigned)index + (unsigned)msize > 0x10000)
        malloc_printf("set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize);
#endif
    block_header += byte_index;
    index &= 7;
    BITMAP32_SET(*block_header, index);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    BITMAP32_SET(*in_use, index);
    index++;
    clr_msize = msize - 1;
    if (clr_msize) {
        byte_index = index >> 3;
        block_header += byte_index; in_use += byte_index;
        index &= 7;
        end_bit = index + clr_msize;
        BITARRAY_MCLR_LESS_32(block_header, index, end_bit);
        BITARRAY_MCLR_LESS_32(in_use, index, end_bit);
    }
    BITARRAY_SET(block_header, index+clr_msize); // we set the block_header bit for the following block to reaffirm next block is a block
#if DEBUG_MALLOC
    {
        boolean_t   ff;
        msize_t     mf;

        mf = get_tiny_meta_header(ptr, &ff);
        if (msize != mf) {
            malloc_printf("setting header for tiny in_use %p : %d\n", ptr, msize);
            malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
        }
    }
#endif
}
static INLINE void
set_tiny_meta_header_middle(const void *ptr)
{
    // indicates this block is in the middle of an in use block
    unsigned char   *block_header;
    unsigned char   *in_use;
    msize_t         index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

    BITARRAY_CLR(block_header, index);
    BITARRAY_CLR(in_use, index);
    TINY_FREE_SIZE(ptr) = 0;
}
static INLINE void
set_tiny_meta_header_free(const void *ptr, msize_t msize)
{
    // !msize is acceptable and means 65536
    unsigned char   *block_header;
    unsigned char   *in_use;
    msize_t         index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

#if DEBUG_MALLOC
    if ((unsigned)index + (unsigned)msize > 0x10000) {
        malloc_printf("setting header for tiny free %p msize too large: %d\n", ptr, msize);
    }
#endif
    BITARRAY_SET(block_header, index); BITARRAY_CLR(in_use, index);
    TINY_FREE_SIZE(ptr) = msize;
    // mark the end of this block
    if (msize) {    // msize==0 => the whole region is free
        void *follower = FOLLOWING_TINY_PTR(ptr, msize);
        TINY_PREVIOUS_MSIZE(follower) = msize;
    }
#if DEBUG_MALLOC
    {
        boolean_t   ff;
        msize_t     mf = get_tiny_meta_header(ptr, &ff);

        if ((msize != mf) || !ff) {
            malloc_printf("setting header for tiny free %p : %d\n", ptr, msize);
            malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
        }
    }
#endif
}
static INLINE boolean_t
tiny_meta_header_is_free(const void *ptr)
{
    // returns msize and is_free shifted by 16
    // may return 0 for the msize component (meaning 65536)
    unsigned char   *block_header;
    unsigned char   *in_use;
    msize_t         index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);
    if (!BITARRAY_BIT(block_header, index))
        return 0;
    return !BITARRAY_BIT(in_use, index);
}
static INLINE void *
tiny_previous_preceding_free(void *ptr, msize_t *prev_msize)
{
    // returns the previous block, assuming and verifying it's free
    unsigned char   *block_header;
    unsigned char   *in_use;
    msize_t         index;
    msize_t         previous_msize;
    msize_t         previous_index;
    void            *previous_ptr;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

    if (!index)
        return NULL;
    if ((previous_msize = TINY_PREVIOUS_MSIZE(ptr)) > index)
        return NULL;

    previous_index = index - previous_msize;
    previous_ptr = (void *)(TINY_REGION_FOR_PTR(ptr) + TINY_BYTES_FOR_MSIZE(previous_index));
    if (TINY_FREE_SIZE(previous_ptr) != previous_msize)
        return NULL;

    if (!BITARRAY_BIT(block_header, previous_index))
        return NULL;
    if (BITARRAY_BIT(in_use, previous_index))
        return NULL;

    // conservative check did match true check
    *prev_msize = previous_msize;
    return previous_ptr;
}
static INLINE void
tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize)
{
    // Adds an item to the proper free list
    // Also marks the meta-header of the block properly
    // Assumes szone has been locked
    grain_t     slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *free_head = szone->tiny_free_list[slot];

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in tiny_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
        szone_error(szone, "tiny_free_list_add_ptr: Unaligned ptr", ptr);
    }
#endif
    set_tiny_meta_header_free(ptr, msize);
    if (free_head) {
        free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
#if DEBUG_MALLOC
        if (free_head->previous) {
            malloc_printf("ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, free_head, free_head->previous);
            szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
        }
        if (! tiny_meta_header_is_free(free_head)) {
            malloc_printf("ptr=%p slot=%d free_head=%p\n", ptr, slot, free_head);
            szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
        }
#endif
        free_head->previous = free_ptr;
        free_list_set_checksum(szone, free_head);
    } else {
        BITMAP32_SET(szone->tiny_bitmap, slot);
    }
    free_ptr->previous = NULL;
    free_ptr->next = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->tiny_free_list[slot] = free_ptr;
}
static INLINE void
tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize)
{
    // Removes item in the proper free list
    // msize could be read, but all callers have it so we pass it in
    // Assumes szone has been locked
    grain_t     slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *next = free_ptr->next;
    free_list_t *previous = free_ptr->previous;

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("In tiny_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    }
#endif
    free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
    if (!previous) {
        // The block to remove is the head of the free list
#if DEBUG_MALLOC
        if (szone->tiny_free_list[slot] != ptr) {
            malloc_printf("ptr=%p slot=%d msize=%d szone->tiny_free_list[slot]=%p\n",
                ptr, slot, msize, szone->tiny_free_list[slot]);
            szone_error(szone, "tiny_free_list_remove_ptr: Internal invariant broken (szone->tiny_free_list[slot])", ptr);
            return;
        }
#endif
        szone->tiny_free_list[slot] = next;
        if (!next) BITMAP32_CLR(szone->tiny_bitmap, slot);
    } else {
        previous->next = next;
        free_list_set_checksum(szone, previous);
    }
    if (next) {
        next->previous = previous;
        free_list_set_checksum(szone, next);
    }
}
/*
 * Find the tiny region containing (ptr) (if any).
 *
 * We take advantage of the knowledge that tiny regions are always (1 << TINY_BLOCKS_ALIGN) aligned.
 */
static INLINE tiny_region_t *
tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
{
    tiny_region_t   *region;
    tiny_region_t   rbase;
    int             i;

    /* mask off irrelevant lower bits */
    rbase = TINY_REGION_FOR_PTR(ptr);
    /* iterate over allocated regions - XXX not terribly efficient for large number of regions */
    for (i = szone->num_tiny_regions, region = szone->tiny_regions; i > 0; i--, region++)
        if (rbase == *region)
            return region;
    return NULL;
}
static INLINE void
tiny_free_no_lock(szone_t *szone, tiny_region_t *region, void *ptr, msize_t msize)
{
    size_t      original_size = TINY_BYTES_FOR_MSIZE(msize);
    void        *next_block = ((char *)ptr + original_size);
    msize_t     previous_msize;
    void        *previous;
    msize_t     next_msize;
    free_list_t *big_free_block;
    free_list_t *after_next_block;
    free_list_t *before_next_block;

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (!msize) {
        malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
        szone_error(szone, "trying to free tiny block that is too small", ptr);
    }
#endif
    // We try to coalesce this block with the preceding one
    previous = tiny_previous_preceding_free(ptr, &previous_msize);
    if (previous) {
#if DEBUG_MALLOC
        if (LOG(szone, ptr) || LOG(szone,previous)) {
            malloc_printf("in tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
        }
#endif
        tiny_free_list_remove_ptr(szone, previous, previous_msize);
        ptr = previous;
        msize += previous_msize;
    }
    // We try to coalesce with the next block
    if ((next_block < TINY_REGION_END(*region)) && tiny_meta_header_is_free(next_block)) {
        // The next block is free, we coalesce
        next_msize = TINY_FREE_SIZE(next_block);
#if DEBUG_MALLOC
        if (LOG(szone, ptr) || LOG(szone, next_block)) {
            malloc_printf("in tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n",
                ptr, msize, next_block, next_msize);
        }
#endif
        if (next_msize >= NUM_TINY_SLOTS) {
            // we take a short cut here to avoid removing next_block from the slot 31 freelist and then adding ptr back to slot 31
            msize += next_msize;
            big_free_block = (free_list_t *)next_block;
            after_next_block = big_free_block->next;
            before_next_block = big_free_block->previous;
            free_list_checksum(szone, big_free_block, __PRETTY_FUNCTION__);
            if (!before_next_block) {
                szone->tiny_free_list[NUM_TINY_SLOTS-1] = ptr;
            } else {
                before_next_block->next = ptr;
                free_list_set_checksum(szone, before_next_block);
            }
            if (after_next_block) {
                after_next_block->previous = ptr;
                free_list_set_checksum(szone, after_next_block);
            }
            ((free_list_t *)ptr)->previous = before_next_block;
            ((free_list_t *)ptr)->next = after_next_block;
            free_list_set_checksum(szone, ptr);
            set_tiny_meta_header_free(ptr, msize);
            set_tiny_meta_header_middle(big_free_block); // clear the meta_header to enable coalescing backwards
            goto tiny_free_ending;
        }
        tiny_free_list_remove_ptr(szone, next_block, next_msize);
        set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
        msize += next_msize;
    }
    if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize) {
        memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize));
    }
    tiny_free_list_add_ptr(szone, ptr, msize);
  tiny_free_ending:
    // When in proper debug mode we write on the memory to help debug memory smashers
    szone->num_tiny_objects--;
    szone->num_bytes_in_tiny_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
}
static void *
tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize)
{
    tiny_region_t   last_region, *new_regions;
    void            *last_block, *ptr, *aligned_address;

    // Allocates from the last region or a freshly allocated region
    // Before anything we transform the tiny_bytes_free_at_end - if any - to a regular free block
    if (szone->tiny_bytes_free_at_end) {
        last_region = szone->tiny_regions[szone->num_tiny_regions-1];
        last_block = TINY_REGION_END(last_region) - szone->tiny_bytes_free_at_end;
        tiny_free_list_add_ptr(szone, last_block, TINY_MSIZE_FOR_BYTES(szone->tiny_bytes_free_at_end));
        szone->tiny_bytes_free_at_end = 0;
    }
    // time to create a new region
    aligned_address = allocate_pages(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_TINY));
    if (!aligned_address) {
        return NULL;
    }
    // We set the padding after block_header to be all 1
    ((uint32_t *)(aligned_address + (1 << TINY_BLOCKS_ALIGN) + (NUM_TINY_BLOCKS >> 3)))[0] = ~0;
    if (szone->num_tiny_regions == INITIAL_NUM_TINY_REGIONS) {
        // XXX logic here fails after initial reallocation of tiny regions is exhausted (approx 4GB of
        // tiny allocations)
        new_regions = small_malloc_from_region_no_lock(szone, 16); // 16 * 512 bytes is plenty of tiny regions (more than 4,000)
        if (!new_regions) return NULL;
        memcpy(new_regions, szone->tiny_regions, INITIAL_NUM_TINY_REGIONS * sizeof(tiny_region_t));
        szone->tiny_regions = new_regions; // we set the pointer after it's all ready to enable enumeration from another thread without locking
    }
    szone->tiny_regions[szone->num_tiny_regions] = aligned_address;
    szone->num_tiny_regions++; // we set the number after the pointer is all ready to enable enumeration from another thread without taking the lock
    ptr = aligned_address;
    set_tiny_meta_header_in_use(ptr, msize);
    szone->num_tiny_objects++;
    szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(msize);
    // We put a header on the last block so that it appears in use (for coalescing, etc...)
    set_tiny_meta_header_in_use(ptr + TINY_BYTES_FOR_MSIZE(msize), 1);
    szone->tiny_bytes_free_at_end = TINY_BYTES_FOR_MSIZE(NUM_TINY_BLOCKS - msize);
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
#endif
    return ptr;
}
1175 try_realloc_tiny_in_place(szone_t
*szone
, void *ptr
, size_t old_size
, size_t new_size
)
1177 // returns 1 on success
1180 unsigned next_index
;
1183 msize_t next_msize
, coalesced_msize
, leftover_msize
;
1186 index
= TINY_INDEX_FOR_PTR(ptr
);
1187 old_msize
= TINY_MSIZE_FOR_BYTES(old_size
);
1188 next_index
= index
+ old_msize
;
1190 if (next_index
>= NUM_TINY_BLOCKS
) {
1193 next_block
= (char *)ptr
+ old_size
;
1195 is_free
= tiny_meta_header_is_free(next_block
);
1197 SZONE_UNLOCK(szone
);
1198 return 0; // next_block is in use;
1200 next_msize
= TINY_FREE_SIZE(next_block
);
1201 if (old_size
+ TINY_MSIZE_FOR_BYTES(next_msize
) < new_size
) {
1202 SZONE_UNLOCK(szone
);
1203 return 0; // even with next block, not enough
1205 tiny_free_list_remove_ptr(szone
, next_block
, next_msize
);
1206 set_tiny_meta_header_middle(next_block
); // clear the meta_header to enable coalescing backwards
1207 coalesced_msize
= TINY_MSIZE_FOR_BYTES(new_size
- old_size
+ TINY_QUANTUM
- 1);
1208 leftover_msize
= next_msize
- coalesced_msize
;
1209 if (leftover_msize
) {
1210 leftover
= next_block
+ TINY_BYTES_FOR_MSIZE(coalesced_msize
);
1211 tiny_free_list_add_ptr(szone
, leftover
, leftover_msize
);
1213 set_tiny_meta_header_in_use(ptr
, old_msize
+ coalesced_msize
);
1215 if (LOG(szone
,ptr
)) {
1216 malloc_printf("in try_realloc_tiny_in_place(), ptr=%p, msize=%d\n", ptr
, old_msize
+ coalesced_msize
);
1219 szone
->num_bytes_in_tiny_objects
+= TINY_BYTES_FOR_MSIZE(coalesced_msize
);
1220 SZONE_UNLOCK(szone
);
1221 CHECK(szone
, __PRETTY_FUNCTION__
);
static boolean_t
tiny_check_region(szone_t *szone, tiny_region_t *region)
{
    uintptr_t   start, ptr, region_end, follower;
    boolean_t   prev_free = 0;
    boolean_t   is_free;
    msize_t     msize;
    free_list_t *free_head;

    /* establish region limits */
    start = (uintptr_t)TINY_REGION_ADDRESS(*region);
    ptr = start;
    region_end = (uintptr_t)TINY_REGION_END(*region);

    /*
     * The last region may have a trailing chunk which has not been converted into inuse/freelist
     * blocks yet.
     */
    if (region == szone->tiny_regions + szone->num_tiny_regions - 1)
        region_end -= szone->tiny_bytes_free_at_end;

    /*
     * Scan blocks within the region.
     */
    while (ptr < region_end) {
        /*
         * If the first block is free, and its size is 65536 (msize = 0) then the entire region is
         * free.
         */
        msize = get_tiny_meta_header((void *)ptr, &is_free);
        if (is_free && !msize && (ptr == start)) {
            return 1;
        }

        /*
         * If the block's size is 65536 (msize = 0) then since we're not the first entry the size is
         * not valid.
         */
        if (!msize) {
            malloc_printf("*** invariant broken for tiny block %p this msize=%d - size is too small\n",
                (void *)ptr, msize);
            return 0;
        }

        if (!is_free) {
            /*
             * In use blocks cannot be more than 31 quanta large.
             */
            prev_free = 0;
            if (msize > 31 * TINY_QUANTUM) {
                malloc_printf("*** invariant broken for %p this tiny msize=%d[%p] - size is too large\n",
                    (void *)ptr, msize, msize);
                return 0;
            }
            /* move to next block */
            ptr += TINY_BYTES_FOR_MSIZE(msize);
        } else {
            /*
             * Free blocks must have been coalesced, we cannot have a free block following another
             * free block.
             */
            if (prev_free) {
                malloc_printf("*** invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n",
                    (void *)ptr, msize);
                return 0;
            }
            prev_free = 1;
            /*
             * Check the integrity of this block's entry in its freelist.
             */
            free_head = (free_list_t *)ptr;
            free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
            if (free_head->previous && !tiny_meta_header_is_free(free_head->previous)) {
                malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n",
                    (void *)ptr, free_head->previous);
                return 0;
            }
            if (free_head->next && !tiny_meta_header_is_free(free_head->next)) {
                malloc_printf("*** invariant broken for %p (next in free list %p is not a free pointer)\n",
                    (void *)ptr, free_head->next);
                return 0;
            }
            /*
             * Check the free block's trailing size value.
             */
            follower = (uintptr_t)FOLLOWING_TINY_PTR(ptr, msize);
            if ((follower != region_end) && (TINY_PREVIOUS_MSIZE(follower) != msize)) {
                malloc_printf("*** invariant broken for tiny free %p followed by %p in region [%p-%p] "
                    "(end marker incorrect) should be %d; in fact %d\n",
                    (void *)ptr, (void *)follower, TINY_REGION_ADDRESS(*region), (void *)region_end, msize, TINY_PREVIOUS_MSIZE(follower));
                return 0;
            }
            /* move to next block */
            ptr = follower;
        }
    }
    /*
     * Ensure that we scanned the entire region
     */
    if (ptr != region_end) {
        malloc_printf("*** invariant broken for region end %p - %p\n", (void *)ptr, (void *)region_end);
        return 0;
    }
    /*
     * Check the trailing block's integrity.
     */
    if (region == szone->tiny_regions + szone->num_tiny_regions - 1) {
        if (szone->tiny_bytes_free_at_end) {
            msize = get_tiny_meta_header((void *)ptr, &is_free);
            if (is_free || (msize != 1)) {
                malloc_printf("*** invariant broken for blocker block %p - %d %d\n", (void *)ptr, msize, is_free);
            }
        }
    }
    return 1;
}
static kern_return_t
tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t tiny_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder)
{
    tiny_region_t   *regions;
    unsigned        index = 0;
    vm_range_t      buffer[MAX_RECORDER_BUFFER];
    unsigned        count = 0;
    kern_return_t   err;
    tiny_region_t   region;
    vm_range_t      range;
    vm_range_t      admin_range;
    vm_range_t      ptr_range;
    unsigned char   *mapped_region;
    unsigned char   *block_header;
    unsigned char   *in_use;
    unsigned        block_index;
    unsigned        block_limit;
    boolean_t       is_free;
    msize_t         msize;
    unsigned char   *mapped_ptr;
    unsigned        bit;

    err = reader(task, region_address, sizeof(tiny_region_t) * num_regions, (void **)&regions);
    if (err) return err;
    while (index < num_regions) {
        // unsigned     num_in_use = 0;
        // unsigned     num_free = 0;
        region = regions[index];
        range.address = (vm_address_t)TINY_REGION_ADDRESS(region);
        range.size = (vm_size_t)TINY_REGION_SIZE;
        if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
            admin_range.address = range.address + (1 << TINY_BLOCKS_ALIGN);
            admin_range.size = range.size - (1 << TINY_BLOCKS_ALIGN);
            recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
        }
        if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
            ptr_range.address = range.address;
            ptr_range.size = 1 << TINY_BLOCKS_ALIGN;
            recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
        }
        if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
            err = reader(task, range.address, range.size, (void **)&mapped_region);
            if (err)
                return err;
            block_header = (unsigned char *)(mapped_region + (1 << TINY_BLOCKS_ALIGN));
            in_use = block_header + (NUM_TINY_BLOCKS >> 3) + 4;
            block_index = 0;
            block_limit = NUM_TINY_BLOCKS;
            if (index == num_regions - 1)
                block_limit -= TINY_MSIZE_FOR_BYTES(tiny_bytes_free_at_end);
            while (block_index < block_limit) {
                is_free = !BITARRAY_BIT(in_use, block_index);
                if (is_free) {
                    mapped_ptr = mapped_region + TINY_BYTES_FOR_MSIZE(block_index);
                    msize = TINY_FREE_SIZE(mapped_ptr);
                    if (!msize)
                        break;
                } else {
                    msize = 1;
                    bit = block_index + 1;
                    while (! BITARRAY_BIT(block_header, bit)) {
                        bit++; msize++;
                    }
                    buffer[count].address = range.address + TINY_BYTES_FOR_MSIZE(block_index);
                    buffer[count].size = TINY_BYTES_FOR_MSIZE(msize);
                    count++;
                    if (count >= MAX_RECORDER_BUFFER) {
                        recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
                        count = 0;
                    }
                }
                block_index += msize;
            }
        }
        index++;
    }
    if (count) {
        recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
    }
    return 0;
}
static INLINE void *
tiny_malloc_from_free_list(szone_t *szone, msize_t msize)
{
    // Assumes we've locked the region
    free_list_t *ptr;
    msize_t     this_msize;
    grain_t     slot = msize - 1;
    free_list_t **free_list = szone->tiny_free_list;
    free_list_t **the_slot = free_list + slot;
    free_list_t *next;
    free_list_t **limit;
    unsigned    bitmap;
    msize_t     leftover_msize;
    free_list_t *leftover_ptr;

    /*
     * Look for an exact match by checking the freelist for this msize.
     */
    ptr = *the_slot;
    if (ptr) {
        next = ptr->next;
        if (next) {
            next->previous = NULL;
            free_list_set_checksum(szone, next);
        }
        *the_slot = next;
        this_msize = msize;
#if DEBUG_MALLOC
        if (LOG(szone, ptr)) {
            malloc_printf("in tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize);
        }
#endif
        goto return_tiny_alloc;
    }

    /*
     * Iterate over freelists for larger blocks looking for the next-largest block.
     */
    bitmap = szone->tiny_bitmap & ~ ((1 << slot) - 1);
    if (!bitmap)
        goto try_tiny_malloc_from_end;
    slot = BITMAP32_FFS(bitmap) - 1;
    limit = free_list + NUM_TINY_SLOTS - 1;
    free_list += slot;
    while (free_list < limit) {
        // try bigger grains
        ptr = *free_list;
        if (ptr) {
            next = ptr->next;
            if (next) {
                next->previous = NULL;
                free_list_set_checksum(szone, next);
            }
            *free_list = next;
            this_msize = TINY_FREE_SIZE(ptr);
#if DEBUG_MALLOC
            if (LOG(szone, ptr)) {
                malloc_printf("in tiny_malloc_from_free_list(), bigger grain ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
            }
#endif
            goto add_leftover_and_proceed;
        }
        free_list++;
    }
    // we are now looking at the last slot (31)
    ptr = *limit;
    if (ptr) {
        this_msize = TINY_FREE_SIZE(ptr);
        next = ptr->next;
        if (this_msize - msize >= NUM_TINY_SLOTS) {
            // the leftover will go back to the free list, so we optimize by modifying the free list rather than removing the head and then adding back
            leftover_msize = this_msize - msize;
            leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
            *limit = leftover_ptr;
            if (next) {
                next->previous = leftover_ptr;
                free_list_set_checksum(szone, next);
            }
            leftover_ptr->next = next;
            leftover_ptr->previous = NULL;
            free_list_set_checksum(szone, leftover_ptr);
            set_tiny_meta_header_free(leftover_ptr, leftover_msize);
            this_msize = msize;
#if DEBUG_MALLOC
            if (LOG(szone,ptr)) {
                malloc_printf("in tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
            }
#endif
            goto return_tiny_alloc;
        }
        if (next) {
            next->previous = NULL;
            free_list_set_checksum(szone, next);
        }
        *limit = next;
        goto add_leftover_and_proceed;
    }

try_tiny_malloc_from_end:
    // Let's see if we can use szone->tiny_bytes_free_at_end
    if (szone->tiny_bytes_free_at_end >= TINY_BYTES_FOR_MSIZE(msize)) {
        ptr = (free_list_t *)(TINY_REGION_END(szone->tiny_regions[szone->num_tiny_regions-1]) - szone->tiny_bytes_free_at_end);
        szone->tiny_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(msize);
        if (szone->tiny_bytes_free_at_end) {
            // let's add an in use block after ptr to serve as boundary
            set_tiny_meta_header_in_use((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize), 1);
        }
        this_msize = msize;
#if DEBUG_MALLOC
        if (LOG(szone, ptr)) {
            malloc_printf("in tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize);
        }
#endif
        goto return_tiny_alloc;
    }
    return NULL;

add_leftover_and_proceed:
    if (!this_msize || (this_msize > msize)) {
        leftover_msize = this_msize - msize;
        leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
#if DEBUG_MALLOC
        if (LOG(szone,ptr)) {
            malloc_printf("in tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
        }
#endif
        tiny_free_list_add_ptr(szone, leftover_ptr, leftover_msize);
        this_msize = msize;
    }

return_tiny_alloc:
    szone->num_tiny_objects++;
    szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(this_msize);
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
    }
#endif
    set_tiny_meta_header_in_use(ptr, this_msize);
    return ptr;
}
static INLINE void *
tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested)
{
    boolean_t   locked = 0;
    void        *ptr;

#if DEBUG_MALLOC
    if (!msize) {
        szone_error(szone, "invariant broken (!msize) in allocation (region)", NULL);
        return NULL;
    }
#endif
#if TINY_CACHE
    ptr = szone->last_tiny_free;
    if ((((uintptr_t)ptr) & (TINY_QUANTUM - 1)) == msize) {
        // we have a candidate - let's lock to make sure
        LOCK_AND_NOTE_LOCKED(szone, locked);
        if (ptr == szone->last_tiny_free) {
            szone->last_tiny_free = NULL;
            SZONE_UNLOCK(szone);
            CHECK(szone, __PRETTY_FUNCTION__);
            ptr = (void *)((uintptr_t)ptr & ~ (TINY_QUANTUM - 1));
            if (cleared_requested) {
                memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
            }
#if DEBUG_MALLOC
            if (LOG(szone,ptr)) {
                malloc_printf("in tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize);
            }
#endif
            return ptr;
        }
    }
#endif
    // Except in rare occasions where we need to add a new region, we are going to end up locking, so we might as well lock right away to avoid doing unnecessary optimistic probes
    if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked);
    ptr = tiny_malloc_from_free_list(szone, msize);
    if (ptr) {
        SZONE_UNLOCK(szone);
        CHECK(szone, __PRETTY_FUNCTION__);
        if (cleared_requested) {
            memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
        }
        return ptr;
    }
    ptr = tiny_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    return ptr;
}
static INLINE void
free_tiny(szone_t *szone, void *ptr, tiny_region_t *tiny_region)
{
    msize_t     msize;
    boolean_t   is_free;
#if TINY_CACHE
    void        *ptr2;
#endif

    // ptr is known to be in tiny_region
    SZONE_LOCK(szone);
#if TINY_CACHE
    ptr2 = szone->last_tiny_free;
    /* check that we don't already have this pointer in the cache */
    if (ptr == (void *)((uintptr_t)ptr2 & ~ (TINY_QUANTUM - 1))) {
        szone_error(szone, "double free", ptr);
        return;
    }
#endif /* TINY_CACHE */
    msize = get_tiny_meta_header(ptr, &is_free);
    if (is_free) {
        szone_error(szone, "double free", ptr);
        return;
    }
#if DEBUG_MALLOC
    if (!msize) {
        malloc_printf("*** szone_free() block in use is too large: %p\n", ptr);
        return;
    }
#endif
#if TINY_CACHE
    if (msize < TINY_QUANTUM) { // to see if the bits fit in the last 4 bits
        szone->last_tiny_free = (void *)(((uintptr_t)ptr) | msize);
        if (!ptr2) {
            SZONE_UNLOCK(szone);
            CHECK(szone, __PRETTY_FUNCTION__);
            return;
        }
        msize = (uintptr_t)ptr2 & (TINY_QUANTUM - 1);
        ptr = (void *)(((uintptr_t)ptr2) & ~(TINY_QUANTUM - 1));
        tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
        if (!tiny_region) {
            szone_error(szone, "double free (tiny cache)", ptr);
            return;
        }
    }
#endif
    tiny_free_no_lock(szone, tiny_region, ptr, msize);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
}
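/*
 * Illustrative sketch (not compiled): the encoding used by the last-free cache in
 * free_tiny() above.  Because tiny blocks are TINY_QUANTUM-aligned, the low
 * SHIFT_TINY_QUANTUM bits of the pointer are zero and can carry the block's msize
 * (which is < TINY_QUANTUM whenever the cache is used).  The helper names are
 * hypothetical.
 */
#if 0
static void *
tiny_cache_encode_example(void *ptr, msize_t msize)
{
    return (void *)(((uintptr_t)ptr) | msize);  // what free_tiny() stores in last_tiny_free
}

static void *
tiny_cache_decode_example(void *cached, msize_t *msize_out)
{
    *msize_out = (uintptr_t)cached & (TINY_QUANTUM - 1);        // recover the size...
    return (void *)((uintptr_t)cached & ~(TINY_QUANTUM - 1));   // ...and the original pointer
}
#endif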
static void
print_tiny_free_list(szone_t *szone)
{
    grain_t     slot = 0;
    free_list_t *ptr;

    malloc_printf("tiny free sizes: ");
    while (slot < NUM_TINY_SLOTS) {
        ptr = szone->tiny_free_list[slot];
        if (ptr) {
            malloc_printf("%s%y[%d]; ", (slot == NUM_TINY_SLOTS-1) ? ">=" : "", (slot+1)*TINY_QUANTUM, free_list_count(ptr));
        }
        slot++;
    }
    malloc_printf("\n");
}
static void
print_tiny_region(boolean_t verbose, tiny_region_t region, size_t bytes_at_end)
{
    unsigned    counts[1024];
    unsigned    in_use = 0;
    uintptr_t   start = (uintptr_t)TINY_REGION_ADDRESS(region);
    uintptr_t   current = start;
    uintptr_t   limit = (uintptr_t)TINY_REGION_END(region) - bytes_at_end;
    boolean_t   is_free;
    msize_t     msize;
    unsigned    ci;

    memset(counts, 0, 1024 * sizeof(unsigned));
    while (current < limit) {
        msize = get_tiny_meta_header((void *)current, &is_free);
        if (is_free && !msize && (current == start)) {
            // first block is all free
            break;
        }
        if (!msize) {
            malloc_printf("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize);
            break;
        }
        if (!is_free) {
            // this block is in use
            if (msize > 31)
                malloc_printf("*** error at %p msize for in_use is %d\n", (void *)current, msize);
            if (msize < 1024)
                counts[msize]++;
            in_use++;
        }
        current += TINY_BYTES_FOR_MSIZE(msize);
    }
    malloc_printf("Tiny region [%p-%p, %y]\t", (void *)start, TINY_REGION_END(region), (int)TINY_REGION_SIZE);
    malloc_printf("In_use=%d ", in_use);
    if (bytes_at_end) malloc_printf("untouched=%y ", bytes_at_end);
    if (verbose && in_use) {
        malloc_printf("\tSizes in use: ");
        for (ci = 0; ci < 1024; ci++)
            if (counts[ci])
                malloc_printf("%d[%d]", TINY_BYTES_FOR_MSIZE(ci), counts[ci]);
    }
    malloc_printf("\n");
}
tiny_free_list_check(szone_t *szone, grain_t slot)
    free_list_t *ptr = szone->tiny_free_list[slot];
    free_list_t *previous = NULL;

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
	free_list_checksum(szone, ptr, __PRETTY_FUNCTION__);
	is_free = tiny_meta_header_is_free(ptr);
	    malloc_printf("*** in-use ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
	if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
	    malloc_printf("*** unaligned ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
	if (!tiny_region_for_ptr_no_lock(szone, ptr)) {
	    malloc_printf("*** ptr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr);
	if (ptr->previous != previous) {
	    malloc_printf("*** previous incorrectly set slot=%d count=%d ptr=%p\n", slot, count, ptr);
/********************* SMALL FREE LIST UTILITIES ************************/

/*
 * Mark a block as free.  Only the first quantum of a block is marked thusly,
 * the remainder are marked "middle".
 */
small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize)
    meta_headers[index] = msize | SMALL_IS_FREE;

/*
 * Mark a block as in use.  Only the first quantum of a block is marked thusly,
 * the remainder are marked "middle".
 */
small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize)
    meta_headers[index] = msize;

/*
 * Mark a quantum as being the second or later in a block.
 */
small_meta_header_set_middle(msize_t *meta_headers, msize_t index)
    meta_headers[index] = 0;
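/*
 * Illustrative sketch (compiled out): how the three setters above cooperate for a block spanning
 * 3 quanta starting at meta-header index 5.  The index and msize values are made up for the
 * example; only the encoding (msize in the first quantum, SMALL_IS_FREE or'ed in when free,
 * zero for "middle" quanta) is taken from the code above.
 */
#if 0
static void
small_meta_header_example(msize_t *meta_headers)
{
    small_meta_header_set_in_use(meta_headers, 5, 3);   // meta_headers[5] = 3 (block in use)
    small_meta_header_set_middle(meta_headers, 6);      // meta_headers[6] = 0
    small_meta_header_set_middle(meta_headers, 7);      // meta_headers[7] = 0

    small_meta_header_set_is_free(meta_headers, 5, 3);  // meta_headers[5] = 3 | SMALL_IS_FREE
}
#endif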
// Adds an item to the proper free list
// Also marks the header of the block properly
// Assumes szone has been locked
small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize)
    grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *free_head = szone->small_free_list[grain];

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    if (LOG(szone, ptr)) {
	malloc_printf("in small_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) {
	szone_error(szone, "small_free_list_add_ptr: Unaligned ptr", ptr);
	free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
	if (free_head->previous) {
	    malloc_printf("ptr=%p grain=%d free_head=%p previous=%p\n",
		ptr, grain, free_head, free_head->previous);
	    szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
	if (!SMALL_PTR_IS_FREE(free_head)) {
	    malloc_printf("ptr=%p grain=%d free_head=%p\n", ptr, grain, free_head);
	    szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
	free_head->previous = free_ptr;
	free_list_set_checksum(szone, free_head);
	BITMAP32_SET(szone->small_bitmap, grain);
    free_ptr->previous = NULL;
    free_ptr->next = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->small_free_list[grain] = free_ptr;
    follower = ptr + SMALL_BYTES_FOR_MSIZE(msize);
    SMALL_PREVIOUS_MSIZE(follower) = msize;
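/*
 * Illustrative sketch (compiled out): the grain computation used by small_free_list_add_ptr()
 * above and small_free_list_remove_ptr() below.  Blocks with msize 1..NUM_SMALL_SLOTS each get
 * their own slot; anything larger shares the last slot.  The helper name is hypothetical.
 */
#if 0
static grain_t
small_grain_for_msize(msize_t msize)
{
    return (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
}
#endif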
// Removes the item from the proper free list
// msize could be read, but all callers have it so we pass it in
// Assumes szone has been locked
small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize)
    grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *next = free_ptr->next;
    free_list_t *previous = free_ptr->previous;

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    if (LOG(szone, ptr)) {
	malloc_printf("in small_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
	if (szone->small_free_list[grain] != ptr) {
	    malloc_printf("ptr=%p grain=%d msize=%d szone->small_free_list[grain]=%p\n",
		ptr, grain, msize, szone->small_free_list[grain]);
	    szone_error(szone, "small_free_list_remove_ptr: Internal invariant broken (szone->small_free_list[grain])", ptr);
	szone->small_free_list[grain] = next;
	if (!next) BITMAP32_CLR(szone->small_bitmap, grain);
	previous->next = next;
	free_list_set_checksum(szone, previous);
	next->previous = previous;
	free_list_set_checksum(szone, next);
static INLINE small_region_t *
small_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
    small_region_t *region;
    small_region_t rbase;

    /* find assumed heap/region base */
    rbase = SMALL_REGION_FOR_PTR(ptr);

    /* scan existing regions for a match */
    for (i = szone->num_small_regions, region = szone->small_regions; i > 0; i--, region++)
	if (rbase == *region)
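/*
 * Illustrative sketch (compiled out): typical caller pattern for the lookup above.  The scan can
 * run without the lock because the regions array is only appended to, and num_small_regions is
 * bumped only after the array pointer has been updated (see the comments in
 * small_malloc_from_region_no_lock() below).  The helper name is hypothetical.
 */
#if 0
static boolean_t
ptr_is_small(szone_t *szone, const void *ptr)
{
    if ((uintptr_t)ptr & (SMALL_QUANTUM - 1))
	return 0;	// small blocks are always SMALL_QUANTUM-aligned
    return small_region_for_ptr_no_lock(szone, ptr) != NULL;
}
#endif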
small_free_no_lock(szone_t *szone, small_region_t *region, void *ptr, msize_t msize)
    msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
    unsigned index = SMALL_META_INDEX_FOR_PTR(ptr);
    size_t original_size = SMALL_BYTES_FOR_MSIZE(msize);
    unsigned char *next_block = ((unsigned char *)ptr + original_size);
    msize_t next_index = index + msize;
    msize_t previous_msize, next_msize;

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    if (LOG(szone, ptr)) {
	malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
	malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
	szone_error(szone, "trying to free small block that is too small", ptr);
    // We try to coalesce this block with the preceding one
    if (index && (SMALL_PREVIOUS_MSIZE(ptr) <= index)) {
	previous_msize = SMALL_PREVIOUS_MSIZE(ptr);
	if (meta_headers[index - previous_msize] == (previous_msize | SMALL_IS_FREE)) {
	    previous = ptr - SMALL_BYTES_FOR_MSIZE(previous_msize);
	    // previous is really to be coalesced
	    if (LOG(szone, ptr) || LOG(szone, previous)) {
		malloc_printf("in small_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
	    small_free_list_remove_ptr(szone, previous, previous_msize);
	    small_meta_header_set_middle(meta_headers, index);
	    msize += previous_msize;
	    index -= previous_msize;
    // We try to coalesce with the next block
    if ((next_block < SMALL_REGION_END(*region)) && (meta_headers[next_index] & SMALL_IS_FREE)) {
	// next block is free, we coalesce
	next_msize = meta_headers[next_index] & ~ SMALL_IS_FREE;
	if (LOG(szone, ptr)) malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", ptr, msize, next_block, next_msize);
	small_free_list_remove_ptr(szone, next_block, next_msize);
	small_meta_header_set_middle(meta_headers, next_index);
	msize += next_msize;
    if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) {
	    szone_error(szone, "incorrect size information - block header was damaged", ptr);
	memset(ptr, 0x55, SMALL_BYTES_FOR_MSIZE(msize));
    small_free_list_add_ptr(szone, ptr, msize);
    small_meta_header_set_is_free(meta_headers, index, msize);
    szone->num_small_objects--;
    szone->num_bytes_in_small_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
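/*
 * Worked example for the coalescing above (values are made up): freeing a block of msize 4 at
 * meta-header index 20, with a free neighbor of msize 6 ending at index 20 and another free
 * neighbor of msize 10 starting at index 24:
 *
 *   backward merge: index 20 -> 14, msize 4 -> 10   (previous block absorbed, this header cleared)
 *   forward merge:  msize 10 -> 20                  (next block absorbed, its header cleared)
 *
 * The result is a single free block of msize 20 at index 14.  Only original_size (4 quanta worth
 * of bytes) is subtracted from num_bytes_in_small_objects, since the neighbors were already free.
 */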
1965 small_malloc_from_region_no_lock(szone_t
*szone
, msize_t msize
)
1967 small_region_t last_region
;
1971 msize_t
*meta_headers
;
1973 size_t region_capacity
;
1975 small_region_t
*new_regions
;
1978 // Allocates from the last region or a freshly allocated region
1979 CHECK_LOCKED(szone
, __PRETTY_FUNCTION__
);
1980 // Before anything we transform the small_bytes_free_at_end - if any - to a regular free block
1981 if (szone
->small_bytes_free_at_end
) {
1982 last_region
= szone
->small_regions
[szone
->num_small_regions
- 1];
1983 last_block
= (void *)(SMALL_REGION_END(last_region
) - szone
->small_bytes_free_at_end
);
1984 small_free_list_add_ptr(szone
, last_block
, SMALL_MSIZE_FOR_BYTES(szone
->small_bytes_free_at_end
));
1985 *SMALL_METADATA_FOR_PTR(last_block
) = SMALL_MSIZE_FOR_BYTES(szone
->small_bytes_free_at_end
) | SMALL_IS_FREE
;
1986 szone
->small_bytes_free_at_end
= 0;
1988 // time to create a new region
1989 new_address
= allocate_pages(szone
, SMALL_REGION_SIZE
, SMALL_BLOCKS_ALIGN
, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_SMALL
));
1995 meta_headers
= SMALL_META_HEADER_FOR_PTR(ptr
);
1997 if (szone
->num_small_regions
== INITIAL_NUM_SMALL_REGIONS
) {
1998 // time to grow the number of regions
1999 region_capacity
= (1 << (32 - SMALL_BLOCKS_ALIGN
)) - 20; // that is for sure the maximum number of small regions we can have
2000 new_msize
= (region_capacity
* sizeof(small_region_t
) + SMALL_QUANTUM
- 1) / SMALL_QUANTUM
;
2002 small_meta_header_set_in_use(meta_headers
, index
, new_msize
);
2003 szone
->num_small_objects
++;
2004 szone
->num_bytes_in_small_objects
+= SMALL_BYTES_FOR_MSIZE(new_msize
);
2005 memcpy(new_regions
, szone
->small_regions
, INITIAL_NUM_SMALL_REGIONS
* sizeof(small_region_t
));
2006 // We intentionally leak the previous regions pointer to avoid multi-threading crashes if
2007 // another thread was reading it (unlocked) while we are changing it.
2008 szone
->small_regions
= new_regions
; // note we set this pointer after it's all set
2009 ptr
+= SMALL_BYTES_FOR_MSIZE(new_msize
);
2012 szone
->small_regions
[szone
->num_small_regions
] = new_address
;
// we bump the number of regions AFTER we have changed the regions pointer, to enable finding a
// small region without taking the lock
// XXX naively assumes memory ordering coherence between this and other CPUs
2016 szone
->num_small_regions
++;
2017 small_meta_header_set_in_use(meta_headers
, index
, msize
);
2018 msize_left
= NUM_SMALL_BLOCKS
- index
;
2019 szone
->num_small_objects
++;
2020 szone
->num_bytes_in_small_objects
+= SMALL_BYTES_FOR_MSIZE(msize
);
2021 // add a big free block
2022 index
+= msize
; msize_left
-= msize
;
2023 meta_headers
[index
] = msize_left
;
2024 szone
->small_bytes_free_at_end
= SMALL_BYTES_FOR_MSIZE(msize_left
);
2028 static INLINE boolean_t
2029 try_realloc_small_in_place(szone_t
*szone
, void *ptr
, size_t old_size
, size_t new_size
)
2031 // returns 1 on success
2032 msize_t
*meta_headers
= SMALL_META_HEADER_FOR_PTR(ptr
);
2033 unsigned index
= SMALL_META_INDEX_FOR_PTR(ptr
);
2034 msize_t old_msize
= SMALL_MSIZE_FOR_BYTES(old_size
);
2035 msize_t new_msize
= SMALL_MSIZE_FOR_BYTES(new_size
+ SMALL_QUANTUM
- 1);
2036 void *next_block
= (char *)ptr
+ old_size
;
2037 unsigned next_index
= index
+ old_msize
;
2038 msize_t next_msize_and_free
;
2040 msize_t leftover_msize
;
2042 unsigned leftover_index
;
2044 if (next_index
>= NUM_SMALL_BLOCKS
) {
2048 if ((uintptr_t)next_block
& (SMALL_QUANTUM
- 1)) {
2049 szone_error(szone
, "internal invariant broken in realloc(next_block)", next_block
);
2051 if (meta_headers
[index
] != old_msize
)
2052 malloc_printf("*** try_realloc_small_in_place incorrect old %d %d\n",
2053 meta_headers
[index
], old_msize
);
2057 * Look for a free block immediately afterwards. If it's large enough, we can consume (part of)
2060 next_msize_and_free
= meta_headers
[next_index
];
2061 next_msize
= next_msize_and_free
& ~ SMALL_IS_FREE
;
2062 if (!(next_msize_and_free
& SMALL_IS_FREE
) || (old_msize
+ next_msize
< new_msize
)) {
2063 SZONE_UNLOCK(szone
);
2067 * The following block is big enough; pull it from its freelist and chop off enough to satisfy
2070 small_free_list_remove_ptr(szone
, next_block
, next_msize
);
2071 small_meta_header_set_middle(meta_headers
, next_index
);
2072 leftover_msize
= old_msize
+ next_msize
- new_msize
;
2073 if (leftover_msize
) {
2074 /* there's some left, so put the remainder back */
2075 leftover
= (unsigned char *)ptr
+ SMALL_BYTES_FOR_MSIZE(new_msize
);
2076 small_free_list_add_ptr(szone
, leftover
, leftover_msize
);
2077 leftover_index
= index
+ new_msize
;
2078 small_meta_header_set_is_free(meta_headers
, leftover_index
, leftover_msize
);
2081 if (SMALL_BYTES_FOR_MSIZE(new_msize
) >= LARGE_THRESHOLD
) {
	malloc_printf("*** realloc in place for %p exceeded msize=%d\n", ptr, new_msize);
2085 small_meta_header_set_in_use(meta_headers
, index
, new_msize
);
2087 if (LOG(szone
,ptr
)) {
2088 malloc_printf("in szone_realloc(), ptr=%p, msize=%d\n", ptr
, *SMALL_METADATA_FOR_PTR(ptr
));
2091 szone
->num_bytes_in_small_objects
+= SMALL_BYTES_FOR_MSIZE(new_msize
- old_msize
);
2092 SZONE_UNLOCK(szone
);
2093 CHECK(szone
, __PRETTY_FUNCTION__
);
2098 szone_check_small_region(szone_t
*szone
, small_region_t
*region
)
2100 unsigned char *ptr
= SMALL_REGION_ADDRESS(*region
);
2101 msize_t
*meta_headers
= SMALL_META_HEADER_FOR_PTR(ptr
);
2102 unsigned char *region_end
= SMALL_REGION_END(*region
);
2103 msize_t prev_free
= 0;
2105 msize_t msize_and_free
;
2107 free_list_t
*free_head
;
2110 CHECK_LOCKED(szone
, __PRETTY_FUNCTION__
);
2111 if (region
== szone
->small_regions
+ szone
->num_small_regions
- 1) region_end
-= szone
->small_bytes_free_at_end
;
2112 while (ptr
< region_end
) {
2113 index
= SMALL_META_INDEX_FOR_PTR(ptr
);
2114 msize_and_free
= meta_headers
[index
];
2115 if (!(msize_and_free
& SMALL_IS_FREE
)) {
2117 msize
= msize_and_free
;
2119 malloc_printf("*** invariant broken: null msize ptr=%p region#=%d num_small_regions=%d end=%p\n",
2120 ptr
, region
- szone
->small_regions
, szone
->num_small_regions
, (void *)region_end
);
2123 if (msize
> (LARGE_THRESHOLD
/ SMALL_QUANTUM
)) {
2124 malloc_printf("*** invariant broken for %p this small msize=%d - size is too large\n",
2125 ptr
, msize_and_free
);
2128 ptr
+= SMALL_BYTES_FOR_MSIZE(msize
);
2132 msize
= msize_and_free
& ~ SMALL_IS_FREE
;
2133 free_head
= (free_list_t
*)ptr
;
2134 follower
= (msize_t
*)FOLLOWING_SMALL_PTR(ptr
, msize
);
2136 malloc_printf("*** invariant broken for free block %p this msize=%d\n", ptr
, msize
);
2140 malloc_printf("*** invariant broken for %p (2 free in a row)\n", ptr
);
2143 free_list_checksum(szone
, free_head
, __PRETTY_FUNCTION__
);
2144 if (free_head
->previous
&& !SMALL_PTR_IS_FREE(free_head
->previous
)) {
2145 malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n",
2146 ptr
, free_head
->previous
);
2149 if (free_head
->next
&& !SMALL_PTR_IS_FREE(free_head
->next
)) {
2150 malloc_printf("*** invariant broken for %p (next is not a free pointer)\n", ptr
);
2153 if (SMALL_PREVIOUS_MSIZE(follower
) != msize
) {
2154 malloc_printf("*** invariant broken for small free %p followed by %p in region [%p-%p] "
2155 "(end marker incorrect) should be %d; in fact %d\n",
2156 ptr
, follower
, SMALL_REGION_ADDRESS(*region
), region_end
, msize
, SMALL_PREVIOUS_MSIZE(follower
));
2159 ptr
= (unsigned char *)follower
;
2160 prev_free
= SMALL_IS_FREE
;
2166 static kern_return_t
2167 small_in_use_enumerator(task_t task
, void *context
, unsigned type_mask
, vm_address_t region_address
, unsigned short num_regions
, size_t small_bytes_free_at_end
, memory_reader_t reader
, vm_range_recorder_t recorder
)
2169 small_region_t
*regions
;
2171 vm_range_t buffer
[MAX_RECORDER_BUFFER
];
2174 small_region_t region
;
2176 vm_range_t admin_range
;
2177 vm_range_t ptr_range
;
2178 unsigned char *mapped_region
;
2179 msize_t
*block_header
;
2180 unsigned block_index
;
2181 unsigned block_limit
;
2182 msize_t msize_and_free
;
2185 err
= reader(task
, region_address
, sizeof(small_region_t
) * num_regions
, (void **)®ions
);
2186 if (err
) return err
;
2187 while (index
< num_regions
) {
2188 region
= regions
[index
];
2189 range
.address
= (vm_address_t
)SMALL_REGION_ADDRESS(region
);
2190 range
.size
= SMALL_REGION_SIZE
;
2191 if (type_mask
& MALLOC_ADMIN_REGION_RANGE_TYPE
) {
2192 admin_range
.address
= range
.address
+ (1 << SMALL_BLOCKS_ALIGN
);
2193 admin_range
.size
= range
.size
- (1 << SMALL_BLOCKS_ALIGN
);
2194 recorder(task
, context
, MALLOC_ADMIN_REGION_RANGE_TYPE
, &admin_range
, 1);
2196 if (type_mask
& (MALLOC_PTR_REGION_RANGE_TYPE
| MALLOC_ADMIN_REGION_RANGE_TYPE
)) {
2197 ptr_range
.address
= range
.address
;
2198 ptr_range
.size
= 1 << SMALL_BLOCKS_ALIGN
;
2199 recorder(task
, context
, MALLOC_PTR_REGION_RANGE_TYPE
, &ptr_range
, 1);
2201 if (type_mask
& MALLOC_PTR_IN_USE_RANGE_TYPE
) {
2202 err
= reader(task
, range
.address
, range
.size
, (void **)&mapped_region
);
2203 if (err
) return err
;
2204 block_header
= (msize_t
*)(mapped_region
+ (1 << SMALL_BLOCKS_ALIGN
));
2206 block_limit
= NUM_SMALL_BLOCKS
;
2207 if (index
== num_regions
- 1)
2208 block_limit
-= SMALL_MSIZE_FOR_BYTES(small_bytes_free_at_end
);
2209 while (block_index
< block_limit
) {
2210 msize_and_free
= block_header
[block_index
];
2211 msize
= msize_and_free
& ~ SMALL_IS_FREE
;
2212 if (! (msize_and_free
& SMALL_IS_FREE
)) {
2214 buffer
[count
].address
= range
.address
+ SMALL_BYTES_FOR_MSIZE(block_index
);
2215 buffer
[count
].size
= SMALL_BYTES_FOR_MSIZE(msize
);
2217 if (count
>= MAX_RECORDER_BUFFER
) {
2218 recorder(task
, context
, MALLOC_PTR_IN_USE_RANGE_TYPE
, buffer
, count
);
2222 block_index
+= msize
;
2228 recorder(task
, context
, MALLOC_PTR_IN_USE_RANGE_TYPE
, buffer
, count
);
2233 static INLINE
void *
2234 small_malloc_from_free_list(szone_t
*szone
, msize_t msize
)
2236 grain_t grain
= (msize
<= NUM_SMALL_SLOTS
) ? msize
- 1 : NUM_SMALL_SLOTS
- 1;
2237 unsigned bitmap
= szone
->small_bitmap
& ~ ((1 << grain
) - 1);
2240 free_list_t
**free_list
;
2241 free_list_t
**limit
;
2243 msize_t leftover_msize
;
2245 msize_t
*meta_headers
;
2246 unsigned leftover_index
;
2249 CHECK_LOCKED(szone
, __PRETTY_FUNCTION__
);
2251 if (!bitmap
) goto try_small_from_end
;
2252 grain
= BITMAP32_FFS(bitmap
) - 1;
2253 // first try the small grains
2254 limit
= szone
->small_free_list
+ NUM_SMALL_SLOTS
- 1;
2255 free_list
= szone
->small_free_list
+ grain
;
2256 while (free_list
< limit
) {
2257 // try bigger grains
2260 next
= ((free_list_t
*)ptr
)->next
;
2262 next
->previous
= NULL
;
2263 free_list_set_checksum(szone
, next
);
2266 this_msize
= SMALL_PTR_SIZE(ptr
);
2267 goto add_leftover_and_proceed
;
2271 // We now check the large grains for one that is big enough
2274 this_msize
= SMALL_PTR_SIZE(ptr
);
2275 if (this_msize
>= msize
) {
2276 small_free_list_remove_ptr(szone
, ptr
, this_msize
);
2277 goto add_leftover_and_proceed
;
2279 ptr
= ((free_list_t
*)ptr
)->next
;
2282 // Let's see if we can use szone->small_bytes_free_at_end
2283 if (szone
->small_bytes_free_at_end
>= SMALL_BYTES_FOR_MSIZE(msize
)) {
2284 ptr
= (void *)(SMALL_REGION_END(szone
->small_regions
[szone
->num_small_regions
-1]) - szone
->small_bytes_free_at_end
);
2285 szone
->small_bytes_free_at_end
-= SMALL_BYTES_FOR_MSIZE(msize
);
2286 if (szone
->small_bytes_free_at_end
) {
2287 // let's mark this block as in use to serve as boundary
2288 *SMALL_METADATA_FOR_PTR(ptr
+ SMALL_BYTES_FOR_MSIZE(msize
)) = SMALL_MSIZE_FOR_BYTES(szone
->small_bytes_free_at_end
);
2291 goto return_small_alloc
;
2294 add_leftover_and_proceed
:
2295 if (this_msize
> msize
) {
2296 leftover_msize
= this_msize
- msize
;
2297 leftover_ptr
= ptr
+ SMALL_BYTES_FOR_MSIZE(msize
);
2299 if (LOG(szone
,ptr
)) {
2300 malloc_printf("in small_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr
, this_msize
);
2303 small_free_list_add_ptr(szone
, leftover_ptr
, leftover_msize
);
2304 meta_headers
= SMALL_META_HEADER_FOR_PTR(leftover_ptr
);
2305 leftover_index
= SMALL_META_INDEX_FOR_PTR(leftover_ptr
);
2306 small_meta_header_set_is_free(meta_headers
, leftover_index
, leftover_msize
);
2310 szone
->num_small_objects
++;
2311 szone
->num_bytes_in_small_objects
+= SMALL_BYTES_FOR_MSIZE(this_msize
);
2313 if (LOG(szone
,ptr
)) {
2314 malloc_printf("in small_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr
, this_msize
, msize
);
2317 *SMALL_METADATA_FOR_PTR(ptr
) = this_msize
;
static INLINE void *
small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested)
    boolean_t locked = 0;

    ptr = (void *)szone->last_small_free;
    if ((((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) == msize) {
	// we have a candidate - let's lock to make sure
	LOCK_AND_NOTE_LOCKED(szone, locked);
	if (ptr == (void *)szone->last_small_free) {
	    szone->last_small_free = NULL;
	    SZONE_UNLOCK(szone);
	    CHECK(szone, __PRETTY_FUNCTION__);
	    ptr = (void *)((uintptr_t)ptr & ~ (SMALL_QUANTUM - 1));
	    if (cleared_requested) {
		memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
    // Except in rare occasions where we need to add a new region, we are going to end up locking,
    // so we might as well lock right away to avoid doing unnecessary optimistic probes
    if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked);
    ptr = small_malloc_from_free_list(szone, msize);
	SZONE_UNLOCK(szone);
	CHECK(szone, __PRETTY_FUNCTION__);
	if (cleared_requested) {
	    memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
    ptr = small_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
// tries to allocate a small, cleared block
static INLINE void *
small_malloc_cleared_no_lock(szone_t *szone, msize_t msize)
    // Assumes already locked
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    ptr = small_malloc_from_free_list(szone, msize);
	memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
    ptr = small_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
free_small(szone_t *szone, void *ptr, small_region_t *small_region)
    msize_t msize_and_free;

    // ptr is known to be in small_region
    msize_and_free = *SMALL_METADATA_FOR_PTR(ptr);
    if (msize_and_free & SMALL_IS_FREE) {
	szone_error(szone, "object being freed was already freed", ptr);
	CHECK(szone, __PRETTY_FUNCTION__);
    ptr2 = szone->last_small_free;
    szone->last_small_free = (void *)(((uintptr_t)ptr) | msize_and_free);
	SZONE_UNLOCK(szone);
	CHECK(szone, __PRETTY_FUNCTION__);
    msize_and_free = (uintptr_t)ptr2 & (SMALL_QUANTUM - 1);
    ptr = (void *)(((uintptr_t)ptr2) & ~ (SMALL_QUANTUM - 1));
    small_region = small_region_for_ptr_no_lock(szone, ptr);
    if (!small_region) {
	szone_error(szone, "double free (small cache)", ptr);
    small_free_no_lock(szone, small_region, ptr, msize_and_free);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
2422 print_small_free_list(szone_t
*szone
)
2427 malloc_printf("small free sizes: ");
2428 while (grain
< NUM_SMALL_SLOTS
) {
2429 ptr
= szone
->small_free_list
[grain
];
2431 malloc_printf("%s%y[%d]; ", (grain
== NUM_SMALL_SLOTS
-1) ? ">=" : "", (grain
+ 1) * SMALL_QUANTUM
, free_list_count(ptr
));
2435 malloc_printf("\n");
2439 print_small_region(szone_t
*szone
, boolean_t verbose
, small_region_t
*region
, size_t bytes_at_end
)
2441 unsigned counts
[1024];
2442 unsigned in_use
= 0;
2443 void *start
= SMALL_REGION_ADDRESS(*region
);
2444 void *limit
= SMALL_REGION_END(*region
) - bytes_at_end
;
2445 msize_t msize_and_free
;
2449 memset(counts
, 0, 1024 * sizeof(unsigned));
2450 while (start
< limit
) {
2451 msize_and_free
= *SMALL_METADATA_FOR_PTR(start
);
2452 msize
= msize_and_free
& ~ SMALL_IS_FREE
;
2453 if (!(msize_and_free
& SMALL_IS_FREE
)) {
2459 start
+= SMALL_BYTES_FOR_MSIZE(msize
);
2461 malloc_printf("Small region [%p-%p, %y]\tIn_use=%d ",
2462 SMALL_REGION_ADDRESS(*region
), SMALL_REGION_END(*region
), (int)SMALL_REGION_SIZE
, in_use
);
2464 malloc_printf("Untouched=%y ", bytes_at_end
);
2465 if (verbose
&& in_use
) {
2466 malloc_printf("\n\tSizes in use: ");
2467 for (ci
= 0; ci
< 1024; ci
++)
2469 malloc_printf("%d[%d] ", SMALL_BYTES_FOR_MSIZE(ci
), counts
[ci
]);
2471 malloc_printf("\n");
small_free_list_check(szone_t *szone, grain_t grain)
    free_list_t *ptr = szone->small_free_list[grain];
    free_list_t *previous = NULL;
    msize_t msize_and_free;

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
	msize_and_free = *SMALL_METADATA_FOR_PTR(ptr);
	if (!(msize_and_free & SMALL_IS_FREE)) {
	    malloc_printf("*** in-use ptr in free list grain=%d count=%d ptr=%p\n", grain, count, ptr);
	if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) {
	    malloc_printf("*** unaligned ptr in free list grain=%d count=%d ptr=%p\n", grain, count, ptr);
	if (!small_region_for_ptr_no_lock(szone, ptr)) {
	    malloc_printf("*** ptr not in szone grain=%d count=%d ptr=%p\n", grain, count, ptr);
	free_list_checksum(szone, ptr, __PRETTY_FUNCTION__);
	if (ptr->previous != previous) {
	    malloc_printf("*** previous incorrectly set grain=%d count=%d ptr=%p\n", grain, count, ptr);
/********************* LARGE ENTRY UTILITIES ************************/

large_debug_print(szone_t *szone)
    unsigned num_large_entries = szone->num_large_entries;
    unsigned index = num_large_entries;
    large_entry_t *range;

    for (index = 0, range = szone->large_entries; index < szone->num_large_entries; index++, range++)
	if (!LARGE_ENTRY_IS_EMPTY(*range))
	    malloc_printf("%d: %p(%y); ", index, LARGE_ENTRY_ADDRESS(*range), LARGE_ENTRY_SIZE(*range));
    malloc_printf("\n");

/*
 * Scan the hash ring looking for an entry for the given pointer.
 */
static large_entry_t *
large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr)
    // result only valid with lock held
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index;
    large_entry_t *range;

    if (!num_large_entries)
    hash_index = ((uintptr_t)ptr >> vm_page_shift) % num_large_entries;
	range = szone->large_entries + index;
	if (LARGE_ENTRY_MATCHES(*range, ptr))
	if (LARGE_ENTRY_IS_EMPTY(*range))
	    return NULL; // end of chain
	if (index == num_large_entries)
    } while (index != hash_index);
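/*
 * Illustrative sketch (compiled out): the address_and_num_pages encoding probed by the hash ring
 * above.  Large allocations are page-aligned, so the low vm_page_shift bits of the address are
 * free to hold the page count (see large_and_huge_malloc() below, which builds the entry exactly
 * this way); LARGE_ENTRY_ADDRESS() and LARGE_ENTRY_SIZE() presumably just mask and shift.  The
 * helper name is hypothetical.
 */
#if 0
static large_entry_t
large_entry_make(void *addr, unsigned num_pages)
{
    large_entry_t entry;

    // addr is page-aligned; num_pages must fit in the low vm_page_shift bits
    entry.address_and_num_pages = (uintptr_t)addr | num_pages;
    return entry;
}
#endif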
large_entry_insert_no_lock(szone_t *szone, large_entry_t range)
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index = (range.address_and_num_pages >> vm_page_shift) % num_large_entries;
    unsigned index = hash_index;
    large_entry_t *entry;

	entry = szone->large_entries + index;
	if (LARGE_ENTRY_IS_EMPTY(*entry)) {
	    return; // end of chain
	if (index == num_large_entries)
    } while (index != hash_index);

large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry)
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index = entry - szone->large_entries;
    unsigned index = hash_index;
    large_entry_t range;

	if (index == num_large_entries)
	range = szone->large_entries[index];
	if (LARGE_ENTRY_IS_EMPTY(range))
	szone->large_entries[index].address_and_num_pages = 0;
	large_entry_insert_no_lock(szone, range); // this will reinsert it in the proper place
    } while (index != hash_index);
static INLINE large_entry_t *
large_entries_alloc_no_lock(szone_t *szone, unsigned num)
    size_t size = num * sizeof(large_entry_t);
    boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;

    if (is_vm_allocation) {
	// Note that we allocate memory (via a system call) under a spin lock.
	// That is certainly evil; however, it's very rare in the lifetime of a process.
	// The alternative would slow down the normal case.
	return (void *)allocate_pages(szone, round_page(size), 0, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE));
    return small_malloc_cleared_no_lock(szone, SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1));

large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate)
    // returns range to deallocate
    size_t size = num * sizeof(large_entry_t);
    boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;
    small_region_t *region;
    msize_t msize_and_free;

    if (is_vm_allocation) {
	range_to_deallocate->address = (vm_address_t)entries;
	range_to_deallocate->size = round_page(size);
	range_to_deallocate->size = 0;
	region = small_region_for_ptr_no_lock(szone, entries);
	msize_and_free = *SMALL_METADATA_FOR_PTR(entries);
	if (msize_and_free & SMALL_IS_FREE) {
	    szone_error(szone, "object being freed was already freed", entries);
	small_free_no_lock(szone, region, entries, msize_and_free);
large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate)
    // sets range_to_deallocate
    unsigned old_num_entries = szone->num_large_entries;
    large_entry_t *old_entries = szone->large_entries;
    unsigned new_num_entries = (old_num_entries) ? old_num_entries * 2 + 1 : 63; // always an odd number for good hashing
    large_entry_t *new_entries = large_entries_alloc_no_lock(szone, new_num_entries);
    unsigned index = old_num_entries;
    large_entry_t oldRange;

    szone->num_large_entries = new_num_entries;
    szone->large_entries = new_entries;

    /* rehash entries into the new list */
	oldRange = old_entries[index];
	if (!LARGE_ENTRY_IS_EMPTY(oldRange)) {
	    large_entry_insert_no_lock(szone, oldRange);
	large_entries_free_no_lock(szone, old_entries, old_num_entries, range_to_deallocate);
	range_to_deallocate->size = 0;
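/*
 * Worked example for the growth policy above: starting from an empty table the entry count goes
 * 0 -> 63 -> 127 -> 255 -> 511 -> ..., i.e. 2n+1 at each step, so the table size stays odd (which
 * the "good hashing" comment relies on, given the simple modulo probe used by the hash ring).
 */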
// frees the specific entry in the size table
// returns a range to truly deallocate
large_free_no_lock(szone_t *szone, large_entry_t *entry)
    range.address = (vm_address_t)LARGE_ENTRY_ADDRESS(*entry);
    range.size = (vm_size_t)LARGE_ENTRY_SIZE(*entry);
    szone->num_large_objects_in_use--;
    szone->num_bytes_in_large_objects -= range.size;
    if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
	protect(szone, (void *)range.address, range.size, VM_PROT_READ | VM_PROT_WRITE, szone->debug_flags);
	range.address -= vm_page_size;
	range.size += 2 * vm_page_size;
    entry->address_and_num_pages = 0;
    large_entries_rehash_after_entry_no_lock(szone, entry);
    if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) {
	malloc_printf("*** freed entry %p still in use; num_large_entries=%d\n",
	    range.address, szone->num_large_entries);
	large_debug_print(szone);
2695 static kern_return_t
2696 large_in_use_enumerator(task_t task
, void *context
, unsigned type_mask
, vm_address_t large_entries_address
, unsigned num_entries
, memory_reader_t reader
, vm_range_recorder_t recorder
)
2699 vm_range_t buffer
[MAX_RECORDER_BUFFER
];
2701 large_entry_t
*entries
;
2704 large_entry_t entry
;
2706 err
= reader(task
, large_entries_address
, sizeof(large_entry_t
) * num_entries
, (void **)&entries
);
2709 index
= num_entries
;
2710 if ((type_mask
& MALLOC_ADMIN_REGION_RANGE_TYPE
) &&
2711 (num_entries
* sizeof(large_entry_t
) >= LARGE_THRESHOLD
)) {
2712 range
.address
= large_entries_address
;
2713 range
.size
= round_page(num_entries
* sizeof(large_entry_t
));
2714 recorder(task
, context
, MALLOC_ADMIN_REGION_RANGE_TYPE
, &range
, 1);
2716 if (type_mask
& (MALLOC_PTR_IN_USE_RANGE_TYPE
| MALLOC_PTR_REGION_RANGE_TYPE
))
2718 entry
= entries
[index
];
2719 if (!LARGE_ENTRY_IS_EMPTY(entry
)) {
2720 range
.address
= (vm_address_t
)LARGE_ENTRY_ADDRESS(entry
);
2721 range
.size
= (vm_size_t
)LARGE_ENTRY_SIZE(entry
);
2722 buffer
[count
++] = range
;
2723 if (count
>= MAX_RECORDER_BUFFER
) {
2724 recorder(task
, context
, MALLOC_PTR_IN_USE_RANGE_TYPE
| MALLOC_PTR_REGION_RANGE_TYPE
, buffer
, count
);
2730 recorder(task
, context
, MALLOC_PTR_IN_USE_RANGE_TYPE
2731 | MALLOC_PTR_REGION_RANGE_TYPE
, buffer
, count
);
/********************* HUGE ENTRY UTILITIES ************************/

static huge_entry_t *
huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr)
    for (index = szone->num_huge_entries, huge = szone->huge_entries;
	if ((void *)huge->address == ptr)
2755 huge_entry_append(szone_t
*szone
, huge_entry_t huge
)
2757 huge_entry_t
*new_huge_entries
= NULL
, *old_huge_entries
;
2758 unsigned num_huge_entries
;
2760 // We do a little dance with locking because doing allocation (even in the
2761 // default szone) may cause something to get freed in this szone, with a
2763 // Returns 1 on success
2766 num_huge_entries
= szone
->num_huge_entries
;
2767 SZONE_UNLOCK(szone
);
2768 /* check for counter wrap */
2769 if ((num_huge_entries
+ 1) < num_huge_entries
)
2771 /* stale allocation from last time around the loop? */
2772 if (new_huge_entries
)
2773 szone_free(szone
, new_huge_entries
);
2774 new_huge_entries
= szone_malloc(szone
, (num_huge_entries
+ 1) * sizeof(huge_entry_t
));
2775 if (new_huge_entries
== NULL
)
2778 if (num_huge_entries
== szone
->num_huge_entries
) {
2779 // No change - our malloc still applies
2780 old_huge_entries
= szone
->huge_entries
;
2781 if (num_huge_entries
) {
2782 memcpy(new_huge_entries
, old_huge_entries
, num_huge_entries
* sizeof(huge_entry_t
));
2784 new_huge_entries
[szone
->num_huge_entries
++] = huge
;
2785 szone
->huge_entries
= new_huge_entries
;
2786 SZONE_UNLOCK(szone
);
2787 szone_free(szone
, old_huge_entries
);
2794 static kern_return_t
2795 huge_in_use_enumerator(task_t task
, void *context
, unsigned type_mask
, vm_address_t huge_entries_address
, unsigned num_entries
, memory_reader_t reader
, vm_range_recorder_t recorder
)
2797 huge_entry_t
*entries
;
2800 err
= reader(task
, huge_entries_address
, sizeof(huge_entry_t
) * num_entries
, (void **)&entries
);
2804 recorder(task
, context
, MALLOC_PTR_IN_USE_RANGE_TYPE
| MALLOC_PTR_REGION_RANGE_TYPE
, entries
, num_entries
);
2810 large_and_huge_malloc(szone_t
*szone
, unsigned num_pages
)
2813 vm_range_t range_to_deallocate
;
2814 huge_entry_t huge_entry
;
2816 large_entry_t large_entry
;
2819 num_pages
= 1; // minimal allocation size for this szone
2820 size
= (size_t)num_pages
<< vm_page_shift
;
2821 range_to_deallocate
.size
= 0;
2822 if (num_pages
>= (1 << vm_page_shift
)) {
2823 addr
= allocate_pages(szone
, size
, 0, szone
->debug_flags
, VM_MAKE_TAG(VM_MEMORY_MALLOC_HUGE
));
2826 huge_entry
.size
= size
;
2827 huge_entry
.address
= (vm_address_t
)addr
;
2828 if (!huge_entry_append(szone
, huge_entry
))
2829 return NULL
; // we are leaking the allocation here
2831 szone
->num_bytes_in_huge_objects
+= size
;
2834 addr
= allocate_pages(szone
, size
, 0, szone
->debug_flags
, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE
));
2836 if (LOG(szone
, addr
))
2837 malloc_printf("in szone_malloc true large allocation at %p for %y\n", (void *)addr
, size
);
2841 SZONE_UNLOCK(szone
);
2845 if (large_entry_for_pointer_no_lock(szone
, addr
)) {
2846 malloc_printf("freshly allocated is already in use: %p\n", addr
);
2847 large_debug_print(szone
);
2851 if ((szone
->num_large_objects_in_use
+ 1) * 4 > szone
->num_large_entries
) {
2852 // density of hash table too high; grow table
2853 // we do that under lock to avoid a race
2854 large_entries_grow_no_lock(szone
, &range_to_deallocate
);
2856 large_entry
.address_and_num_pages
= (uintptr_t)addr
| num_pages
;
2858 if (large_entry_for_pointer_no_lock(szone
, addr
)) {
2859 malloc_printf("entry about to be added already in use: %p\n", addr
);
2860 large_debug_print(szone
);
2864 large_entry_insert_no_lock(szone
, large_entry
);
2866 if (!large_entry_for_pointer_no_lock(szone
, (void *)addr
)) {
2867 malloc_printf("can't find entry just added\n");
2868 large_debug_print(szone
);
2872 szone
->num_large_objects_in_use
++;
2873 szone
->num_bytes_in_large_objects
+= size
;
2875 SZONE_UNLOCK(szone
);
2876 if (range_to_deallocate
.size
) {
2877 deallocate_pages(szone
, (void *)range_to_deallocate
.address
, range_to_deallocate
.size
, 0); // we deallocate outside the lock
2879 return (void *)addr
;
2883 free_large_or_huge(szone_t
*szone
, void *ptr
)
2885 // We have established ptr is page-aligned and not tiny nor small
2886 large_entry_t
*entry
;
2887 vm_range_t vm_range_to_deallocate
;
2891 entry
= large_entry_for_pointer_no_lock(szone
, ptr
);
2893 vm_range_to_deallocate
= large_free_no_lock(szone
, entry
);
2895 if (large_entry_for_pointer_no_lock(szone
, ptr
)) {
2896 malloc_printf("*** just after freeing %p still in use num_large_entries=%d\n", ptr
, szone
->num_large_entries
);
2897 large_debug_print(szone
);
2901 } else if ((huge
= huge_entry_for_pointer_no_lock(szone
, ptr
))) {
2902 vm_range_to_deallocate
= *huge
;
2903 *huge
= szone
->huge_entries
[--szone
->num_huge_entries
]; // last entry fills that spot
2904 szone
->num_bytes_in_huge_objects
-= (size_t)vm_range_to_deallocate
.size
;
2907 large_debug_print(szone
);
2909 szone_error(szone
, "pointer being freed was not allocated", ptr
);
2912 SZONE_UNLOCK(szone
); // we release the lock asap
2913 CHECK(szone
, __PRETTY_FUNCTION__
);
2914 // we deallocate_pages, including guard pages
2915 if (vm_range_to_deallocate
.address
) {
2917 if (large_entry_for_pointer_no_lock(szone
, (void *)vm_range_to_deallocate
.address
)) {
2918 malloc_printf("*** invariant broken: %p still in use num_large_entries=%d\n", vm_range_to_deallocate
.address
, szone
->num_large_entries
);
2919 large_debug_print(szone
);
2923 deallocate_pages(szone
, (void *)vm_range_to_deallocate
.address
, (size_t)vm_range_to_deallocate
.size
, 0);
2928 try_realloc_large_or_huge_in_place(szone_t
*szone
, void *ptr
, size_t old_size
, size_t new_size
)
2930 vm_address_t addr
= (vm_address_t
)ptr
+ old_size
;
2931 large_entry_t
*large_entry
, saved_entry
;
2932 huge_entry_t
*huge_entry
, huge
;
2936 if (old_size
!= ((old_size
>> vm_page_shift
) << vm_page_shift
)) {
2937 malloc_printf("*** old_size is %d\n", old_size
);
2941 large_entry
= large_entry_for_pointer_no_lock(szone
, (void *)addr
);
2942 SZONE_UNLOCK(szone
);
2944 return 0; // large pointer already exists in table - extension is not going to work
2946 new_size
= round_page(new_size
);
2948 * Ask for allocation at a specific address, and mark as realloc
2949 * to request coalescing with previous realloc'ed extensions.
2951 err
= vm_allocate(mach_task_self(), &addr
, new_size
- old_size
, VM_MAKE_TAG(VM_MEMORY_REALLOC
));
2952 if (err
!= KERN_SUCCESS
) {
2957 * If the new size is still under the large/huge threshold, we can just
2958 * extend the existing large block.
2960 * Note: this logic is predicated on the understanding that an allocated
2961 * block can never really shrink, so that the new size will always be
2962 * larger than the old size.
2964 * Note: the use of 1 << vm_page_shift here has to do with the subdivision
2965 * of the bits in the large_entry_t, and not the size of a page (directly).
2967 if ((new_size
>> vm_page_shift
) < (1 << vm_page_shift
)) {
2968 /* extend existing large entry */
2969 large_entry
= large_entry_for_pointer_no_lock(szone
, ptr
);
2971 szone_error(szone
, "large entry reallocated is not properly in table", ptr
);
2972 /* XXX will cause fault on next reference to entry */
2974 large_entry
->address_and_num_pages
= (uintptr_t)ptr
| (new_size
>> vm_page_shift
);
2975 szone
->num_bytes_in_large_objects
+= new_size
- old_size
;
2976 } else if ((old_size
>> vm_page_shift
) >= (1 << vm_page_shift
)) {
2977 /* extend existing huge entry */
2978 huge_entry
= huge_entry_for_pointer_no_lock(szone
, ptr
);
2980 szone_error(szone
, "huge entry reallocated is not properly in table", ptr
);
2981 /* XXX will cause fault on next reference to huge_entry */
2983 huge_entry
->size
= new_size
;
2984 szone
->num_bytes_in_huge_objects
+= new_size
- old_size
;
2986 /* need to convert large entry to huge entry */
2988 /* release large entry, note we still have the VM allocation */
2989 large_entry
= large_entry_for_pointer_no_lock(szone
, ptr
);
2990 saved_entry
= *large_entry
; // in case we need to put it back
2991 large_free_no_lock(szone
, large_entry
);
2992 szone
->num_bytes_in_large_objects
-= old_size
;
2994 /* and get a huge entry */
2995 huge
.address
= (vm_address_t
)ptr
;
2996 huge
.size
= new_size
; /* fix up size */
2997 SZONE_UNLOCK(szone
);
2998 if (huge_entry_append(szone
, huge
)) {
2999 szone
->num_bytes_in_huge_objects
+= new_size
;
3000 return 1; // success!
3003 // we leak memory (the extra space appended) but data structures are correct
3004 large_entry_insert_no_lock(szone
, saved_entry
); // this will reinsert the large entry
3006 SZONE_UNLOCK(szone
); // we release the lock asap
/********************* Zone call backs ************************/

szone_free(szone_t *szone, void *ptr)
    tiny_region_t *tiny_region;
    small_region_t *small_region;

    if (LOG(szone, ptr))
	malloc_printf("in szone_free with %p\n", ptr);
    /*
     * Try to free to a tiny region.
     */
    if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) {
	szone_error(szone, "Non-aligned pointer being freed", ptr);
    if ((tiny_region = tiny_region_for_ptr_no_lock(szone, ptr)) != NULL) {
	free_tiny(szone, ptr, tiny_region);
    /*
     * Try to free to a small region.
     */
    if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) {
	szone_error(szone, "Non-aligned pointer being freed (2)", ptr);
    if ((small_region = small_region_for_ptr_no_lock(szone, ptr)) != NULL) {
	free_small(szone, ptr, small_region);
    /* check that it's a legal large/huge allocation */
    if ((uintptr_t)ptr & (vm_page_size - 1)) {
	szone_error(szone, "non-page-aligned, non-allocated pointer being freed", ptr);
    free_large_or_huge(szone, ptr);
static INLINE void *
szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested)
    if (size <= 31*TINY_QUANTUM) {
	msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
	ptr = tiny_malloc_should_clear(szone, msize, cleared_requested);
    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
	msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
	if (! msize) msize = 1;
	ptr = small_malloc_should_clear(szone, msize, cleared_requested);
	num_pages = round_page(size) >> vm_page_shift;
	ptr = large_and_huge_malloc(szone, num_pages);
    if (LOG(szone, ptr))
	malloc_printf("szone_malloc returned %p\n", ptr);
    /*
     * If requested, scribble on allocated memory.
     */
    if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && ptr && !cleared_requested && size)
	memset(ptr, 0xaa, size);
szone_malloc(szone_t *szone, size_t size) {
    return szone_malloc_should_clear(szone, size, 0);

szone_calloc(szone_t *szone, size_t num_items, size_t size)
    return szone_malloc_should_clear(szone, num_items * size, 1);

szone_valloc(szone_t *szone, size_t size)
    num_pages = round_page(size) >> vm_page_shift;
    ptr = large_and_huge_malloc(szone, num_pages);
    if (LOG(szone, ptr))
	malloc_printf("szone_valloc returned %p\n", ptr);
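/*
 * Illustrative sketch (compiled out): these entry points are not called directly; they are
 * reached through the malloc_zone_t function table set up by create_scalable_zone() at the end
 * of this file.  A client would typically go through the public wrappers declared in
 * <malloc/malloc.h>, roughly as below (a minimal sketch, assuming the standard malloc_zone_*
 * API; error handling omitted).
 */
#if 0
#include <malloc/malloc.h>

static void
zone_usage_example(void)
{
    malloc_zone_t *zone = malloc_create_zone(0, 0);  // ends up in create_scalable_zone()
    void *p = malloc_zone_malloc(zone, 100);         // dispatches to szone_malloc()
    void *q = malloc_zone_calloc(zone, 10, 32);      // dispatches to szone_calloc()

    malloc_zone_free(zone, p);                       // dispatches to szone_free()
    malloc_zone_free(zone, q);
    malloc_destroy_zone(zone);                       // dispatches to szone_destroy()
}
#endif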
szone_size(szone_t *szone, const void *ptr)
    msize_t msize, msize_and_free;
    large_entry_t *entry;

    if (LOG(szone, ptr)) {
	malloc_printf("in szone_size for %p (szone=%p)\n", ptr, szone);
    /*
     * Look for it in a tiny region.
     */
    if ((uintptr_t)ptr & (TINY_QUANTUM - 1))
    if (tiny_region_for_ptr_no_lock(szone, ptr)) {
	msize = get_tiny_meta_header(ptr, &is_free);
	return (is_free) ? 0 : TINY_BYTES_FOR_MSIZE(msize);
    /*
     * Look for it in a small region.
     */
    if ((uintptr_t)ptr & (SMALL_QUANTUM - 1))
    if (small_region_for_ptr_no_lock(szone, ptr)) {
	msize_and_free = *SMALL_METADATA_FOR_PTR(ptr);
	return (msize_and_free & SMALL_IS_FREE) ? 0 : SMALL_BYTES_FOR_MSIZE(msize_and_free);
    /*
     * If not page-aligned, it cannot have come from a large or huge allocation.
     */
    if ((uintptr_t)ptr & (vm_page_size - 1))
    /*
     * Look for it in a large or huge entry.
     */
    entry = large_entry_for_pointer_no_lock(szone, ptr);
	size = LARGE_ENTRY_SIZE(*entry);
    } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) {
    SZONE_UNLOCK(szone);
    if (LOG(szone, ptr)) {
	malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)size);
szone_realloc(szone_t *szone, void *ptr, size_t new_size)
    if (LOG(szone, ptr)) {
	malloc_printf("in szone_realloc for %p, %d\n", ptr, (unsigned)new_size);
	ptr = szone_malloc(szone, new_size);
    old_size = szone_size(szone, ptr);
	szone_error(szone, "pointer being reallocated was not allocated", ptr);
    /* we never shrink an allocation */
    if (old_size >= new_size)
    /*
     * If the old and new sizes both suit the tiny allocator, try to reallocate in-place.
     */
    if ((new_size + TINY_QUANTUM - 1) <= 31 * TINY_QUANTUM) {
	if (try_realloc_tiny_in_place(szone, ptr, old_size, new_size)) {
    /*
     * If the old and new sizes both suit the small allocator, and we're not protecting the
     * small allocations, try to reallocate in-place.
     */
    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) &&
	    ((new_size + SMALL_QUANTUM - 1) < LARGE_THRESHOLD) &&
	    (old_size > 31 * TINY_QUANTUM)) {
	if (try_realloc_small_in_place(szone, ptr, old_size, new_size)) {
    /*
     * If the allocation's a large or huge allocation, try to reallocate in-place there.
     */
    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (old_size > LARGE_THRESHOLD)) {
	if (try_realloc_large_or_huge_in_place(szone, ptr, old_size, new_size)) {
    /*
     * Can't reallocate in place for whatever reason; allocate a new buffer and copy.
     */
    new_ptr = szone_malloc(szone, new_size);
    if (new_ptr == NULL)
    /*
     * If the allocation's large enough, try to copy using VM.  If that fails, or
     * if it's too small, just copy by hand.
     */
    if ((old_size < VM_COPY_THRESHOLD) ||
	    vm_copy(mach_task_self(), (vm_address_t)ptr, old_size, (vm_address_t)new_ptr))
	memcpy(new_ptr, ptr, old_size);
    szone_free(szone, ptr);
    if (LOG(szone, ptr)) {
	malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size);
// given a size, returns pointers capable of holding that size
// returns the number of pointers allocated
// may return 0 - this function makes a best effort, nothing more
szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count)
    msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
    size_t chunk_size = TINY_BYTES_FOR_MSIZE(msize);
    free_list_t **free_list = szone->tiny_free_list + msize - 1;
    free_list_t *ptr = *free_list;

    if (size > 31*TINY_QUANTUM)
	return 0; // only bother implementing this for tiny
    CHECK(szone, __PRETTY_FUNCTION__);
    SZONE_LOCK(szone); // might as well lock right here to avoid concurrency issues
    while (found < count) {
	set_tiny_meta_header_in_use(ptr, msize);
	ptr->previous = NULL;
	free_list_set_checksum(szone, ptr);
    // Note that we could allocate from the free lists for larger msize,
    // but that may unnecessarily fragment - so we might as well let the client do that.
    // We could also allocate from szone->tiny_bytes_free_at_end,
    // but that means we'll "eat up" the untouched area faster, increasing the working set.
    // So we just return what we have.
    szone->num_tiny_objects += found;
    szone->num_bytes_in_tiny_objects += chunk_size * found;
    SZONE_UNLOCK(szone);
3300 szone_batch_free(szone_t
*szone
, void **to_be_freed
, unsigned count
)
3304 tiny_region_t
*tiny_region
;
3308 // frees all the pointers in to_be_freed
3309 // note that to_be_freed may be overwritten during the process
3312 CHECK(szone
, __PRETTY_FUNCTION__
);
3314 while (cc
< count
) {
3315 ptr
= to_be_freed
[cc
];
3316 /* XXX this really slows us down */
3317 tiny_region
= tiny_region_for_ptr_no_lock(szone
, ptr
);
3319 // this is a tiny pointer
3320 msize
= get_tiny_meta_header(ptr
, &is_free
);
3322 break; // a double free; let the standard free deal with it
3323 tiny_free_no_lock(szone
, tiny_region
, ptr
, msize
);
3324 to_be_freed
[cc
] = NULL
;
3328 SZONE_UNLOCK(szone
);
3329 CHECK(szone
, __PRETTY_FUNCTION__
);
3331 ptr
= to_be_freed
[count
];
3333 szone_free(szone
, ptr
);
3338 szone_destroy(szone_t
*szone
)
3341 small_region_t pended_region
= 0;
3342 large_entry_t
*large
;
3343 vm_range_t range_to_deallocate
;
3345 tiny_region_t tiny_region
;
3346 small_region_t small_region
;
3348 /* destroy large entries */
3349 index
= szone
->num_large_entries
;
3351 large
= szone
->large_entries
+ index
;
3352 if (!LARGE_ENTRY_IS_EMPTY(*large
)) {
3353 // we deallocate_pages, including guard pages
3354 deallocate_pages(szone
, (void *)LARGE_ENTRY_ADDRESS(*large
), LARGE_ENTRY_SIZE(*large
), szone
->debug_flags
);
3357 if (szone
->num_large_entries
* sizeof(large_entry_t
) >= LARGE_THRESHOLD
) {
3358 // we do not free in the small chunk case
3359 large_entries_free_no_lock(szone
, szone
->large_entries
, szone
->num_large_entries
, &range_to_deallocate
);
3360 if (range_to_deallocate
.size
)
3361 deallocate_pages(szone
, (void *)range_to_deallocate
.address
, (size_t)range_to_deallocate
.size
, 0);
3364 /* destroy huge entries */
3365 index
= szone
->num_huge_entries
;
3367 huge
= szone
->huge_entries
+ index
;
3368 deallocate_pages(szone
, (void *)huge
->address
, huge
->size
, szone
->debug_flags
);
3371 /* destroy tiny regions */
3372 index
= szone
->num_tiny_regions
;
3374 tiny_region
= szone
->tiny_regions
[index
];
3375 deallocate_pages(szone
, TINY_REGION_ADDRESS(tiny_region
), TINY_REGION_SIZE
, 0);
3377 /* destroy small regions; region 0 must go last as it contains the szone */
3378 index
= szone
->num_small_regions
;
3380 small_region
= szone
->small_regions
[index
];
3382 * If we've allocated more than the basic set of small regions, avoid destroying the
3383 * region that contains the array.
3386 (SMALL_REGION_FOR_PTR(szone
->small_regions
) == SMALL_REGION_ADDRESS(small_region
))) {
3387 pended_region
= small_region
;
3389 deallocate_pages(szone
, (void *)SMALL_REGION_ADDRESS(small_region
), SMALL_REGION_SIZE
, 0);
3393 deallocate_pages(NULL
, (void *)SMALL_REGION_ADDRESS(pended_region
), SMALL_REGION_SIZE
, 0);
szone_good_size(szone_t *szone, size_t size)
    if (size <= 31 * TINY_QUANTUM) {
	msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
	if (! msize) msize = 1;
	return TINY_BYTES_FOR_MSIZE(msize);
    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
	msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
	if (! msize) msize = 1;
	return SMALL_BYTES_FOR_MSIZE(msize);
    num_pages = round_page(size) >> vm_page_shift;
	num_pages = 1; // minimal allocation size for this szone
    return num_pages << vm_page_shift;
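/*
 * Worked example for the rounding above (assuming SMALL_QUANTUM is 512 bytes, as used elsewhere
 * in this file): a request for 1000 bytes gives msize = (1000 + 511) / 512 = 2, so
 * szone_good_size() reports 2 * 512 = 1024 bytes, which is exactly what szone_malloc() would
 * hand back for that request.  Requests at or above LARGE_THRESHOLD are simply rounded up to a
 * whole number of VM pages.
 */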
3421 unsigned szone_check_counter
= 0;
3422 unsigned szone_check_start
= 0;
3423 unsigned szone_check_modulo
= 1;
3426 szone_check_all(szone_t
*szone
, const char *function
)
3429 tiny_region_t
*tiny
;
3430 small_region_t
*small
;
3433 CHECK_LOCKED(szone
, __PRETTY_FUNCTION__
);
    /* check tiny regions - should check region count */
3436 for (index
= szone
->num_tiny_regions
- 1, tiny
= szone
->tiny_regions
;
3439 if (!tiny_check_region(szone
, tiny
)) {
3440 SZONE_UNLOCK(szone
);
3441 szone
->debug_flags
&= ~ CHECK_REGIONS
;
3442 malloc_printf("*** tiny region %d incorrect szone_check_all(%s) counter=%d\n",
3443 szone
->num_tiny_regions
- index
, function
, szone_check_counter
);
3444 szone_error(szone
, "check: tiny region incorrect", NULL
);
3449 for (index
= NUM_TINY_SLOTS
- 1; index
>= 0; index
--) {
3450 if (!tiny_free_list_check(szone
, index
)) {
3451 SZONE_UNLOCK(szone
);
3452 szone
->debug_flags
&= ~ CHECK_REGIONS
;
3453 malloc_printf("*** tiny free list incorrect (slot=%d) szone_check_all(%s) counter=%d\n",
3454 index
, function
, szone_check_counter
);
3455 szone_error(szone
, "check: tiny free list incorrect", NULL
);
3460 /* check small regions - could check region count */
3461 for (index
= szone
->num_small_regions
- 1, small
= szone
->small_regions
;
3464 if (!szone_check_small_region(szone
, small
)) {
3465 SZONE_UNLOCK(szone
);
3466 szone
->debug_flags
&= ~ CHECK_REGIONS
;
3467 malloc_printf("*** small region %d incorrect szone_check_all(%s) counter=%d\n",
3468 szone
->num_small_regions
, index
, function
, szone_check_counter
);
3469 szone_error(szone
, "check: small region incorrect", NULL
);
3473 for (index
= NUM_SMALL_SLOTS
- 1; index
>= 0; index
--) {
3474 if (!small_free_list_check(szone
, index
)) {
3475 SZONE_UNLOCK(szone
);
3476 szone
->debug_flags
&= ~ CHECK_REGIONS
;
3477 malloc_printf("*** small free list incorrect (grain=%d) szone_check_all(%s) counter=%d\n", index
, function
, szone_check_counter
);
3478 szone_error(szone
, "check: small free list incorrect", NULL
);
3482 SZONE_UNLOCK(szone
);
3483 // szone_print(szone, 1);
3488 szone_check(szone_t
*szone
)
3491 if ((++szone_check_counter
% 10000) == 0)
3492 malloc_printf("at szone_check counter=%d\n", szone_check_counter
);
3493 if (szone_check_counter
< szone_check_start
)
3495 if (szone_check_counter
% szone_check_modulo
)
3497 return szone_check_all(szone
, "");
3500 static kern_return_t
3501 szone_ptr_in_use_enumerator(task_t task
, void *context
, unsigned type_mask
, vm_address_t zone_address
, memory_reader_t reader
, vm_range_recorder_t recorder
)
3506 if (!reader
) reader
= _szone_default_reader
;
3507 err
= reader(task
, zone_address
, sizeof(szone_t
), (void **)&szone
);
3508 if (err
) return err
;
3509 err
= tiny_in_use_enumerator(task
, context
, type_mask
,
3510 (vm_address_t
)szone
->tiny_regions
, szone
->num_tiny_regions
, szone
->tiny_bytes_free_at_end
, reader
, recorder
);
3511 if (err
) return err
;
3512 err
= small_in_use_enumerator(task
, context
, type_mask
,
3513 (vm_address_t
)szone
->small_regions
, szone
->num_small_regions
, szone
->small_bytes_free_at_end
, reader
, recorder
);
3514 if (err
) return err
;
3515 err
= large_in_use_enumerator(task
, context
, type_mask
,
3516 (vm_address_t
)szone
->large_entries
, szone
->num_large_entries
, reader
,
3518 if (err
) return err
;
3519 err
= huge_in_use_enumerator(task
, context
, type_mask
,
3520 (vm_address_t
)szone
->huge_entries
, szone
->num_huge_entries
, reader
,
3525 // Following method is deprecated: use scalable_zone_statistics instead
3527 scalable_zone_info(malloc_zone_t
*zone
, unsigned *info_to_fill
, unsigned count
)
3529 szone_t
*szone
= (void *)zone
;
3532 // We do not lock to facilitate debug
3533 info
[4] = szone
->num_tiny_objects
;
3534 info
[5] = szone
->num_bytes_in_tiny_objects
;
3535 info
[6] = szone
->num_small_objects
;
3536 info
[7] = szone
->num_bytes_in_small_objects
;
3537 info
[8] = szone
->num_large_objects_in_use
;
3538 info
[9] = szone
->num_bytes_in_large_objects
;
3539 info
[10] = szone
->num_huge_entries
;
3540 info
[11] = szone
->num_bytes_in_huge_objects
;
3541 info
[12] = szone
->debug_flags
;
3542 info
[0] = info
[4] + info
[6] + info
[8] + info
[10];
3543 info
[1] = info
[5] + info
[7] + info
[9] + info
[11];
3544 info
[3] = szone
->num_tiny_regions
* TINY_REGION_SIZE
+ szone
->num_small_regions
* SMALL_REGION_SIZE
+ info
[9] + info
[11];
3545 info
[2] = info
[3] - szone
->tiny_bytes_free_at_end
- szone
->small_bytes_free_at_end
;
3546 memcpy(info_to_fill
, info
, sizeof(unsigned)*count
);
static void
szone_print(szone_t *szone, boolean_t verbose)
{
    unsigned        info[13];
    unsigned        index = 0;
    tiny_region_t   *region;

    SZONE_LOCK(szone);
    scalable_zone_info((void *)szone, info, 13);
    malloc_printf("Scalable zone %p: inUse=%d(%y) touched=%y allocated=%y flags=%d\n",
        szone, info[0], info[1], info[2], info[3], info[12]);
    malloc_printf("\ttiny=%d(%y) small=%d(%y) large=%d(%y) huge=%d(%y)\n",
        info[4], info[5], info[6], info[7], info[8], info[9], info[10], info[11]);
    // tiny regions
    malloc_printf("%d tiny regions: \n", szone->num_tiny_regions);
    while (index < szone->num_tiny_regions) {
        region = szone->tiny_regions + index;
        print_tiny_region(verbose, *region,
            (index == szone->num_tiny_regions - 1) ? szone->tiny_bytes_free_at_end : 0);
        index++;
    }
    if (verbose) print_tiny_free_list(szone);
    // small regions
    malloc_printf("%d small regions: \n", szone->num_small_regions);
    index = 0;
    while (index < szone->num_small_regions) {
        region = szone->small_regions + index;
        print_small_region(szone, verbose, region,
            (index == szone->num_small_regions - 1) ? szone->small_bytes_free_at_end : 0);
        index++;
    }
    if (verbose)
        print_small_free_list(szone);
    SZONE_UNLOCK(szone);
}
static void
szone_log(malloc_zone_t *zone, void *log_address)
{
    szone_t *szone = (szone_t *)zone;

    szone->log_address = log_address;
}
static void
szone_force_lock(szone_t *szone)
{
    SZONE_LOCK(szone);
}

static void
szone_force_unlock(szone_t *szone)
{
    SZONE_UNLOCK(szone);
}
boolean_t
scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone)
{
    szone_t *szone = (szone_t *)zone;

    switch (subzone) {
        case 0:
            stats->blocks_in_use = szone->num_tiny_objects;
            stats->size_in_use = szone->num_bytes_in_tiny_objects;
            stats->size_allocated = szone->num_tiny_regions * TINY_REGION_SIZE;
            stats->max_size_in_use = stats->size_allocated - szone->tiny_bytes_free_at_end;
            return 1;
        case 1:
            stats->blocks_in_use = szone->num_small_objects;
            stats->size_in_use = szone->num_bytes_in_small_objects;
            stats->size_allocated = szone->num_small_regions * SMALL_REGION_SIZE;
            stats->max_size_in_use = stats->size_allocated - szone->small_bytes_free_at_end;
            return 1;
        case 2:
            stats->blocks_in_use = szone->num_large_objects_in_use;
            stats->size_in_use = szone->num_bytes_in_large_objects;
            stats->max_size_in_use = stats->size_allocated = stats->size_in_use;
            return 1;
        case 3:
            stats->blocks_in_use = szone->num_huge_entries;
            stats->size_in_use = szone->num_bytes_in_huge_objects;
            stats->max_size_in_use = stats->size_allocated = stats->size_in_use;
            return 1;
    }
    return 0;
}
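
/*
 * Illustrative sketch, not part of the original source: a debugging helper
 * could walk the four subzones (0 = tiny, 1 = small, 2 = large, 3 = huge)
 * through scalable_zone_statistics() rather than the deprecated
 * scalable_zone_info().  The function name is hypothetical and the block is
 * deliberately compiled out; enable it by hand if it proves useful.
 */
#if 0
static void
example_print_subzone_statistics(malloc_zone_t *zone)
{
    malloc_statistics_t stats;
    unsigned            subzone;

    for (subzone = 0; subzone < 4; subzone++) {
        if (!scalable_zone_statistics(zone, &stats, subzone))
            continue;   /* unrecognized subzone index */
        malloc_printf("subzone %d: blocks_in_use=%d size_in_use=%y allocated=%y\n",
            subzone, stats.blocks_in_use, stats.size_in_use, stats.size_allocated);
    }
}
#endif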
static void
szone_statistics(szone_t *szone, malloc_statistics_t *stats)
{
    size_t big_and_huge;

    stats->blocks_in_use =
        szone->num_tiny_objects +
        szone->num_small_objects +
        szone->num_large_objects_in_use +
        szone->num_huge_entries;
    big_and_huge = szone->num_bytes_in_large_objects + szone->num_bytes_in_huge_objects;
    stats->size_in_use = szone->num_bytes_in_tiny_objects + szone->num_bytes_in_small_objects + big_and_huge;
    stats->max_size_in_use = stats->size_allocated =
        szone->num_tiny_regions * TINY_REGION_SIZE +
        szone->num_small_regions * SMALL_REGION_SIZE +
        big_and_huge;

    // Now we account for the untouched areas
    stats->max_size_in_use -= szone->tiny_bytes_free_at_end;
    stats->max_size_in_use -= szone->small_bytes_free_at_end;
}
static const struct malloc_introspection_t szone_introspect = {
    (void *)szone_ptr_in_use_enumerator,
    (void *)szone_good_size,
    (void *)szone_check,
    (void *)szone_print,
    (void *)szone_log,
    (void *)szone_force_lock,
    (void *)szone_force_unlock,
    (void *)szone_statistics
}; // marked as const to spare the DATA section
malloc_zone_t *
create_scalable_zone(size_t initial_size, unsigned debug_flags)
{
    szone_t *szone;
    msize_t msize;
    msize_t free_msize;

    /*
     * Sanity-check our build-time assumptions about the size of a page.
     * Since we have sized various things assuming the default page size,
     * attempting to determine it dynamically is not useful.
     */
    if ((vm_page_size != _vm_page_size) || (vm_page_shift != _vm_page_shift)) {
        malloc_printf("*** FATAL ERROR - machine page size does not match our assumptions.\n");
        exit(-1);
    }

    /* get memory for the zone */
    szone = allocate_pages(NULL, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC));
    if (!szone)
        return NULL;

    /* set up the szone structure */
    szone->tiny_regions = szone->initial_tiny_regions;
    szone->small_regions = szone->initial_small_regions;
    szone->num_small_objects++;     // the szone structure itself occupies the start of the first small region
    szone->basic_zone.version = 3;
    szone->basic_zone.size = (void *)szone_size;
    szone->basic_zone.malloc = (void *)szone_malloc;
    szone->basic_zone.calloc = (void *)szone_calloc;
    szone->basic_zone.valloc = (void *)szone_valloc;
    szone->basic_zone.free = (void *)szone_free;
    szone->basic_zone.realloc = (void *)szone_realloc;
    szone->basic_zone.destroy = (void *)szone_destroy;
    szone->basic_zone.batch_malloc = (void *)szone_batch_malloc;
    szone->basic_zone.batch_free = (void *)szone_batch_free;
    szone->basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;
    szone->debug_flags = debug_flags;

    /* as the szone is allocated out of the first small region, reflect that allocation */
    szone->small_regions[0] = szone;
    szone->num_small_regions = 1;
    msize = SMALL_MSIZE_FOR_BYTES(sizeof(szone_t) + SMALL_QUANTUM - 1);
    free_msize = NUM_SMALL_BLOCKS - msize;
    *SMALL_METADATA_FOR_PTR(szone) = msize;
    *(SMALL_METADATA_FOR_PTR(szone) + msize) = free_msize;
    szone->small_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(free_msize);
    LOCK_INIT(szone->lock);
#if DEBUG_MALLOC
#warning CHECK_REGIONS enabled
    szone->debug_flags |= CHECK_REGIONS;
#endif
#if 0   /* flip to 1 to log all activity in this zone from creation */
#warning LOG enabled
    szone->log_address = ~0;
#endif
    CHECK(szone, __PRETTY_FUNCTION__);
    return (malloc_zone_t *)szone;
}
/********* Support code for emacs unexec ************/

/* History of freezedry version numbers:
 *
 * 1) Old malloc (before the scalable malloc implementation in this file)
 * 2) Original freezedrying code for scalable malloc.  This code was apparently
 *    based on the old freezedrying code and was fundamentally flawed in its
 *    assumption that tracking allocated memory regions was adequate to fake
 *    operations on freezedried memory.  This doesn't work, since scalable
 *    malloc does not store flags in front of large page-aligned allocations.
 * 3) Original szone-based freezedrying code.
 * 4) Fresher malloc with tiny zone
 * 5) 32/64-bit compatible malloc
 *
 * No version backward compatibility is provided, but the version number does
 * make it possible for malloc_jumpstart() to return an error if the application
 * was freezedried with an older version of malloc.
 */
#define MALLOC_FREEZEDRY_VERSION 5

typedef struct {
    unsigned version;
    unsigned nszones;
    szone_t *szones;
} malloc_frozen;
static void *
frozen_malloc(szone_t *zone, size_t new_size)
{
    return malloc(new_size);
}

static void *
frozen_calloc(szone_t *zone, size_t num_items, size_t size)
{
    return calloc(num_items, size);
}

static void *
frozen_valloc(szone_t *zone, size_t new_size)
{
    return valloc(new_size);
}

static void *
frozen_realloc(szone_t *zone, void *ptr, size_t new_size)
{
    size_t  old_size = szone_size(zone, ptr);
    void    *new_ptr;

    if (new_size <= old_size) {
        return ptr;
    }
    new_ptr = malloc(new_size);
    if (old_size > 0) {
        memcpy(new_ptr, ptr, old_size);
    }
    return new_ptr;
}

/* Freezedried zones never release memory, so free and destroy are no-ops. */
static void
frozen_free(szone_t *zone, void *ptr)
{
}

static void
frozen_destroy(szone_t *zone)
{
}
/********* Pseudo-private API for emacs unexec ************/

/*
 * malloc_freezedry() records all of the szones in use, so that they can be
 * partially reconstituted by malloc_jumpstart().  Due to the differences
 * between reconstituted memory regions and those created by the szone code,
 * care is taken not to reallocate from the freezedried memory, except in the
 * case of a non-growing realloc().
 *
 * Due to the flexibility provided by the zone registration mechanism, it is
 * impossible to implement generic freezedrying for any zone type.  This code
 * only handles applications that use the szone allocator, so malloc_freezedry()
 * returns 0 (error) if any non-szone zones are encountered.
 *
 * (An illustrative usage sketch follows the definition of malloc_freezedry() below.)
 */
uintptr_t
malloc_freezedry(void)
{
    extern unsigned malloc_num_zones;
    extern malloc_zone_t **malloc_zones;
    malloc_frozen *data;
    unsigned i;

    /* Allocate space in which to store the freezedry state. */
    data = (malloc_frozen *) malloc(sizeof(malloc_frozen));

    /* Set freezedry version number so that malloc_jumpstart() can check for compatibility. */
    data->version = MALLOC_FREEZEDRY_VERSION;

    /* Allocate the array of szone pointers. */
    data->nszones = malloc_num_zones;
    data->szones = (szone_t *) calloc(malloc_num_zones, sizeof(szone_t));

    /*
     * Fill in the array of szone structures.  They are copied rather than
     * referenced, since the originals are likely to be clobbered during malloc
     * initialization.
     */
    for (i = 0; i < malloc_num_zones; i++) {
        if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) {
            /* Unknown zone type. */
            free(data->szones);
            free(data);
            return 0;
        }
        memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t));
    }

    return ((uintptr_t)data);
}
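
/*
 * Illustrative sketch, not part of the original source: how an unexec-style
 * dumper might pair malloc_freezedry() and malloc_jumpstart().  The names
 * example_freezedry_cycle() and persist_with_image() are hypothetical, and a
 * nonzero return from malloc_jumpstart() is assumed to mean failure.  The
 * block is compiled out; enable it by hand if experimenting.
 */
#if 0
static int
example_freezedry_cycle(void)
{
    uintptr_t cookie;

    /* Before dumping the image: snapshot every registered szone. */
    cookie = malloc_freezedry();
    if (!cookie)
        return -1;                  /* a non-szone zone was registered */
    persist_with_image(cookie);     /* hypothetical: save the cookie alongside the dumped image */

    /* In the restarted binary: revive the snapshots as frozen zones. */
    if (malloc_jumpstart(cookie))
        return -1;                  /* freezedry version mismatch */
    return 0;
}
#endif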
int
malloc_jumpstart(uintptr_t cookie)
{
    malloc_frozen *data = (malloc_frozen *)cookie;
    unsigned i;

    if (data->version != MALLOC_FREEZEDRY_VERSION) {
        /* Unsupported freezedry version. */
        return 1;
    }

    for (i = 0; i < data->nszones; i++) {
        /*
         * Set function pointers.  Even the functions that stay the same must
         * be set, since there are no guarantees that they will be mapped to
         * the same addresses.
         */
        data->szones[i].basic_zone.size = (void *) szone_size;
        data->szones[i].basic_zone.malloc = (void *) frozen_malloc;
        data->szones[i].basic_zone.calloc = (void *) frozen_calloc;
        data->szones[i].basic_zone.valloc = (void *) frozen_valloc;
        data->szones[i].basic_zone.free = (void *) frozen_free;
        data->szones[i].basic_zone.realloc = (void *) frozen_realloc;
        data->szones[i].basic_zone.destroy = (void *) frozen_destroy;
        data->szones[i].basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;

        /* Register the freezedried zone. */
        malloc_zone_register(&data->szones[i].basic_zone);