/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/* Author: Bertrand Serlet, August 1999 */
#include "scalable_malloc.h"

#include <pthread_internals.h>

#include <mach/vm_statistics.h>
#include <mach/mach_init.h>
/********************* DEFINITIONS ************************/

#define DEBUG_MALLOC 0 // set to one to debug malloc itself

#define DEBUG_CLIENT 0 // set to one to help debug a nasty memory smasher

#if DEBUG_MALLOC
#warning DEBUG_MALLOC ENABLED
# define INLINE
# define ALWAYSINLINE
# define CHECK_LOCKED(szone, fun) \
do { \
    if (__is_threaded && TRY_LOCK(szone->lock)) { \
        malloc_printf("*** lock was not set %p in %s\n", szone->lock, fun); \
    } \
} while (0)
#else
# define INLINE __inline__
# define ALWAYSINLINE __attribute__((always_inline))
# define CHECK_LOCKED(szone, fun) {}
#endif
/*
 * Access to global variables is slow, so optimise our handling of vm_page_size
 * and vm_page_shift.
 */
#define _vm_page_size vm_page_size /* to get to the originals */
#define _vm_page_shift vm_page_shift
#define vm_page_size 4096 /* our normal working sizes */
#define vm_page_shift 12
typedef unsigned short msize_t; // a size in multiples of SHIFT_SMALL_QUANTUM or SHIFT_TINY_QUANTUM

/*
 * Note that in the LP64 case, this is 24 bytes, necessitating the 32-byte tiny grain size.
 */
typedef struct {
    uintptr_t	checksum;
    void	*previous;
    void	*next;
} free_list_t;
typedef struct {
    uintptr_t	address_and_num_pages;
    // this type represents both an address and a number of pages
    // the low bits are the number of pages; the high bits are the address
    // note that the exact number of bits used for each depends on the page size
    // also, this cannot represent pointers larger than 1 << (vm_page_shift * 2)
} compact_range_t;
typedef unsigned char grain_t;

#define CHECK_REGIONS (1 << 31)

#ifdef __LP64__
# define CHECKSUM_MAGIC 0xdeadbeef0badc0de
#else
# define CHECKSUM_MAGIC 0x357B
#endif

#define MAX_RECORDER_BUFFER 256
/********************* DEFINITIONS for tiny ************************/

/*
 * Memory in the Tiny range is allocated from regions (heaps) pointed to by the szone's tiny_regions
 * pointer.
 *
 * Each region is laid out as a heap (1MB in 32-bit mode, 2MB in 64-bit mode), followed by a header
 * block.  The header block is arranged as:
 *
 *	header bits
 *	0xffffffff pad word
 *	in-use bits
 *	pad word (not written)
 *
 * Each bitfield comprises NUM_TINY_BLOCKS bits, and refers to the corresponding TINY_QUANTUM block
 * within the heap.
 *
 * The bitfields are used to encode the state of memory within the heap.  The header bit indicates
 * that the corresponding quantum is the first quantum in a block (either in use or free).  The
 * in-use bit is set for the header if the block has been handed out (allocated).  If the header
 * bit is not set, the in-use bit is invalid.
 *
 * The szone maintains an array of 32 freelists, each of which is used to hold free objects
 * of the corresponding quantum size.
 *
 * A free block is laid out as (offsets differ between 32-bit and 64-bit mode):
 *
 *	checksum
 *	previous
 *	next
 *	size (in quantum counts)
 *	...
 *	size (in quantum counts, repeated at the very end of the block)
 *
 * All fields are pointer-sized, except for the size, which is an unsigned short.
 */
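/*
 * An illustrative sketch of the 32-bit layout of a 3-quantum (48-byte) free tiny block, using the
 * field offsets implied by free_list_t above and TINY_FREE_SIZE/TINY_PREVIOUS_MSIZE below:
 *
 *	+0x00	checksum	(previous ^ next ^ CHECKSUM_MAGIC)
 *	+0x04	previous	(previous free block in this slot's list, or NULL)
 *	+0x08	next		(next free block in this slot's list, or NULL)
 *	+0x0c	size		(msize_t, in quantum counts)
 *	 ...
 *	+0x2e	size		(msize_t again, in the last two bytes; read as
 *				 TINY_PREVIOUS_MSIZE of the following block)
 */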
#ifdef __LP64__
# define SHIFT_TINY_QUANTUM 5	// Required to fit free_list_t
#else
# define SHIFT_TINY_QUANTUM 4	// Required for AltiVec
#endif

#define TINY_QUANTUM (1 << SHIFT_TINY_QUANTUM)

#define FOLLOWING_TINY_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_TINY_QUANTUM))

#define NUM_TINY_SLOTS 32 // number of slots for free-lists

#define SHIFT_NUM_TINY_BLOCKS 16
#define NUM_TINY_BLOCKS (1 << SHIFT_NUM_TINY_BLOCKS)
#define TINY_BLOCKS_ALIGN (SHIFT_NUM_TINY_BLOCKS + SHIFT_TINY_QUANTUM)

/*
 * Enough room for the data, followed by the bit arrays (2 bits per block) plus 2 words of padding
 * (as our bitmap operators overflow), plus rounding to the nearest page.
 */
#define TINY_REGION_SIZE ((NUM_TINY_BLOCKS * TINY_QUANTUM + (NUM_TINY_BLOCKS >> 2) + 8 + vm_page_size - 1) & ~ (vm_page_size - 1))
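/*
 * Worked example (with the 4096-byte vm_page_size assumed above, 32-bit mode):
 *	NUM_TINY_BLOCKS * TINY_QUANTUM	= 65536 * 16	= 0x100000 (1MB of heap)
 *	NUM_TINY_BLOCKS >> 2		= 16384		= 0x4000 (header + in-use bitmaps)
 *	+ 8 bytes of padding				= 0x104008
 *	rounded up to a page boundary			= 0x105000
 */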
/*
 * Obtain the size of a free tiny block (in msize_t units).
 */
#ifdef __LP64__
# define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[14])
#else
# define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[6])
#endif
/*
 * The size is also kept at the very end of a free block.
 */
#define TINY_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]

/*
 * Beginning and end pointers for a region's heap.
 */
#define TINY_REGION_ADDRESS(region) ((void *)(region))
#define TINY_REGION_END(region) (TINY_REGION_ADDRESS(region) + (1 << TINY_BLOCKS_ALIGN))

/*
 * Locate the heap base for a pointer known to be within a tiny region.
 */
#define TINY_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << TINY_BLOCKS_ALIGN) - 1)))

/*
 * Convert between byte and msize units.
 */
#define TINY_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_TINY_QUANTUM)
#define TINY_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_TINY_QUANTUM)

/*
 * Locate the block header for a pointer known to be within a tiny region.
 */
#define TINY_BLOCK_HEADER_FOR_PTR(_p) ((void *)(((((uintptr_t)(_p)) >> TINY_BLOCKS_ALIGN) + 1) << TINY_BLOCKS_ALIGN))

/*
 * Locate the inuse map for a given block header pointer.
 */
#define TINY_INUSE_FOR_HEADER(_h) ((void *)((uintptr_t)(_h) + (NUM_TINY_BLOCKS >> 3) + 4))

/*
 * Compute the bitmap index for a pointer known to be within a tiny region.
 */
#define TINY_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1))
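/*
 * Putting the macros above together: a sketch (compiled out; the helper name is illustrative only)
 * of how a tiny pointer is decomposed into its region, block index, and header/in-use bits.
 */
#if 0
static void
tiny_macros_example(const void *ptr)
{
    void		*region = TINY_REGION_FOR_PTR(ptr);		// region (heap) base, (1 << TINY_BLOCKS_ALIGN) aligned
    msize_t		index = TINY_INDEX_FOR_PTR(ptr);		// quantum index within that heap
    unsigned char	*header = TINY_BLOCK_HEADER_FOR_PTR(ptr);	// bitmaps start right after the heap
    unsigned char	*inuse = TINY_INUSE_FOR_HEADER(header);
    // the bits for this quantum live at byte (index >> 3), bit (index & 7) of each bitmap
    int			is_block_start = (header[index >> 3] >> (index & 7)) & 1;
    int			is_in_use = (inuse[index >> 3] >> (index & 7)) & 1;
    // e.g. TINY_MSIZE_FOR_BYTES(48) == 3 and TINY_BYTES_FOR_MSIZE(3) == 48 when TINY_QUANTUM is 16
}
#endif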
typedef void *tiny_region_t;

#define INITIAL_NUM_TINY_REGIONS 24 // must be even for szone to be aligned

#define TINY_CACHE 1 // This governs a last-free cache of 1 that bypasses the free-list

#if !TINY_CACHE
#warning TINY_CACHE turned off
#endif
/********************* DEFINITIONS for small ************************/

/*
 * Memory in the Small range is allocated from regions (heaps) pointed to by the szone's small_regions
 * pointer.
 *
 * Each region is laid out as a heap (8MB in 32-bit and 64-bit mode), followed by the metadata array.
 * The array is arranged as an array of shorts, one for each SMALL_QUANTUM in the heap.
 *
 * The MSB of each short is set for the first quantum in a free block.  The low 15 bits encode the
 * block size (in SMALL_QUANTUM units), or are zero if the quantum is not the first in a block.
 *
 * The szone maintains an array of 32 freelists, each of which is used to hold free objects
 * of the corresponding quantum size.
 *
 * A free block is laid out as (offsets differ between 32-bit and 64-bit mode):
 *
 *	checksum
 *	previous
 *	next
 *	size (in quantum counts)
 *	...
 *	size (in quantum counts, repeated at the very end of the block)
 *
 * All fields are pointer-sized, except for the size, which is an unsigned short.
 */
#define SMALL_IS_FREE (1 << 15)

#define SHIFT_SMALL_QUANTUM (SHIFT_TINY_QUANTUM + 5)	// 9
#define SMALL_QUANTUM (1 << SHIFT_SMALL_QUANTUM)	// 512 bytes

#define FOLLOWING_SMALL_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_SMALL_QUANTUM))

#define NUM_SMALL_SLOTS 32 // number of slots for free-lists

/*
 * We can only represent up to 1<<15 for msize; but we choose to stay even below that to avoid the
 * convention msize=0 => msize = (1<<15).
 */
#define SHIFT_NUM_SMALL_BLOCKS 14
#define NUM_SMALL_BLOCKS (1 << SHIFT_NUM_SMALL_BLOCKS)
#define SMALL_BLOCKS_ALIGN (SHIFT_NUM_SMALL_BLOCKS + SHIFT_SMALL_QUANTUM) // 23
#define SMALL_REGION_SIZE (NUM_SMALL_BLOCKS * SMALL_QUANTUM + NUM_SMALL_BLOCKS * 2) // data + metadata

#define SMALL_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]

/*
 * Convert between byte and msize units.
 */
#define SMALL_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_SMALL_QUANTUM)
#define SMALL_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_SMALL_QUANTUM)
#define SMALL_REGION_ADDRESS(region) ((unsigned char *)region)
#define SMALL_REGION_END(region) (SMALL_REGION_ADDRESS(region) + (1 << SMALL_BLOCKS_ALIGN))

/*
 * Locate the heap base for a pointer known to be within a small region.
 */
#define SMALL_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << SMALL_BLOCKS_ALIGN) - 1)))

/*
 * Locate the metadata base for a pointer known to be within a small region.
 */
#define SMALL_META_HEADER_FOR_PTR(_p) ((msize_t *)(((((uintptr_t)(_p)) >> SMALL_BLOCKS_ALIGN) + 1) << SMALL_BLOCKS_ALIGN))

/*
 * Compute the metadata index for a pointer known to be within a small region.
 */
#define SMALL_META_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_SMALL_QUANTUM) & (NUM_SMALL_BLOCKS - 1))

/*
 * Find the metadata word for a pointer known to be within a small region.
 */
#define SMALL_METADATA_FOR_PTR(_p) (SMALL_META_HEADER_FOR_PTR(_p) + SMALL_META_INDEX_FOR_PTR(_p))

/*
 * Determine whether a pointer known to be within a small region points to memory which is free.
 */
#define SMALL_PTR_IS_FREE(_p) (*SMALL_METADATA_FOR_PTR(_p) & SMALL_IS_FREE)

/*
 * Extract the msize value for a pointer known to be within a small region.
 */
#define SMALL_PTR_SIZE(_p) (*SMALL_METADATA_FOR_PTR(_p) & ~SMALL_IS_FREE)

typedef void *small_region_t;
#define INITIAL_NUM_SMALL_REGIONS 6 // must be even for szone to be aligned

#define PROTECT_SMALL 0 // Should be 0: 1 is too slow for normal use

#define SMALL_CACHE 1
#if !SMALL_CACHE
#warning SMALL_CACHE turned off
#endif
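/*
 * Putting the small-region macros together: a sketch (compiled out; the helper name is illustrative
 * only) of how a small pointer's metadata word is located and decoded.
 */
#if 0
static void
small_macros_example(const void *ptr)
{
    msize_t	*meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);	// array of shorts after the 8MB heap
    unsigned	index = SMALL_META_INDEX_FOR_PTR(ptr);		// one short per SMALL_QUANTUM
    msize_t	word = meta_headers[index];			// same word as *SMALL_METADATA_FOR_PTR(ptr)
    boolean_t	is_free = (word & SMALL_IS_FREE) != 0;		// MSB set => first quantum of a free block
    size_t	size_in_bytes = SMALL_BYTES_FOR_MSIZE(word & ~SMALL_IS_FREE); // low 15 bits are the msize
}
#endif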
/********************* DEFINITIONS for large ************************/

#define LARGE_THRESHOLD (15 * 1024) // at or above this use "large"

#if (LARGE_THRESHOLD > NUM_SMALL_SLOTS * SMALL_QUANTUM)
#error LARGE_THRESHOLD should always be less than NUM_SMALL_SLOTS * SMALL_QUANTUM
#endif

#define VM_COPY_THRESHOLD (40 * 1024)
	// When all memory is touched after a copy, vm_copy() is always a loss
	// But if the memory is only read, vm_copy() wins over memmove() at 3 or 4 pages (on a G3/300MHz)
	// This must be larger than LARGE_THRESHOLD
/*
 * Given a large_entry, return the address of the allocated block.
 */
#define LARGE_ENTRY_ADDRESS(entry) \
    (void *)(((entry).address_and_num_pages >> vm_page_shift) << vm_page_shift)

/*
 * Given a large entry, return the number of pages or bytes in the allocated block.
 */
#define LARGE_ENTRY_NUM_PAGES(entry) \
    ((entry).address_and_num_pages & (vm_page_size - 1))
#define LARGE_ENTRY_SIZE(entry) \
    (LARGE_ENTRY_NUM_PAGES(entry) << vm_page_shift)

/*
 * Compare a pointer with a large entry.
 */
#define LARGE_ENTRY_MATCHES(entry,ptr) \
    ((((entry).address_and_num_pages - (uintptr_t)(ptr)) >> vm_page_shift) == 0)

#define LARGE_ENTRY_IS_EMPTY(entry) (((entry).address_and_num_pages) == 0)
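/*
 * Worked example of the packing (with the 4096-byte vm_page_size assumed above): a 5-page block
 * at 0x00200000 is stored as address_and_num_pages = 0x00200005, so
 *	LARGE_ENTRY_NUM_PAGES(entry)	== 0x00200005 & 0xfff		== 5
 *	LARGE_ENTRY_ADDRESS(entry)	== (0x00200005 >> 12) << 12	== 0x00200000
 *	LARGE_ENTRY_SIZE(entry)		== 5 << 12			== 0x5000
 * This works because block addresses are page aligned, leaving the low vm_page_shift bits free
 * to hold the page count.
 */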
typedef compact_range_t large_entry_t;

/********************* DEFINITIONS for huge ************************/

typedef vm_range_t huge_entry_t;
/********************* zone itself ************************/

typedef struct {
    malloc_zone_t	basic_zone;
    pthread_lock_t	lock;
    unsigned		debug_flags;
    void		*log_address;

    /* Regions for tiny objects */
    unsigned		num_tiny_regions;
    tiny_region_t	*tiny_regions;
    void		*last_tiny_free;	// low SHIFT_TINY_QUANTUM bits indicate the msize
    unsigned		tiny_bitmap;		// cache of the 32 free lists
    free_list_t		*tiny_free_list[NUM_TINY_SLOTS];	// 31 free lists for 1*TINY_QUANTUM to 31*TINY_QUANTUM, plus 1 for larger sizes
    size_t		tiny_bytes_free_at_end;	// the free space at the end of the last region is treated as a big block in use that is not accounted for
    unsigned		num_tiny_objects;
    size_t		num_bytes_in_tiny_objects;

    /* Regions for small objects */
    unsigned		num_small_regions;
    small_region_t	*small_regions;
    void		*last_small_free;	// low SHIFT_SMALL_QUANTUM bits indicate the msize
    unsigned		small_bitmap;		// cache of the free list
    free_list_t		*small_free_list[NUM_SMALL_SLOTS];
    size_t		small_bytes_free_at_end;	// the free space at the end of the last region is treated as a big block in use that is not accounted for
    unsigned		num_small_objects;
    size_t		num_bytes_in_small_objects;

    /* large objects: vm_page_shift <= log2(size) < 2 * vm_page_shift */
    unsigned		num_large_objects_in_use;
    unsigned		num_large_entries;
    large_entry_t	*large_entries;		// hashed by location; null entries don't count
    size_t		num_bytes_in_large_objects;

    /* huge objects: log2(size) >= 2 * vm_page_shift */
    unsigned		num_huge_entries;
    huge_entry_t	*huge_entries;
    size_t		num_bytes_in_huge_objects;

    /* Initial region list */
    tiny_region_t	initial_tiny_regions[INITIAL_NUM_TINY_REGIONS];
    small_region_t	initial_small_regions[INITIAL_NUM_SMALL_REGIONS];
} szone_t;
#if DEBUG_MALLOC || DEBUG_CLIENT
static void szone_sleep(void);
#endif
static void szone_error(szone_t *szone, const char *msg, const void *ptr);
static void protect(szone_t *szone, void *address, size_t size, unsigned protection, unsigned debug_flags);
static void *allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label);
static void deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags);
static kern_return_t _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr);

static INLINE void free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg) ALWAYSINLINE;
static INLINE void free_list_set_checksum(szone_t *szone, free_list_t *ptr) ALWAYSINLINE;
static unsigned free_list_count(const free_list_t *ptr);
static INLINE msize_t get_tiny_meta_header(const void *ptr, boolean_t *is_free) ALWAYSINLINE;
static INLINE void set_tiny_meta_header_in_use(const void *ptr, msize_t msize) ALWAYSINLINE;
static INLINE void set_tiny_meta_header_middle(const void *ptr) ALWAYSINLINE;
static INLINE void set_tiny_meta_header_free(const void *ptr, msize_t msize) ALWAYSINLINE;
static INLINE boolean_t tiny_meta_header_is_free(const void *ptr) ALWAYSINLINE;
static INLINE void *tiny_previous_preceding_free(void *ptr, msize_t *prev_msize) ALWAYSINLINE;
static INLINE void tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize) ALWAYSINLINE;
static INLINE void tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize) ALWAYSINLINE;
static INLINE tiny_region_t *tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr) ALWAYSINLINE;
static INLINE void tiny_free_no_lock(szone_t *szone, tiny_region_t *region, void *ptr, msize_t msize) ALWAYSINLINE;
static void *tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
static INLINE boolean_t try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) ALWAYSINLINE;
static boolean_t tiny_check_region(szone_t *szone, tiny_region_t *region);
static kern_return_t tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t tiny_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder);
static INLINE void *tiny_malloc_from_free_list(szone_t *szone, msize_t msize) ALWAYSINLINE;
static INLINE void *tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) ALWAYSINLINE;
static INLINE void free_tiny(szone_t *szone, void *ptr, tiny_region_t *tiny_region) ALWAYSINLINE;
static void print_tiny_free_list(szone_t *szone);
static void print_tiny_region(boolean_t verbose, tiny_region_t region, size_t bytes_at_end);
static boolean_t tiny_free_list_check(szone_t *szone, grain_t slot);
static INLINE void small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize) ALWAYSINLINE;
static INLINE void small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize) ALWAYSINLINE;
static INLINE void small_meta_header_set_middle(msize_t *meta_headers, msize_t index) ALWAYSINLINE;
static void small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize);
static void small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize);
static INLINE small_region_t *small_region_for_ptr_no_lock(szone_t *szone, const void *ptr) ALWAYSINLINE;
static INLINE void small_free_no_lock(szone_t *szone, small_region_t *region, void *ptr, msize_t msize) ALWAYSINLINE;
static void *small_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
static INLINE boolean_t try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) ALWAYSINLINE;
static boolean_t szone_check_small_region(szone_t *szone, small_region_t *region);
static kern_return_t small_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t small_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder);
static INLINE void *small_malloc_from_free_list(szone_t *szone, msize_t msize) ALWAYSINLINE;
static INLINE void *small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) ALWAYSINLINE;
static INLINE void *small_malloc_cleared_no_lock(szone_t *szone, msize_t msize) ALWAYSINLINE;
static INLINE void free_small(szone_t *szone, void *ptr, small_region_t *small_region) ALWAYSINLINE;
static void print_small_free_list(szone_t *szone);
static void print_small_region(szone_t *szone, boolean_t verbose, small_region_t *region, size_t bytes_at_end);
static boolean_t small_free_list_check(szone_t *szone, grain_t grain);
static void large_debug_print(szone_t *szone);
static large_entry_t *large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
static void large_entry_insert_no_lock(szone_t *szone, large_entry_t range);
static INLINE void large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) ALWAYSINLINE;
static INLINE large_entry_t *large_entries_alloc_no_lock(szone_t *szone, unsigned num) ALWAYSINLINE;
static void large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate);
static void large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate);
static vm_range_t large_free_no_lock(szone_t *szone, large_entry_t *entry);
static kern_return_t large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder);
static huge_entry_t *huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
static boolean_t huge_entry_append(szone_t *szone, huge_entry_t huge);
static kern_return_t huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder);
static void *large_and_huge_malloc(szone_t *szone, unsigned num_pages);
static INLINE void free_large_or_huge(szone_t *szone, void *ptr) ALWAYSINLINE;
static INLINE int try_realloc_large_or_huge_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) ALWAYSINLINE;
static void szone_free(szone_t *szone, void *ptr);
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested) ALWAYSINLINE;
static void *szone_malloc(szone_t *szone, size_t size);
static void *szone_calloc(szone_t *szone, size_t num_items, size_t size);
static void *szone_valloc(szone_t *szone, size_t size);
static size_t szone_size(szone_t *szone, const void *ptr);
static void *szone_realloc(szone_t *szone, void *ptr, size_t new_size);
static unsigned szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count);
static void szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count);
static void szone_destroy(szone_t *szone);
static size_t szone_good_size(szone_t *szone, size_t size);

static boolean_t szone_check_all(szone_t *szone, const char *function);
static boolean_t szone_check(szone_t *szone);
static kern_return_t szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder);
static void szone_print(szone_t *szone, boolean_t verbose);
static void szone_log(malloc_zone_t *zone, void *log_address);
static void szone_force_lock(szone_t *szone);
static void szone_force_unlock(szone_t *szone);

static void szone_statistics(szone_t *szone, malloc_statistics_t *stats);
static void *frozen_malloc(szone_t *zone, size_t new_size);
static void *frozen_calloc(szone_t *zone, size_t num_items, size_t size);
static void *frozen_valloc(szone_t *zone, size_t new_size);
static void *frozen_realloc(szone_t *zone, void *ptr, size_t new_size);
static void frozen_free(szone_t *zone, void *ptr);
static void frozen_destroy(szone_t *zone);
#if DEBUG_MALLOC
# define LOG(szone,ptr) \
    (szone->log_address && (((uintptr_t)szone->log_address == -1) || (szone->log_address == (void *)(ptr))))
#else
# define LOG(szone,ptr) 0
#endif

#define SZONE_LOCK(szone) \
do { \
    LOCK(szone->lock); \
} while (0)

#define SZONE_UNLOCK(szone) \
do { \
    UNLOCK(szone->lock); \
} while (0)

#define LOCK_AND_NOTE_LOCKED(szone,locked) \
do { \
    CHECK(szone, __PRETTY_FUNCTION__); \
    locked = 1; SZONE_LOCK(szone); \
} while (0)

#if DEBUG_MALLOC || DEBUG_CLIENT
# define CHECK(szone,fun) \
    if ((szone)->debug_flags & CHECK_REGIONS) szone_check_all(szone, fun)
#else
# define CHECK(szone,fun) do {} while (0)
#endif
/********************* VERY LOW LEVEL UTILITIES ************************/

#if DEBUG_MALLOC || DEBUG_CLIENT
static void
szone_sleep(void)
{
    if (getenv("MallocErrorSleep")) {
        malloc_printf("*** sleeping to help debug\n");
        sleep(3600); // to help debug
    }
}
#endif
static void
szone_error(szone_t *szone, const char *msg, const void *ptr)
{
    if (szone) SZONE_UNLOCK(szone);
    if (ptr) {
        malloc_printf("*** error for object %p: %s\n", ptr, msg);
    } else {
        malloc_printf("*** error: %s\n", msg);
    }
    malloc_printf("*** set a breakpoint in szone_error to debug\n");
#if DEBUG_MALLOC
    szone_print(szone, 1);
    szone_sleep();
#endif
#if DEBUG_CLIENT
    szone_sleep();
#endif
}
static void
protect(szone_t *szone, void *address, size_t size, unsigned protection, unsigned debug_flags)
{
    kern_return_t	err;

    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) {
        err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address - vm_page_size, vm_page_size, 0, protection);
        if (err)
            malloc_printf("*** can't protect(%p) region for prelude guard page at %p\n",
              protection, address - (1 << vm_page_shift));
    }
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) {
        err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address + size, vm_page_size, 0, protection);
        if (err)
            malloc_printf("*** can't protect(%p) region for postlude guard page at %p\n",
              protection, address + size);
    }
}
static void *
allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label)
{
    // align specifies a desired alignment (as a log) or 0 if no alignment requested
    kern_return_t	err;
    vm_address_t	vm_addr;
    uintptr_t		addr, aligned_address;
    boolean_t		add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    size_t		allocation_size = round_page(size);
    size_t		delta;

    if (align) add_guard_pages = 0; // too cumbersome to deal with that
    if (!allocation_size) allocation_size = 1 << vm_page_shift;
    if (add_guard_pages) allocation_size += 2 * (1 << vm_page_shift);
    if (align) allocation_size += (size_t)1 << align;
    err = vm_allocate(mach_task_self(), &vm_addr, allocation_size, vm_page_label | 1);
    if (err) {
        malloc_printf("*** vm_allocate(size=%lld) failed (error code=%d)\n", (long long)size, err);
        szone_error(szone, "can't allocate region", NULL);
        return NULL;
    }
    addr = (uintptr_t)vm_addr;
    if (align) {
        aligned_address = (addr + ((uintptr_t)1 << align) - 1) & ~ (((uintptr_t)1 << align) - 1);
        if (aligned_address != addr) {
            delta = aligned_address - addr;
            err = vm_deallocate(mach_task_self(), (vm_address_t)addr, delta);
            if (err)
                malloc_printf("*** freeing unaligned header failed with %d\n", err);
            addr = aligned_address;
            allocation_size -= delta;
        }
        if (allocation_size > size) {
            err = vm_deallocate(mach_task_self(), (vm_address_t)addr + size, allocation_size - size);
            if (err)
                malloc_printf("*** freeing unaligned footer failed with %d\n", err);
        }
    }
    if (add_guard_pages) {
        addr += (uintptr_t)1 << vm_page_shift;
        protect(szone, (void *)addr, size, 0, debug_flags);
    }
    return (void *)addr;
}
static void
deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags)
{
    kern_return_t	err;
    boolean_t		add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;

    if (add_guard_pages) {
        addr -= 1 << vm_page_shift;
        size += 2 * (1 << vm_page_shift);
    }
    err = vm_deallocate(mach_task_self(), (vm_address_t)addr, size);
    if (err)
        szone_error(szone, "Can't deallocate_pages region", addr);
}
static kern_return_t
_szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr)
{
    *ptr = (void *)address;
    return 0;
}
static INLINE void
free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg)
{
    // We always checksum, as testing whether to do it (based on szone->debug_flags) is as fast as
    // doing the checksum itself
    // XXX not necessarily true for LP64 case
    if (ptr->checksum != (((uintptr_t)ptr->previous) ^ ((uintptr_t)ptr->next) ^ CHECKSUM_MAGIC)) {
        malloc_printf("*** incorrect checksum: %s\n", msg);
        szone_error(szone, "incorrect checksum for freed object "
          "- object was probably modified after being freed, break at szone_error to debug", ptr);
    }
}
static INLINE void
free_list_set_checksum(szone_t *szone, free_list_t *ptr)
{
    // We always set checksum, as testing whether to do it (based on
    // szone->debug_flags) is slower than just doing it
    // XXX not necessarily true for LP64 case
    ptr->checksum = ((uintptr_t)ptr->previous) ^ ((uintptr_t)ptr->next) ^ CHECKSUM_MAGIC;
}
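/*
 * In other words, a free block's checksum is a pure function of its two link pointers: if either
 * link (or the checksum itself) is scribbled on after the block was freed, free_list_checksum()
 * sees previous ^ next ^ CHECKSUM_MAGIC != checksum and reports the object as modified after free.
 * For example, with previous == NULL and next == NULL the stored checksum is just CHECKSUM_MAGIC.
 */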
static unsigned
free_list_count(const free_list_t *ptr)
{
    unsigned	count = 0;

    while (ptr) {
        count++;
        ptr = ptr->next;
    }
    return count;
}
/* XXX inconsistent use of BITMAP32 and BITARRAY operations could be cleaned up */

#define BITMAP32_SET(bitmap,bit) (bitmap |= 1 << (bit))
#define BITMAP32_CLR(bitmap,bit) (bitmap &= ~ (1 << (bit)))
#define BITMAP32_BIT(bitmap,bit) ((bitmap >> (bit)) & 1)

#define BITMAP32_FFS(bitmap) (ffs(bitmap))
    // returns bit # of first bit that's one, starting at 1 (returns 0 if !bitmap)
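/*
 * Example of the BITMAP32_FFS convention: BITMAP32_FFS(0x00000001) == 1, BITMAP32_FFS(0x00000008) == 4,
 * and BITMAP32_FFS(0) == 0, which is why callers that want a slot number subtract 1 after checking
 * that the bitmap is non-zero (as tiny_malloc_from_free_list() does below).
 */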
/********************* TINY FREE LIST UTILITIES ************************/

// We encode the meta-headers as follows:
// Each quantum has an associated set of 2 bits:
// block_header when 1 says this block is the beginning of a block
// in_use when 1 says this block is in use
// so a block in use of size 3 is 1-1 0-X 0-X
// for a free block TINY_FREE_SIZE(ptr) carries the size and the bits are 1-0 X-X X-X
// for a block middle the bits are 0-0
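// Worked example of the encoding above, for a block starting at quantum index i:
//   in-use block of 3 quanta:  block_header bit (i) = 1, in_use bit (i) = 1, bits (i+1, i+2) = 0
//   free block of 3 quanta:    block_header bit (i) = 1, in_use bit (i) = 0, TINY_FREE_SIZE(ptr) = 3,
//                              and the last msize_t of the block also holds 3
//   quanta in the middle of either kind of block have block_header = 0 (their in_use bit is ignored)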
// Attention: double evaluation for these
#define BITARRAY_SET(bits,index) (bits[index>>3] |= (1 << (index & 7)))
#define BITARRAY_CLR(bits,index) (bits[index>>3] &= ~(1 << (index & 7)))
#define BITARRAY_BIT(bits,index) (((bits[index>>3]) >> (index & 7)) & 1)
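// Because 'index' is expanded twice (once for the byte, once for the bit), an argument with side
// effects is evaluated twice: BITARRAY_SET(bits, i++) would both address the wrong bit and bump i
// by 2.  Callers must pass side-effect-free expressions.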
// Following is for start<8 and end<=start+32
#define BITARRAY_MCLR_LESS_32(bits,start,end) \
do { \
    unsigned char *_bits = (bits); \
    unsigned _end = (end); \
    switch (_end >> 3) { \
        case 4: _bits[4] &= ~ ((1 << (_end - 32)) - 1); _end = 32; \
        case 3: _bits[3] &= ~ ((1 << (_end - 24)) - 1); _end = 24; \
        case 2: _bits[2] &= ~ ((1 << (_end - 16)) - 1); _end = 16; \
        case 1: _bits[1] &= ~ ((1 << (_end - 8)) - 1); _end = 8; \
        case 0: _bits[0] &= ~ ((1 << _end) - (1 << (start))); \
    } \
} while (0)

#if 0 // Simple but slow version
#warning Slow version in effect
#define BITARRAY_MCLR(bits,index,num) \
do { \
    unsigned _ctr = (num); \
    unsigned _cur = (index); \
    \
    while (_ctr--) {BITARRAY_CLR(bits,_cur); _cur++; } \
} while (0)
#else

// Following is for num <= 32
#define BITARRAY_MCLR(bits,index,num) \
do { \
    unsigned _index = (index); \
    unsigned char *_rebased = (bits) + (_index >> 3); \
    \
    _index &= 7; \
    BITARRAY_MCLR_LESS_32(_rebased, _index, _index + (num)); \
} while (0)
#endif
static INLINE msize_t
get_tiny_meta_header(const void *ptr, boolean_t *is_free)
{
    // returns msize and is_free
    // may return 0 for the msize component (meaning 65536)
    unsigned char	*block_header;
    unsigned char	*in_use;
    msize_t		index;
    unsigned		byte_index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    index = TINY_INDEX_FOR_PTR(ptr);
    byte_index = index >> 3;

    block_header += byte_index;
    index &= 7;
    *is_free = 0;
    if (!BITMAP32_BIT(*block_header, index))
        return 0;
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    if (!BITMAP32_BIT(*in_use, index)) {
        *is_free = 1;
        return TINY_FREE_SIZE(ptr);
    }
    uint32_t	*addr = (uint32_t *)((uintptr_t)block_header & ~3);
    uint32_t	word0 = OSReadLittleInt32(addr, 0) >> index;
    uint32_t	word1 = OSReadLittleInt32(addr, 4) << (8 - index);
    uint32_t	bits = (((uintptr_t)block_header & 3) * 8); // precision loss on LP64 OK here
    uint32_t	word = (word0 >> bits) | (word1 << (24 - bits));
    uint32_t	result = ffs(word >> 1);
    return result;
}
static INLINE void
set_tiny_meta_header_in_use(const void *ptr, msize_t msize)
{
    unsigned char	*block_header;
    unsigned char	*in_use;
    msize_t		index;
    unsigned		byte_index;
    msize_t		clr_msize;
    unsigned		end_bit;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    index = TINY_INDEX_FOR_PTR(ptr);
    byte_index = index >> 3;

#if DEBUG_MALLOC
    if (msize >= 32)
        malloc_printf("set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize);
    if ((unsigned)index + (unsigned)msize > 0x10000)
        malloc_printf("set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize);
#endif
    block_header += byte_index;
    index &= 7;
    BITMAP32_SET(*block_header, index);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    BITMAP32_SET(*in_use, index);
    index++;
    clr_msize = msize - 1;
    if (clr_msize) {
        byte_index = index >> 3;
        block_header += byte_index; in_use += byte_index;
        index &= 7;
        end_bit = index + clr_msize;
        BITARRAY_MCLR_LESS_32(block_header, index, end_bit);
        BITARRAY_MCLR_LESS_32(in_use, index, end_bit);
    }
    BITARRAY_SET(block_header, index+clr_msize); // we set the block_header bit for the following block to reaffirm next block is a block
#if DEBUG_MALLOC
    {
        boolean_t	ff;
        msize_t		mf;

        mf = get_tiny_meta_header(ptr, &ff);
        if (msize != mf) {
            malloc_printf("setting header for tiny in_use %p : %d\n", ptr, msize);
            malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
        }
    }
#endif
}
static INLINE void
set_tiny_meta_header_middle(const void *ptr)
{
    // indicates this block is in the middle of an in use block
    unsigned char	*block_header;
    unsigned char	*in_use;
    msize_t		index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

    BITARRAY_CLR(block_header, index);
    BITARRAY_CLR(in_use, index);
    TINY_FREE_SIZE(ptr) = 0;
}
static INLINE void
set_tiny_meta_header_free(const void *ptr, msize_t msize)
{
    // !msize is acceptable and means 65536
    unsigned char	*block_header;
    unsigned char	*in_use;
    msize_t		index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

#if DEBUG_MALLOC
    if ((unsigned)index + (unsigned)msize > 0x10000) {
        malloc_printf("setting header for tiny free %p msize too large: %d\n", ptr, msize);
    }
#endif
    BITARRAY_SET(block_header, index); BITARRAY_CLR(in_use, index);
    TINY_FREE_SIZE(ptr) = msize;
    // mark the end of this block
    if (msize) {	// msize==0 => the whole region is free
        void	*follower = FOLLOWING_TINY_PTR(ptr, msize);
        TINY_PREVIOUS_MSIZE(follower) = msize;
    }
#if DEBUG_MALLOC
    {
        boolean_t	ff;
        msize_t		mf = get_tiny_meta_header(ptr, &ff);

        if ((msize != mf) || !ff) {
            malloc_printf("setting header for tiny free %p : %d\n", ptr, msize);
            malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
        }
    }
#endif
}
static INLINE boolean_t
tiny_meta_header_is_free(const void *ptr)
{
    // returns msize and is_free shifted by 16
    // may return 0 for the msize component (meaning 65536)
    unsigned char	*block_header;
    unsigned char	*in_use;
    msize_t		index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);
    if (!BITARRAY_BIT(block_header, index))
        return 0;
    return !BITARRAY_BIT(in_use, index);
}
static INLINE void *
tiny_previous_preceding_free(void *ptr, msize_t *prev_msize)
{
    // returns the previous block, assuming and verifying it's free
    unsigned char	*block_header;
    unsigned char	*in_use;
    msize_t		index;
    msize_t		previous_msize;
    msize_t		previous_index;
    void		*previous_ptr;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

    if (!index)
        return NULL;
    if ((previous_msize = TINY_PREVIOUS_MSIZE(ptr)) > index)
        return NULL;

    previous_index = index - previous_msize;
    previous_ptr = (void *)(TINY_REGION_FOR_PTR(ptr) + TINY_BYTES_FOR_MSIZE(previous_index));
    if (TINY_FREE_SIZE(previous_ptr) != previous_msize)
        return NULL;

    if (!BITARRAY_BIT(block_header, previous_index))
        return NULL;
    if (BITARRAY_BIT(in_use, previous_index))
        return NULL;

    // conservative check did match true check
    *prev_msize = previous_msize;
    return previous_ptr;
}
static INLINE void
tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize)
{
    // Adds an item to the proper free list
    // Also marks the meta-header of the block properly
    // Assumes szone has been locked
    grain_t	slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
    free_list_t	*free_ptr = ptr;
    free_list_t	*free_head = szone->tiny_free_list[slot];

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in tiny_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (((unsigned)ptr) & (TINY_QUANTUM - 1)) {
        szone_error(szone, "tiny_free_list_add_ptr: Unaligned ptr", ptr);
    }
#endif
    set_tiny_meta_header_free(ptr, msize);
    if (free_head) {
        free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
#if DEBUG_MALLOC
        if (free_head->previous) {
            malloc_printf("ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, free_head, free_head->previous);
            szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
        }
        if (! tiny_meta_header_is_free(free_head)) {
            malloc_printf("ptr=%p slot=%d free_head=%p\n", ptr, slot, free_head);
            szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
        }
#endif
        free_head->previous = free_ptr;
        free_list_set_checksum(szone, free_head);
    } else {
        BITMAP32_SET(szone->tiny_bitmap, slot);
    }
    free_ptr->previous = NULL;
    free_ptr->next = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->tiny_free_list[slot] = free_ptr;
}
static INLINE void
tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize)
{
    // Removes item in the proper free list
    // msize could be read, but all callers have it so we pass it in
    // Assumes szone has been locked
    grain_t	slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
    free_list_t	*free_ptr = ptr;
    free_list_t	*next = free_ptr->next;
    free_list_t	*previous = free_ptr->previous;

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("In tiny_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    }
#endif
    free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
    if (!previous) {
        // The block to remove is the head of the free list
#if DEBUG_MALLOC
        if (szone->tiny_free_list[slot] != ptr) {
            malloc_printf("ptr=%p slot=%d msize=%d szone->tiny_free_list[slot]=%p\n",
              ptr, slot, msize, szone->tiny_free_list[slot]);
            szone_error(szone, "tiny_free_list_remove_ptr: Internal invariant broken (szone->tiny_free_list[slot])", ptr);
            return;
        }
#endif
        szone->tiny_free_list[slot] = next;
        if (!next) BITMAP32_CLR(szone->tiny_bitmap, slot);
    } else {
        previous->next = next;
        free_list_set_checksum(szone, previous);
    }
    if (next) {
        next->previous = previous;
        free_list_set_checksum(szone, next);
    }
}
/*
 * Find the tiny region containing (ptr) (if any).
 *
 * We take advantage of the knowledge that tiny regions are always (1 << TINY_BLOCKS_ALIGN) aligned.
 */
static INLINE tiny_region_t *
tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
{
    tiny_region_t	*region;
    tiny_region_t	rbase;
    int			i;

    /* mask off irrelevant lower bits */
    rbase = TINY_REGION_FOR_PTR(ptr);
    /* iterate over allocated regions - XXX not terribly efficient for large number of regions */
    for (i = szone->num_tiny_regions, region = szone->tiny_regions; i > 0; i--, region++)
        if (rbase == *region)
            return region;
    return NULL;
}
static INLINE void
tiny_free_no_lock(szone_t *szone, tiny_region_t *region, void *ptr, msize_t msize)
{
    size_t	original_size = TINY_BYTES_FOR_MSIZE(msize);
    void	*next_block = ((char *)ptr + original_size);
    msize_t	previous_msize;
    void	*previous;
    msize_t	next_msize;
    free_list_t	*big_free_block;
    free_list_t	*after_next_block;
    free_list_t	*before_next_block;

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (! msize) {
        malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
        szone_error(szone, "trying to free tiny block that is too small", ptr);
    }
#endif
    // We try to coalesce this block with the preceeding one
    previous = tiny_previous_preceding_free(ptr, &previous_msize);
    if (previous) {
#if DEBUG_MALLOC
        if (LOG(szone, ptr) || LOG(szone,previous)) {
            malloc_printf("in tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
        }
#endif
        tiny_free_list_remove_ptr(szone, previous, previous_msize);
        ptr = previous;
        msize += previous_msize;
    }
    // We try to coalesce with the next block
    if ((next_block < TINY_REGION_END(*region)) && tiny_meta_header_is_free(next_block)) {
        // The next block is free, we coalesce
        next_msize = TINY_FREE_SIZE(next_block);
#if DEBUG_MALLOC
        if (LOG(szone, ptr) || LOG(szone, next_block)) {
            malloc_printf("in tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n",
              ptr, msize, next_block, next_msize);
        }
#endif
        if (next_msize >= NUM_TINY_SLOTS) {
            // we take a short cut here to avoid removing next_block from the slot 31 freelist and then adding ptr back to slot 31
            msize += next_msize;
            big_free_block = (free_list_t *)next_block;
            after_next_block = big_free_block->next;
            before_next_block = big_free_block->previous;
            free_list_checksum(szone, big_free_block, __PRETTY_FUNCTION__);
            if (!before_next_block) {
                szone->tiny_free_list[NUM_TINY_SLOTS-1] = ptr;
            } else {
                before_next_block->next = ptr;
                free_list_set_checksum(szone, before_next_block);
            }
            if (after_next_block) {
                after_next_block->previous = ptr;
                free_list_set_checksum(szone, after_next_block);
            }
            ((free_list_t *)ptr)->previous = before_next_block;
            ((free_list_t *)ptr)->next = after_next_block;
            free_list_set_checksum(szone, ptr);
            set_tiny_meta_header_free(ptr, msize);
            set_tiny_meta_header_middle(big_free_block); // clear the meta_header to enable coalescing backwards
            goto tiny_free_ending;
        }
        tiny_free_list_remove_ptr(szone, next_block, next_msize);
        set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
        msize += next_msize;
    }
    if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize) {
        memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize));
    }
    tiny_free_list_add_ptr(szone, ptr, msize);
 tiny_free_ending:
    // When in proper debug mode we write on the memory to help debug memory smashers
    szone->num_tiny_objects--;
    szone->num_bytes_in_tiny_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
}
static void *
tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize)
{
    tiny_region_t	last_region, *new_regions;
    void		*last_block, *ptr, *aligned_address;

    // Allocates from the last region or a freshly allocated region
    // Before anything we transform the tiny_bytes_free_at_end - if any - to a regular free block
    if (szone->tiny_bytes_free_at_end) {
        last_region = szone->tiny_regions[szone->num_tiny_regions-1];
        last_block = TINY_REGION_END(last_region) - szone->tiny_bytes_free_at_end;
        tiny_free_list_add_ptr(szone, last_block, TINY_MSIZE_FOR_BYTES(szone->tiny_bytes_free_at_end));
        szone->tiny_bytes_free_at_end = 0;
    }
    // time to create a new region
    aligned_address = allocate_pages(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_TINY));
    if (!aligned_address) {
        return NULL;
    }
    // We set the padding after block_header to be all 1
    ((uint32_t *)(aligned_address + (1 << TINY_BLOCKS_ALIGN) + (NUM_TINY_BLOCKS >> 3)))[0] = ~0;
    if (szone->num_tiny_regions == INITIAL_NUM_TINY_REGIONS) {
        // XXX logic here fails after initial reallocation of tiny regions is exhausted (approx 4GB of
        // tiny allocations)
        new_regions = small_malloc_from_region_no_lock(szone, 16); // 16 * 512 bytes is plenty of tiny regions (more than 4,000)
        if (!new_regions) return NULL;
        memcpy(new_regions, szone->tiny_regions, INITIAL_NUM_TINY_REGIONS * sizeof(tiny_region_t));
        szone->tiny_regions = new_regions; // we set the pointer after it's all ready to enable enumeration from another thread without locking
    }
    szone->tiny_regions[szone->num_tiny_regions] = aligned_address;
    szone->num_tiny_regions++; // we set the number after the pointer is all ready to enable enumeration from another thread without taking the lock
    ptr = aligned_address;
    set_tiny_meta_header_in_use(ptr, msize);
    szone->num_tiny_objects++;
    szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(msize);
    // We put a header on the last block so that it appears in use (for coalescing, etc...)
    set_tiny_meta_header_in_use(ptr + TINY_BYTES_FOR_MSIZE(msize), 1);
    szone->tiny_bytes_free_at_end = TINY_BYTES_FOR_MSIZE(NUM_TINY_BLOCKS - msize);
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
#endif
    return ptr;
}
static INLINE boolean_t
try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
{
    // returns 1 on success
    msize_t	index;
    msize_t	old_msize;
    unsigned	next_index;
    void	*next_block;
    boolean_t	is_free;
    msize_t	next_msize, coalesced_msize, leftover_msize;
    void	*leftover;

    index = TINY_INDEX_FOR_PTR(ptr);
    old_msize = TINY_MSIZE_FOR_BYTES(old_size);
    next_index = index + old_msize;

    if (next_index >= NUM_TINY_BLOCKS) {
        return 0;
    }
    next_block = (char *)ptr + old_size;
    SZONE_LOCK(szone);
    is_free = tiny_meta_header_is_free(next_block);
    if (!is_free) {
        SZONE_UNLOCK(szone);
        return 0; // next_block is in use;
    }
    next_msize = TINY_FREE_SIZE(next_block);
    if (old_size + TINY_BYTES_FOR_MSIZE(next_msize) < new_size) {
        SZONE_UNLOCK(szone);
        return 0; // even with next block, not enough
    }
    tiny_free_list_remove_ptr(szone, next_block, next_msize);
    set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
    coalesced_msize = TINY_MSIZE_FOR_BYTES(new_size - old_size + TINY_QUANTUM - 1);
    leftover_msize = next_msize - coalesced_msize;
    if (leftover_msize) {
        leftover = next_block + TINY_BYTES_FOR_MSIZE(coalesced_msize);
        tiny_free_list_add_ptr(szone, leftover, leftover_msize);
    }
    set_tiny_meta_header_in_use(ptr, old_msize + coalesced_msize);
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in try_realloc_tiny_in_place(), ptr=%p, msize=%d\n", ptr, old_msize + coalesced_msize);
    }
#endif
    szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(coalesced_msize);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    return 1;
}
static boolean_t
tiny_check_region(szone_t *szone, tiny_region_t *region)
{
    uintptr_t	start, ptr, region_end, follower;
    boolean_t	prev_free = 0;
    boolean_t	is_free;
    msize_t	msize;
    free_list_t	*free_head;

    /* establish region limits */
    start = (uintptr_t)TINY_REGION_ADDRESS(*region);
    ptr = start;
    region_end = (uintptr_t)TINY_REGION_END(*region);

    /*
     * The last region may have a trailing chunk which has not been converted into inuse/freelist
     * blocks yet.
     */
    if (region == szone->tiny_regions + szone->num_tiny_regions - 1)
        region_end -= szone->tiny_bytes_free_at_end;

    /*
     * Scan blocks within the region.
     */
    while (ptr < region_end) {
        /*
         * If the first block is free, and its size is 65536 (msize = 0) then the entire region is
         * free.
         */
        msize = get_tiny_meta_header((void *)ptr, &is_free);
        if (is_free && !msize && (ptr == start)) {
            return 1;
        }

        /*
         * If the block's size is 65536 (msize = 0) then since we're not the first entry the size is
         * too small.
         */
        if (!msize) {
            malloc_printf("*** invariant broken for tiny block %p this msize=%d - size is too small\n",
              ptr, msize);
            return 0;
        }

        if (!is_free) {
            /*
             * In use blocks cannot be more than 31 quanta large.
             */
            prev_free = 0;
            if (msize > 31 * TINY_QUANTUM) {
                malloc_printf("*** invariant broken for %p this tiny msize=%d[%p] - size is too large\n",
                  ptr, msize, msize);
                return 0;
            }
            /* move to next block */
            ptr += TINY_BYTES_FOR_MSIZE(msize);
        } else {
            /*
             * Free blocks must have been coalesced, we cannot have a free block following another
             * free block.
             */
            if (prev_free) {
                malloc_printf("*** invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n",
                  ptr, msize);
                return 0;
            }
            prev_free = 1;
            /*
             * Check the integrity of this block's entry in its freelist.
             */
            free_head = (free_list_t *)ptr;
            free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
            if (free_head->previous && !tiny_meta_header_is_free(free_head->previous)) {
                malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n",
                  ptr, free_head->previous);
                return 0;
            }
            if (free_head->next && !tiny_meta_header_is_free(free_head->next)) {
                malloc_printf("*** invariant broken for %p (next in free list %p is not a free pointer)\n",
                  ptr, free_head->next);
                return 0;
            }
            /*
             * Check the free block's trailing size value.
             */
            follower = (uintptr_t)FOLLOWING_TINY_PTR(ptr, msize);
            if ((follower != region_end) && (TINY_PREVIOUS_MSIZE(follower) != msize)) {
                malloc_printf("*** invariant broken for tiny free %p followed by %p in region [%p-%p] "
                  "(end marker incorrect) should be %d; in fact %d\n",
                  ptr, follower, TINY_REGION_ADDRESS(*region), region_end, msize, TINY_PREVIOUS_MSIZE(follower));
                return 0;
            }
            /* move to next block */
            ptr = follower;
        }
    }
    /*
     * Ensure that we scanned the entire region
     */
    if (ptr != region_end) {
        malloc_printf("*** invariant broken for region end %p - %p\n", ptr, region_end);
        return 0;
    }
    /*
     * Check the trailing block's integrity.
     */
    if (region == szone->tiny_regions + szone->num_tiny_regions - 1) {
        if (szone->tiny_bytes_free_at_end) {
            msize = get_tiny_meta_header((void *)ptr, &is_free);
            if (is_free || (msize != 1)) {
                malloc_printf("*** invariant broken for blocker block %p - %d %d\n", ptr, msize, is_free);
            }
        }
    }
    return 1;
}
static kern_return_t
tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t tiny_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder)
{
    tiny_region_t	*regions;
    unsigned		index = 0;
    vm_range_t		buffer[MAX_RECORDER_BUFFER];
    unsigned		count = 0;
    kern_return_t	err;
    tiny_region_t	region;
    vm_range_t		range;
    vm_range_t		admin_range;
    vm_range_t		ptr_range;
    unsigned char	*mapped_region;
    unsigned char	*block_header;
    unsigned char	*in_use;
    unsigned		block_index;
    unsigned		block_limit;
    boolean_t		is_free;
    msize_t		msize;
    void		*mapped_ptr;
    unsigned		bit;

    err = reader(task, region_address, sizeof(tiny_region_t) * num_regions, (void **)&regions);
    if (err) return err;
    while (index < num_regions) {
        // unsigned	num_in_use = 0;
        // unsigned	num_free = 0;
        region = regions[index];
        range.address = (vm_address_t)TINY_REGION_ADDRESS(region);
        range.size = (vm_size_t)TINY_REGION_SIZE;
        if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
            admin_range.address = range.address + (1 << TINY_BLOCKS_ALIGN);
            admin_range.size = range.size - (1 << TINY_BLOCKS_ALIGN);
            recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
        }
        if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
            ptr_range.address = range.address;
            ptr_range.size = 1 << TINY_BLOCKS_ALIGN;
            recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
        }
        if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
            err = reader(task, range.address, range.size, (void **)&mapped_region);
            if (err)
                return err;
            block_header = (unsigned char *)(mapped_region + (1 << TINY_BLOCKS_ALIGN));
            in_use = block_header + (NUM_TINY_BLOCKS >> 3) + 4;
            block_index = 0;
            block_limit = NUM_TINY_BLOCKS;
            if (index == num_regions - 1)
                block_limit -= TINY_MSIZE_FOR_BYTES(tiny_bytes_free_at_end);
            while (block_index < block_limit) {
                is_free = !BITARRAY_BIT(in_use, block_index);
                if (is_free) {
                    mapped_ptr = mapped_region + TINY_BYTES_FOR_MSIZE(block_index);
                    msize = TINY_FREE_SIZE(mapped_ptr);
                    if (!msize)
                        break;
                } else {
                    msize = 1;
                    bit = block_index + 1;
                    while (! BITARRAY_BIT(block_header, bit)) {
                        bit++; msize++;
                    }
                    buffer[count].address = range.address + TINY_BYTES_FOR_MSIZE(block_index);
                    buffer[count].size = TINY_BYTES_FOR_MSIZE(msize);
                    count++;
                    if (count >= MAX_RECORDER_BUFFER) {
                        recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
                        count = 0;
                    }
                }
                block_index += msize;
            }
        }
        index++;
    }
    if (count) {
        recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
    }
    return 0;
}
static INLINE void *
tiny_malloc_from_free_list(szone_t *szone, msize_t msize)
{
    // Assumes we've locked the region
    void	*ptr;
    msize_t	this_msize;
    grain_t	slot = msize - 1;
    free_list_t	**free_list = szone->tiny_free_list;
    free_list_t	**the_slot = free_list + slot;
    free_list_t	*next;
    free_list_t	**limit;
    unsigned	bitmap;
    msize_t	leftover_msize;
    free_list_t	*leftover_ptr;

    /*
     * Look for an exact match by checking the freelist for this msize.
     */
    ptr = *the_slot;
    if (ptr) {
        next = ((free_list_t *)ptr)->next;
        if (next) {
            next->previous = NULL;
            free_list_set_checksum(szone, next);
        }
        *the_slot = next;
        this_msize = msize;
#if DEBUG_MALLOC
        if (LOG(szone, ptr)) {
            malloc_printf("in tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize);
        }
#endif
        goto return_tiny_alloc;
    }

    /*
     * Iterate over freelists for larger blocks looking for the next-largest block.
     */
    bitmap = szone->tiny_bitmap & ~ ((1 << slot) - 1);
    if (!bitmap)
        goto try_tiny_malloc_from_end;
    slot = BITMAP32_FFS(bitmap) - 1;
    limit = free_list + NUM_TINY_SLOTS - 1;
    free_list += slot;
    while (free_list < limit) {
        // try bigger grains
        ptr = *free_list;
        if (ptr) {
            next = ((free_list_t *)ptr)->next;
            if (next) {
                next->previous = NULL;
                free_list_set_checksum(szone, next);
            }
            *free_list = next;
            this_msize = TINY_FREE_SIZE(ptr);
#if DEBUG_MALLOC
            if (LOG(szone, ptr)) {
                malloc_printf("in tiny_malloc_from_free_list(), bigger grain ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
            }
#endif
            goto add_leftover_and_proceed;
        }
        free_list++;
    }
    // we are now looking at the last slot (31)
    ptr = *limit;
    if (ptr) {
        this_msize = TINY_FREE_SIZE(ptr);
        next = ((free_list_t *)ptr)->next;
        if (this_msize - msize >= NUM_TINY_SLOTS) {
            // the leftover will go back to the free list, so we optimize by modifying the free list rather than removing the head and then adding back
            leftover_msize = this_msize - msize;
            leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
            *limit = leftover_ptr;
            if (next) {
                next->previous = leftover_ptr;
                free_list_set_checksum(szone, next);
            }
            leftover_ptr->next = next;
            leftover_ptr->previous = NULL;
            free_list_set_checksum(szone, leftover_ptr);
            set_tiny_meta_header_free(leftover_ptr, leftover_msize);
#if DEBUG_MALLOC
            if (LOG(szone,ptr)) {
                malloc_printf("in tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
            }
#endif
            this_msize = msize;
            goto return_tiny_alloc;
        }
        *limit = next;
        if (next) {
            next->previous = NULL;
            free_list_set_checksum(szone, next);
        }
        goto add_leftover_and_proceed;
    }

try_tiny_malloc_from_end:
    // Let's see if we can use szone->tiny_bytes_free_at_end
    if (szone->tiny_bytes_free_at_end >= TINY_BYTES_FOR_MSIZE(msize)) {
        ptr = (free_list_t *)(TINY_REGION_END(szone->tiny_regions[szone->num_tiny_regions-1]) - szone->tiny_bytes_free_at_end);
        szone->tiny_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(msize);
        if (szone->tiny_bytes_free_at_end) {
            // let's add an in use block after ptr to serve as boundary
            set_tiny_meta_header_in_use((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize), 1);
        }
        this_msize = msize;
#if DEBUG_MALLOC
        if (LOG(szone, ptr)) {
            malloc_printf("in tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize);
        }
#endif
        goto return_tiny_alloc;
    }
    return NULL;

add_leftover_and_proceed:
    if (!this_msize || (this_msize > msize)) {
        leftover_msize = this_msize - msize;
        leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
#if DEBUG_MALLOC
        if (LOG(szone,ptr)) {
            malloc_printf("in tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
        }
#endif
        tiny_free_list_add_ptr(szone, leftover_ptr, leftover_msize);
        this_msize = msize;
    }

return_tiny_alloc:
    szone->num_tiny_objects++;
    szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(this_msize);
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
    }
#endif
    set_tiny_meta_header_in_use(ptr, this_msize);
    return ptr;
}
static INLINE void *
tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested)
{
    boolean_t	locked = 0;
    void	*ptr;

#if DEBUG_MALLOC
    if (!msize) {
        szone_error(szone, "invariant broken (!msize) in allocation (region)", NULL);
        return NULL;
    }
#endif
#if TINY_CACHE
    ptr = szone->last_tiny_free;
    if ((((uintptr_t)ptr) & (TINY_QUANTUM - 1)) == msize) {
        // we have a candidate - let's lock to make sure
        LOCK_AND_NOTE_LOCKED(szone, locked);
        if (ptr == szone->last_tiny_free) {
            szone->last_tiny_free = NULL;
            SZONE_UNLOCK(szone);
            CHECK(szone, __PRETTY_FUNCTION__);
            ptr = (void *)((uintptr_t)ptr & ~ (TINY_QUANTUM - 1));
            if (cleared_requested) {
                memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
            }
#if DEBUG_MALLOC
            if (LOG(szone,ptr)) {
                malloc_printf("in tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize);
            }
#endif
            return ptr;
        }
    }
#endif
    // Except in rare occasions where we need to add a new region, we are going to end up locking, so we might as well lock right away to avoid doing unnecessary optimistic probes
    if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked);
    ptr = tiny_malloc_from_free_list(szone, msize);
    if (ptr) {
        SZONE_UNLOCK(szone);
        CHECK(szone, __PRETTY_FUNCTION__);
        if (cleared_requested) {
            memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
        }
        return ptr;
    }
    ptr = tiny_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    return ptr;
}
static INLINE void
free_tiny(szone_t *szone, void *ptr, tiny_region_t *tiny_region)
{
    msize_t	msize;
    boolean_t	is_free;
#if TINY_CACHE
    void	*ptr2;
#endif

    // ptr is known to be in tiny_region
    SZONE_LOCK(szone);
#if TINY_CACHE
    ptr2 = szone->last_tiny_free;
    /* check that we don't already have this pointer in the cache */
    if (ptr == (void *)((uintptr_t)ptr2 & ~ (TINY_QUANTUM - 1))) {
        szone_error(szone, "double free", ptr);
        return;
    }
#endif /* TINY_CACHE */
    msize = get_tiny_meta_header(ptr, &is_free);
    if (is_free) {
        szone_error(szone, "double free", ptr);
        return;
    }
#if DEBUG_MALLOC
    if (!msize) {
        malloc_printf("*** szone_free() block in use is too large: %p\n", ptr);
    }
#endif
#if TINY_CACHE
    if (msize < TINY_QUANTUM) {	// to see if the bits fit in the last 4 bits
        szone->last_tiny_free = (void *)(((uintptr_t)ptr) | msize);
        if (!ptr2) {
            SZONE_UNLOCK(szone);
            CHECK(szone, __PRETTY_FUNCTION__);
            return;
        }
        msize = (uintptr_t)ptr2 & (TINY_QUANTUM - 1);
        ptr = (void *)(((uintptr_t)ptr2) & ~(TINY_QUANTUM - 1));
        tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
        if (!tiny_region) {
            szone_error(szone, "double free (tiny cache)", ptr);
            return;
        }
    }
#endif
    tiny_free_no_lock(szone, tiny_region, ptr, msize);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
}
static void
print_tiny_free_list(szone_t *szone)
{
    grain_t	slot = 0;
    free_list_t	*ptr;

    malloc_printf("tiny free sizes: ");
    while (slot < NUM_TINY_SLOTS) {
        ptr = szone->tiny_free_list[slot];
        if (ptr) {
            malloc_printf("%s%y[%d]; ", (slot == NUM_TINY_SLOTS-1) ? ">=" : "", (slot+1)*TINY_QUANTUM, free_list_count(ptr));
        }
        slot++;
    }
    malloc_printf("\n");
}
static void
print_tiny_region(boolean_t verbose, tiny_region_t region, size_t bytes_at_end)
{
    unsigned	counts[1024];
    unsigned	in_use = 0;
    uintptr_t	start = (uintptr_t)TINY_REGION_ADDRESS(region);
    uintptr_t	current = start;
    uintptr_t	limit = (uintptr_t)TINY_REGION_END(region) - bytes_at_end;
    boolean_t	is_free;
    msize_t	msize;
    unsigned	ci;

    memset(counts, 0, 1024 * sizeof(unsigned));
    while (current < limit) {
        msize = get_tiny_meta_header((void *)current, &is_free);
        if (is_free && !msize && (current == start)) {
            // first block is all free
            break;
        }
        if (!msize) {
            malloc_printf("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize);
            break;
        }
        if (!is_free) {
            // block in use
            if (msize > 31)
                malloc_printf("*** error at %p msize for in_use is %d\n", (void *)current, msize);
            if (msize < 1024)
                counts[msize]++;
            in_use++;
        }
        current += TINY_BYTES_FOR_MSIZE(msize);
    }
    malloc_printf("Tiny region [%p-%p, %y]\t", (void *)start, TINY_REGION_END(region), (int)TINY_REGION_SIZE);
    malloc_printf("In_use=%d ", in_use);
    if (bytes_at_end) malloc_printf("untouched=%y ", bytes_at_end);
    if (verbose && in_use) {
        malloc_printf("\tSizes in use: ");
        for (ci = 0; ci < 1024; ci++)
            if (counts[ci])
                malloc_printf("%d[%d]", TINY_BYTES_FOR_MSIZE(ci), counts[ci]);
    }
    malloc_printf("\n");
}
tiny_free_list_check(szone_t *szone, grain_t slot)
    free_list_t *ptr = szone->tiny_free_list[slot];
    free_list_t *previous = NULL;

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    free_list_checksum(szone, ptr, __PRETTY_FUNCTION__);
    is_free = tiny_meta_header_is_free(ptr);
    malloc_printf("*** in-use ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
    if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
        malloc_printf("*** unaligned ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
    if (!tiny_region_for_ptr_no_lock(szone, ptr)) {
        malloc_printf("*** ptr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr);
    if (ptr->previous != previous) {
        malloc_printf("*** previous incorrectly set slot=%d count=%d ptr=%p\n", slot, count, ptr);
/********************* SMALL FREE LIST UTILITIES ************************/

/*
 * Mark a block as free. Only the first quantum of a block is marked thusly,
 * the remainder are marked "middle".
 */
small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize)
    meta_headers[index] = msize | SMALL_IS_FREE;

/*
 * Mark a block as in use. Only the first quantum of a block is marked thusly,
 * the remainder are marked "middle".
 */
small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize)
    meta_headers[index] = msize;

/*
 * Mark a quantum as being the second or later in a block.
 */
small_meta_header_set_middle(msize_t *meta_headers, msize_t index)
    meta_headers[index] = 0;

// Adds an item to the proper free list
// Also marks the header of the block properly
// Assumes szone has been locked
small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize)
    grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *free_head = szone->small_free_list[grain];

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    if (LOG(szone, ptr)) {
        malloc_printf("in small_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) {
        szone_error(szone, "small_free_list_add_ptr: Unaligned ptr", ptr);
    free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
    if (free_head->previous) {
        malloc_printf("ptr=%p grain=%d free_head=%p previous=%p\n",
            ptr, grain, free_head, free_head->previous);
        szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
    if (!SMALL_PTR_IS_FREE(free_head)) {
        malloc_printf("ptr=%p grain=%d free_head=%p\n", ptr, grain, free_head);
        szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
    free_head->previous = free_ptr;
    free_list_set_checksum(szone, free_head);
    BITMAP32_SET(szone->small_bitmap, grain);
    free_ptr->previous = NULL;
    free_ptr->next = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->small_free_list[grain] = free_ptr;
    follower = ptr + SMALL_BYTES_FOR_MSIZE(msize);
    SMALL_PREVIOUS_MSIZE(follower) = msize;
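
/*
 * Illustrative sketch (assumption-labeled, not the allocator's own types):
 * pushing a freed block onto the head of a doubly linked free list, the same
 * shape of update small_free_list_add_ptr performs above, minus the checksums
 * and the non-empty-slot bitmap. Kept under #if 0 so it stays out of the build.
 */
#if 0
#include <stddef.h>

struct example_free_node {
    struct example_free_node *previous;
    struct example_free_node *next;
};

static void example_push_front(struct example_free_node **head, struct example_free_node *node)
{
    node->previous = NULL;        /* new head has no predecessor */
    node->next = *head;           /* old head follows it */
    if (*head)
        (*head)->previous = node;
    *head = node;
}
#endif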
// Removes item in the proper free list
// msize could be read, but all callers have it so we pass it in
// Assumes szone has been locked
small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize)
    grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *next = free_ptr->next;
    free_list_t *previous = free_ptr->previous;

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    if (LOG(szone, ptr)) {
        malloc_printf("in small_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
    if (szone->small_free_list[grain] != ptr) {
        malloc_printf("ptr=%p grain=%d msize=%d szone->small_free_list[grain]=%p\n",
            ptr, grain, msize, szone->small_free_list[grain]);
        szone_error(szone, "small_free_list_remove_ptr: Internal invariant broken (szone->small_free_list[grain])", ptr);
    szone->small_free_list[grain] = next;
    if (!next) BITMAP32_CLR(szone->small_bitmap, grain);
    previous->next = next;
    free_list_set_checksum(szone, previous);
    next->previous = previous;
    free_list_set_checksum(szone, next);

static INLINE small_region_t *
small_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
    small_region_t *region;
    small_region_t rbase;

    /* find assumed heap/region base */
    rbase = SMALL_REGION_FOR_PTR(ptr);

    /* scan existing regions for a match */
    for (i = szone->num_small_regions, region = szone->small_regions; i > 0; i--, region++)
        if (rbase == *region)
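
/*
 * Illustrative sketch: deciding whether a pointer belongs to this allocator by
 * masking it down to a region-aligned base and scanning the table of known
 * region bases, as small_region_for_ptr_no_lock does above. The alignment and
 * array shape here are assumptions made for the example only (#if 0'd out).
 */
#if 0
#include <stdint.h>
#include <stddef.h>

#define EXAMPLE_REGION_ALIGN 23  /* assumed: 8MB-aligned regions for the sketch */

static int example_ptr_in_regions(const void *ptr, const uintptr_t *region_bases, size_t count)
{
    uintptr_t base = (uintptr_t)ptr & ~(((uintptr_t)1 << EXAMPLE_REGION_ALIGN) - 1);
    for (size_t i = 0; i < count; i++)
        if (region_bases[i] == base)
            return 1;   /* ptr falls inside a region we manage */
    return 0;
}
#endif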
small_free_no_lock(szone_t *szone, small_region_t *region, void *ptr, msize_t msize)
    msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
    unsigned index = SMALL_META_INDEX_FOR_PTR(ptr);
    size_t original_size = SMALL_BYTES_FOR_MSIZE(msize);
    unsigned char *next_block = ((unsigned char *)ptr + original_size);
    msize_t next_index = index + msize;
    msize_t previous_msize, next_msize;

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    if (LOG(szone, ptr)) {
        malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    szone_error(szone, "trying to free small block that is too small", ptr);
    // We try to coalesce this block with the preceding one
    if (index && (SMALL_PREVIOUS_MSIZE(ptr) <= index)) {
        previous_msize = SMALL_PREVIOUS_MSIZE(ptr);
        if (meta_headers[index - previous_msize] == (previous_msize | SMALL_IS_FREE)) {
            previous = ptr - SMALL_BYTES_FOR_MSIZE(previous_msize);
            // previous is really to be coalesced
            if (LOG(szone, ptr) || LOG(szone, previous)) {
                malloc_printf("in small_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
            small_free_list_remove_ptr(szone, previous, previous_msize);
            small_meta_header_set_middle(meta_headers, index);
            msize += previous_msize;
            index -= previous_msize;
    // We try to coalesce with the next block
    if ((next_block < SMALL_REGION_END(*region)) && (meta_headers[next_index] & SMALL_IS_FREE)) {
        // next block is free, we coalesce
        next_msize = meta_headers[next_index] & ~ SMALL_IS_FREE;
        if (LOG(szone, ptr)) malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", ptr, msize, next_block, next_msize);
        small_free_list_remove_ptr(szone, next_block, next_msize);
        small_meta_header_set_middle(meta_headers, next_index);
        msize += next_msize;
    if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) {
        szone_error(szone, "incorrect size information - block header was damaged", ptr);
        memset(ptr, 0x55, SMALL_BYTES_FOR_MSIZE(msize));
    small_free_list_add_ptr(szone, ptr, msize);
    small_meta_header_set_is_free(meta_headers, index, msize);
    szone->num_small_objects--;
    szone->num_bytes_in_small_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
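
/*
 * Illustrative sketch of the coalescing arithmetic used above: given a block's
 * quantum index and size, merge it with a free left neighbor (whose size is
 * recorded just before the block) and a free right neighbor (whose size is in
 * the metadata array). The metadata encoding and names below are assumptions
 * for the example only, not the real SMALL_IS_FREE machinery (#if 0'd out).
 */
#if 0
#define EXAMPLE_IS_FREE 0x8000u

static void example_coalesce(unsigned short *meta, unsigned *index, unsigned *msize,
                             unsigned prev_msize, unsigned region_blocks)
{
    /* merge with a free left neighbor whose size is prev_msize quanta */
    if (prev_msize && *index >= prev_msize &&
        meta[*index - prev_msize] == (unsigned short)(prev_msize | EXAMPLE_IS_FREE)) {
        meta[*index] = 0;                      /* old header becomes a middle quantum */
        *index -= prev_msize;
        *msize += prev_msize;
    }
    /* merge with a free right neighbor, if it is still inside the region */
    unsigned next = *index + *msize;
    if (next < region_blocks && (meta[next] & EXAMPLE_IS_FREE)) {
        *msize += meta[next] & ~EXAMPLE_IS_FREE;
        meta[next] = 0;                        /* its header becomes a middle quantum */
    }
    meta[*index] = (unsigned short)(*msize | EXAMPLE_IS_FREE);
}
#endif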
small_malloc_from_region_no_lock(szone_t *szone, msize_t msize)
    small_region_t last_region;
    msize_t *meta_headers;
    size_t region_capacity;
    small_region_t *new_regions;

    // Allocates from the last region or a freshly allocated region
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    // Before anything we transform the small_bytes_free_at_end - if any - to a regular free block
    if (szone->small_bytes_free_at_end) {
        last_region = szone->small_regions[szone->num_small_regions - 1];
        last_block = (void *)(SMALL_REGION_END(last_region) - szone->small_bytes_free_at_end);
        small_free_list_add_ptr(szone, last_block, SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end));
        *SMALL_METADATA_FOR_PTR(last_block) = SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end) | SMALL_IS_FREE;
        szone->small_bytes_free_at_end = 0;
    // time to create a new region
    new_address = allocate_pages(szone, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_SMALL));
    meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
    if (szone->num_small_regions == INITIAL_NUM_SMALL_REGIONS) {
        // time to grow the number of regions
        region_capacity = (1 << (32 - SMALL_BLOCKS_ALIGN)) - 20; // that is for sure the maximum number of small regions we can have
        new_msize = (region_capacity * sizeof(small_region_t) + SMALL_QUANTUM - 1) / SMALL_QUANTUM;
        small_meta_header_set_in_use(meta_headers, index, new_msize);
        szone->num_small_objects++;
        szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(new_msize);
        memcpy(new_regions, szone->small_regions, INITIAL_NUM_SMALL_REGIONS * sizeof(small_region_t));
        // We intentionally leak the previous regions pointer to avoid multi-threading crashes if
        // another thread was reading it (unlocked) while we are changing it.
        szone->small_regions = new_regions; // note we set this pointer after it's all set
        ptr += SMALL_BYTES_FOR_MSIZE(new_msize);
    szone->small_regions[szone->num_small_regions] = new_address;
    // we bump the number of regions AFTER we have changed the regions pointer to enable finding a
    // small region without taking the lock
    // XXX naive assumption assumes memory ordering coherence between this and other CPUs
    szone->num_small_regions++;
    small_meta_header_set_in_use(meta_headers, index, msize);
    msize_left = NUM_SMALL_BLOCKS - index;
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(msize);
    // add a big free block
    index += msize; msize_left -= msize;
    meta_headers[index] = msize_left;
    szone->small_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(msize_left);
static INLINE boolean_t
try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
    // returns 1 on success
    msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
    unsigned index = SMALL_META_INDEX_FOR_PTR(ptr);
    msize_t old_msize = SMALL_MSIZE_FOR_BYTES(old_size);
    msize_t new_msize = SMALL_MSIZE_FOR_BYTES(new_size + SMALL_QUANTUM - 1);
    void *next_block = (char *)ptr + old_size;
    unsigned next_index = index + old_msize;
    msize_t next_msize_and_free;
    msize_t leftover_msize;
    unsigned leftover_index;

    if (next_index >= NUM_SMALL_BLOCKS) {
    if ((uintptr_t)next_block & (SMALL_QUANTUM - 1)) {
        szone_error(szone, "internal invariant broken in realloc(next_block)", next_block);
    if (meta_headers[index] != old_msize)
        malloc_printf("*** try_realloc_small_in_place incorrect old %d %d\n",
            meta_headers[index], old_msize);

    /*
     * Look for a free block immediately afterwards. If it's large enough, we can consume (part of) it.
     */
    next_msize_and_free = meta_headers[next_index];
    next_msize = next_msize_and_free & ~ SMALL_IS_FREE;
    if (!(next_msize_and_free & SMALL_IS_FREE) || (old_msize + next_msize < new_msize)) {
        SZONE_UNLOCK(szone);

    /*
     * The following block is big enough; pull it from its freelist and chop off enough to satisfy the request.
     */
    small_free_list_remove_ptr(szone, next_block, next_msize);
    small_meta_header_set_middle(meta_headers, next_index);
    leftover_msize = old_msize + next_msize - new_msize;
    if (leftover_msize) {
        /* there's some left, so put the remainder back */
        leftover = (unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(new_msize);
        small_free_list_add_ptr(szone, leftover, leftover_msize);
        leftover_index = index + new_msize;
        small_meta_header_set_is_free(meta_headers, leftover_index, leftover_msize);
    if (SMALL_BYTES_FOR_MSIZE(new_msize) >= LARGE_THRESHOLD) {
        malloc_printf("*** realloc in place for %p exceeded msize=%d\n", ptr, new_msize);
    small_meta_header_set_in_use(meta_headers, index, new_msize);
    if (LOG(szone, ptr)) {
        malloc_printf("in szone_realloc(), ptr=%p, msize=%d\n", ptr, *SMALL_METADATA_FOR_PTR(ptr));
    szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(new_msize - old_msize);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
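
/*
 * Illustrative sketch of the in-place-grow decision above: an allocation can be
 * extended without copying only if the block that follows it is free and big
 * enough; whatever is not consumed goes back onto a free list. Plain arithmetic
 * only; the names are invented for this example (#if 0'd out).
 */
#if 0
/* Returns the leftover size in quanta, or -1 if the grow cannot happen. */
static int example_grow_in_place(unsigned old_msize, unsigned next_is_free,
                                 unsigned next_msize, unsigned new_msize)
{
    if (!next_is_free || old_msize + next_msize < new_msize)
        return -1;                                        /* neighbor missing or too small */
    return (int)(old_msize + next_msize - new_msize);     /* remainder to be re-freed */
}
#endif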
szone_check_small_region(szone_t *szone, small_region_t *region)
    unsigned char *ptr = SMALL_REGION_ADDRESS(*region);
    msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
    unsigned char *region_end = SMALL_REGION_END(*region);
    msize_t prev_free = 0;
    msize_t msize_and_free;
    free_list_t *free_head;

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    if (region == szone->small_regions + szone->num_small_regions - 1) region_end -= szone->small_bytes_free_at_end;
    while (ptr < region_end) {
        index = SMALL_META_INDEX_FOR_PTR(ptr);
        msize_and_free = meta_headers[index];
        if (!(msize_and_free & SMALL_IS_FREE)) {
            msize = msize_and_free;
            malloc_printf("*** invariant broken: null msize ptr=%p region#=%d num_small_regions=%d end=%p\n",
                ptr, region - szone->small_regions, szone->num_small_regions, (void *)region_end);
            if (msize > (LARGE_THRESHOLD / SMALL_QUANTUM)) {
                malloc_printf("*** invariant broken for %p this small msize=%d - size is too large\n",
                    ptr, msize_and_free);
            ptr += SMALL_BYTES_FOR_MSIZE(msize);
        msize = msize_and_free & ~ SMALL_IS_FREE;
        free_head = (free_list_t *)ptr;
        follower = (msize_t *)FOLLOWING_SMALL_PTR(ptr, msize);
        malloc_printf("*** invariant broken for free block %p this msize=%d\n", ptr, msize);
        malloc_printf("*** invariant broken for %p (2 free in a row)\n", ptr);
        free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
        if (free_head->previous && !SMALL_PTR_IS_FREE(free_head->previous)) {
            malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n",
                ptr, free_head->previous);
        if (free_head->next && !SMALL_PTR_IS_FREE(free_head->next)) {
            malloc_printf("*** invariant broken for %p (next is not a free pointer)\n", ptr);
        if (SMALL_PREVIOUS_MSIZE(follower) != msize) {
            malloc_printf("*** invariant broken for small free %p followed by %p in region [%p-%p] "
                "(end marker incorrect) should be %d; in fact %d\n",
                ptr, follower, SMALL_REGION_ADDRESS(*region), region_end, msize, SMALL_PREVIOUS_MSIZE(follower));
        ptr = (unsigned char *)follower;
        prev_free = SMALL_IS_FREE;
static kern_return_t
small_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t small_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder)
    small_region_t *regions;
    vm_range_t buffer[MAX_RECORDER_BUFFER];
    small_region_t region;
    vm_range_t admin_range;
    vm_range_t ptr_range;
    unsigned char *mapped_region;
    msize_t *block_header;
    unsigned block_index;
    unsigned block_limit;
    msize_t msize_and_free;

    err = reader(task, region_address, sizeof(small_region_t) * num_regions, (void **)&regions);
    if (err) return err;
    while (index < num_regions) {
        region = regions[index];
        range.address = (vm_address_t)SMALL_REGION_ADDRESS(region);
        range.size = SMALL_REGION_SIZE;
        if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
            admin_range.address = range.address + (1 << SMALL_BLOCKS_ALIGN);
            admin_range.size = range.size - (1 << SMALL_BLOCKS_ALIGN);
            recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
        if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
            ptr_range.address = range.address;
            ptr_range.size = 1 << SMALL_BLOCKS_ALIGN;
            recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
        if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
            err = reader(task, range.address, range.size, (void **)&mapped_region);
            if (err) return err;
            block_header = (msize_t *)(mapped_region + (1 << SMALL_BLOCKS_ALIGN));
            block_limit = NUM_SMALL_BLOCKS;
            if (index == num_regions - 1)
                block_limit -= SMALL_MSIZE_FOR_BYTES(small_bytes_free_at_end);
            while (block_index < block_limit) {
                msize_and_free = block_header[block_index];
                msize = msize_and_free & ~ SMALL_IS_FREE;
                if (! (msize_and_free & SMALL_IS_FREE)) {
                    buffer[count].address = range.address + SMALL_BYTES_FOR_MSIZE(block_index);
                    buffer[count].size = SMALL_BYTES_FOR_MSIZE(msize);
                    if (count >= MAX_RECORDER_BUFFER) {
                        recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
                block_index += msize;
    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
static INLINE void *
small_malloc_from_free_list(szone_t *szone, msize_t msize)
    grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
    unsigned bitmap = szone->small_bitmap & ~ ((1 << grain) - 1);
    free_list_t **free_list;
    free_list_t **limit;
    msize_t leftover_msize;
    msize_t *meta_headers;
    unsigned leftover_index;

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    if (!bitmap) goto try_small_from_end;
    grain = BITMAP32_FFS(bitmap) - 1;
    // first try the small grains
    limit = szone->small_free_list + NUM_SMALL_SLOTS - 1;
    free_list = szone->small_free_list + grain;
    while (free_list < limit) {
        // try bigger grains
        next = ((free_list_t *)ptr)->next;
        next->previous = NULL;
        free_list_set_checksum(szone, next);
        this_msize = SMALL_PTR_SIZE(ptr);
        goto add_leftover_and_proceed;
    // We now check the large grains for one that is big enough
    this_msize = SMALL_PTR_SIZE(ptr);
    if (this_msize >= msize) {
        small_free_list_remove_ptr(szone, ptr, this_msize);
        goto add_leftover_and_proceed;
    ptr = ((free_list_t *)ptr)->next;
    // Let's see if we can use szone->small_bytes_free_at_end
    if (szone->small_bytes_free_at_end >= SMALL_BYTES_FOR_MSIZE(msize)) {
        ptr = (void *)(SMALL_REGION_END(szone->small_regions[szone->num_small_regions-1]) - szone->small_bytes_free_at_end);
        szone->small_bytes_free_at_end -= SMALL_BYTES_FOR_MSIZE(msize);
        if (szone->small_bytes_free_at_end) {
            // let's mark this block as in use to serve as boundary
            *SMALL_METADATA_FOR_PTR(ptr + SMALL_BYTES_FOR_MSIZE(msize)) = SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end);
        goto return_small_alloc;
add_leftover_and_proceed:
    if (this_msize > msize) {
        leftover_msize = this_msize - msize;
        leftover_ptr = ptr + SMALL_BYTES_FOR_MSIZE(msize);
        if (LOG(szone, ptr)) {
            malloc_printf("in small_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
        small_free_list_add_ptr(szone, leftover_ptr, leftover_msize);
        meta_headers = SMALL_META_HEADER_FOR_PTR(leftover_ptr);
        leftover_index = SMALL_META_INDEX_FOR_PTR(leftover_ptr);
        small_meta_header_set_is_free(meta_headers, leftover_index, leftover_msize);
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(this_msize);
    if (LOG(szone, ptr)) {
        malloc_printf("in small_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
    *SMALL_METADATA_FOR_PTR(ptr) = this_msize;
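
/*
 * Illustrative sketch: the free-list search above first consults a 32-bit
 * bitmap with one bit per grain, so "first non-empty list at least this big"
 * is a mask plus a find-first-set. ffs() is the standard <strings.h> helper;
 * the grain numbering (bits 0..31, min_grain < 32) is an assumption made for
 * this example only (#if 0'd out).
 */
#if 0
#include <strings.h>

/* Returns the first grain >= min_grain whose free list is non-empty, or -1. */
static int example_first_usable_grain(unsigned bitmap, unsigned min_grain)
{
    unsigned masked = bitmap & ~((1u << min_grain) - 1);  /* drop the smaller grains */
    if (!masked)
        return -1;
    return ffs((int)masked) - 1;                          /* ffs is 1-based */
}
#endif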
static INLINE void *
small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested)
    boolean_t locked = 0;

    ptr = (void *)szone->last_small_free;
    if ((((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) == msize) {
        // we have a candidate - let's lock to make sure
        LOCK_AND_NOTE_LOCKED(szone, locked);
        if (ptr == (void *)szone->last_small_free) {
            szone->last_small_free = NULL;
            SZONE_UNLOCK(szone);
            CHECK(szone, __PRETTY_FUNCTION__);
            ptr = (void *)((uintptr_t)ptr & ~ (SMALL_QUANTUM - 1));
            if (cleared_requested) {
                memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
    // Except in rare occasions where we need to add a new region, we are going to end up locking,
    // so we might as well lock right away to avoid doing unnecessary optimistic probes
    if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked);
    ptr = small_malloc_from_free_list(szone, msize);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    if (cleared_requested) {
        memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
    ptr = small_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);

// tries to allocate a small, cleared block
static INLINE void *
small_malloc_cleared_no_lock(szone_t *szone, msize_t msize)
    // Assumes already locked
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    ptr = small_malloc_from_free_list(szone, msize);
    memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
    ptr = small_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
free_small(szone_t *szone, void *ptr, small_region_t *small_region)
    msize_t msize_and_free;

    // ptr is known to be in small_region
    msize_and_free = *SMALL_METADATA_FOR_PTR(ptr);
    if (msize_and_free & SMALL_IS_FREE) {
        szone_error(szone, "Object already freed being freed", ptr);
    CHECK(szone, __PRETTY_FUNCTION__);
    ptr2 = szone->last_small_free;
    szone->last_small_free = (void *)(((uintptr_t)ptr) | msize_and_free);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    msize_and_free = (uintptr_t)ptr2 & (SMALL_QUANTUM - 1);
    ptr = (void *)(((uintptr_t)ptr2) & ~ (SMALL_QUANTUM - 1));
    small_region = small_region_for_ptr_no_lock(szone, ptr);
    if (!small_region) {
        szone_error(szone, "double free (small cache)", ptr);
    small_free_no_lock(szone, small_region, ptr, msize_and_free);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);

print_small_free_list(szone_t *szone)
    malloc_printf("small free sizes: ");
    while (grain < NUM_SMALL_SLOTS) {
        ptr = szone->small_free_list[grain];
        malloc_printf("%s%y[%d]; ", (grain == NUM_SMALL_SLOTS-1) ? ">=" : "", (grain + 1) * SMALL_QUANTUM, free_list_count(ptr));
    malloc_printf("\n");

print_small_region(szone_t *szone, boolean_t verbose, small_region_t *region, size_t bytes_at_end)
    unsigned counts[1024];
    unsigned in_use = 0;
    void *start = SMALL_REGION_ADDRESS(*region);
    void *limit = SMALL_REGION_END(*region) - bytes_at_end;
    msize_t msize_and_free;

    memset(counts, 0, 1024 * sizeof(unsigned));
    while (start < limit) {
        msize_and_free = *SMALL_METADATA_FOR_PTR(start);
        msize = msize_and_free & ~ SMALL_IS_FREE;
        if (!(msize_and_free & SMALL_IS_FREE)) {
        start += SMALL_BYTES_FOR_MSIZE(msize);
    malloc_printf("Small region [%p-%p, %y]\tIn_use=%d ",
        SMALL_REGION_ADDRESS(*region), SMALL_REGION_END(*region), (int)SMALL_REGION_SIZE, in_use);
    malloc_printf("Untouched=%y ", bytes_at_end);
    if (verbose && in_use) {
        malloc_printf("\n\tSizes in use: ");
        for (ci = 0; ci < 1024; ci++)
            malloc_printf("%d[%d] ", SMALL_BYTES_FOR_MSIZE(ci), counts[ci]);
    malloc_printf("\n");
small_free_list_check(szone_t *szone, grain_t grain)
    free_list_t *ptr = szone->small_free_list[grain];
    free_list_t *previous = NULL;
    msize_t msize_and_free;

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    msize_and_free = *SMALL_METADATA_FOR_PTR(ptr);
    if (!(msize_and_free & SMALL_IS_FREE)) {
        malloc_printf("*** in-use ptr in free list grain=%d count=%d ptr=%p\n", grain, count, ptr);
    if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) {
        malloc_printf("*** unaligned ptr in free list grain=%d count=%d ptr=%p\n", grain, count, ptr);
    if (!small_region_for_ptr_no_lock(szone, ptr)) {
        malloc_printf("*** ptr not in szone grain=%d count=%d ptr=%p\n", grain, count, ptr);
    free_list_checksum(szone, ptr, __PRETTY_FUNCTION__);
    if (ptr->previous != previous) {
        malloc_printf("*** previous incorrectly set grain=%d count=%d ptr=%p\n", grain, count, ptr);
/********************* LARGE ENTRY UTILITIES ************************/

large_debug_print(szone_t *szone)
    unsigned num_large_entries = szone->num_large_entries;
    unsigned index = num_large_entries;
    large_entry_t *range;

    for (index = 0, range = szone->large_entries; index < szone->num_large_entries; index++, range++)
        if (!LARGE_ENTRY_IS_EMPTY(*range))
            malloc_printf("%d: %p(%y); ", index, LARGE_ENTRY_ADDRESS(*range), LARGE_ENTRY_SIZE(*range));
    malloc_printf("\n");

/*
 * Scan the hash ring looking for an entry for the given pointer.
 */
static large_entry_t *
large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr)
    // result only valid with lock held
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index;
    large_entry_t *range;

    if (!num_large_entries)
    hash_index = ((uintptr_t)ptr >> vm_page_shift) % num_large_entries;
        range = szone->large_entries + index;
        if (LARGE_ENTRY_MATCHES(*range, ptr))
        if (LARGE_ENTRY_IS_EMPTY(*range))
            return NULL; // end of chain
        if (index == num_large_entries)
    } while (index != hash_index);
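
/*
 * Illustrative sketch of the hash-ring probe used above: open addressing with
 * linear probing that wraps around, stopping on a match, on an empty slot
 * (end of the chain), or after a full loop. The entry layout is an assumption
 * made for the example; the real table stores address_and_num_pages words.
 * Kept under #if 0 so it stays out of the build.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct example_entry { uintptr_t key; };   /* key == 0 means "empty" */

static struct example_entry *
example_ring_lookup(struct example_entry *table, size_t count, uintptr_t key)
{
    if (!count)
        return NULL;
    size_t start = (size_t)(key % count), i = start;
    do {
        if (table[i].key == key)
            return &table[i];       /* found */
        if (table[i].key == 0)
            return NULL;            /* empty slot: end of probe chain */
        if (++i == count)
            i = 0;                  /* wrap around the ring */
    } while (i != start);
    return NULL;                    /* table full, key absent */
}
#endif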
large_entry_insert_no_lock(szone_t *szone, large_entry_t range)
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index = (range.address_and_num_pages >> vm_page_shift) % num_large_entries;
    unsigned index = hash_index;
    large_entry_t *entry;

        entry = szone->large_entries + index;
        if (LARGE_ENTRY_IS_EMPTY(*entry)) {
            return; // end of chain
        if (index == num_large_entries)
    } while (index != hash_index);

large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry)
    unsigned num_large_entries = szone->num_large_entries;
    unsigned hash_index = entry - szone->large_entries;
    unsigned index = hash_index;
    large_entry_t range;

        if (index == num_large_entries)
        range = szone->large_entries[index];
        if (LARGE_ENTRY_IS_EMPTY(range))
        szone->large_entries[index].address_and_num_pages = 0;
        large_entry_insert_no_lock(szone, range); // this will reinsert in the proper place
    } while (index != hash_index);

static INLINE large_entry_t *
large_entries_alloc_no_lock(szone_t *szone, unsigned num)
    size_t size = num * sizeof(large_entry_t);
    boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;

    if (is_vm_allocation) {
        // Note that we allocate memory (via a system call) under a spin lock
        // That is certainly evil, however it's very rare in the lifetime of a process
        // The alternative would slow down the normal case
        return (void *)allocate_pages(szone, round_page(size), 0, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE));
    return small_malloc_cleared_no_lock(szone, SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1));

large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate)
    // returns range to deallocate
    size_t size = num * sizeof(large_entry_t);
    boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;
    small_region_t *region;
    msize_t msize_and_free;

    if (is_vm_allocation) {
        range_to_deallocate->address = (vm_address_t)entries;
        range_to_deallocate->size = round_page(size);
    range_to_deallocate->size = 0;
    region = small_region_for_ptr_no_lock(szone, entries);
    msize_and_free = *SMALL_METADATA_FOR_PTR(entries);
    if (msize_and_free & SMALL_IS_FREE) {
        szone_error(szone, "object already freed being freed", entries);
    small_free_no_lock(szone, region, entries, msize_and_free);

large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate)
    // sets range_to_deallocate
    unsigned old_num_entries = szone->num_large_entries;
    large_entry_t *old_entries = szone->large_entries;
    unsigned new_num_entries = (old_num_entries) ? old_num_entries * 2 + 1 : 63; // always an odd number for good hashing
    large_entry_t *new_entries = large_entries_alloc_no_lock(szone, new_num_entries);
    unsigned index = old_num_entries;
    large_entry_t oldRange;

    szone->num_large_entries = new_num_entries;
    szone->large_entries = new_entries;

    /* rehash entries into the new list */
        oldRange = old_entries[index];
        if (!LARGE_ENTRY_IS_EMPTY(oldRange)) {
            large_entry_insert_no_lock(szone, oldRange);
    large_entries_free_no_lock(szone, old_entries, old_num_entries, range_to_deallocate);
    range_to_deallocate->size = 0;
// frees the specific entry in the size table
// returns a range to truly deallocate
large_free_no_lock(szone_t *szone, large_entry_t *entry)
    range.address = (vm_address_t)LARGE_ENTRY_ADDRESS(*entry);
    range.size = (vm_size_t)LARGE_ENTRY_SIZE(*entry);
    szone->num_large_objects_in_use--;
    szone->num_bytes_in_large_objects -= range.size;
    if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
        protect(szone, (void *)range.address, range.size, VM_PROT_READ | VM_PROT_WRITE, szone->debug_flags);
        range.address -= vm_page_size;
        range.size += 2 * vm_page_size;
    entry->address_and_num_pages = 0;
    large_entries_rehash_after_entry_no_lock(szone, entry);
    if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) {
        malloc_printf("*** freed entry %p still in use; num_large_entries=%d\n",
            range.address, szone->num_large_entries);
        large_debug_print(szone);
static kern_return_t
large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder)
    vm_range_t buffer[MAX_RECORDER_BUFFER];
    large_entry_t *entries;
    large_entry_t entry;

    err = reader(task, large_entries_address, sizeof(large_entry_t) * num_entries, (void **)&entries);
    index = num_entries;
    if ((type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) &&
        (num_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD)) {
        range.address = large_entries_address;
        range.size = round_page(num_entries * sizeof(large_entry_t));
        recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &range, 1);
    if (type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE))
        entry = entries[index];
        if (!LARGE_ENTRY_IS_EMPTY(entry)) {
            range.address = (vm_address_t)LARGE_ENTRY_ADDRESS(entry);
            range.size = (vm_size_t)LARGE_ENTRY_SIZE(entry);
            buffer[count++] = range;
            if (count >= MAX_RECORDER_BUFFER) {
                recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count);
    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE
        | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count);
/********************* HUGE ENTRY UTILITIES ************************/

static huge_entry_t *
huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr)
    for (index = szone->num_huge_entries, huge = szone->huge_entries;
        if ((void *)huge->address == ptr)

huge_entry_append(szone_t *szone, huge_entry_t huge)
    huge_entry_t *new_huge_entries = NULL, *old_huge_entries;
    unsigned num_huge_entries;

    // We do a little dance with locking because doing allocation (even in the
    // default szone) may cause something to get freed in this szone, with a
    // deadlock
    // Returns 1 on success
    num_huge_entries = szone->num_huge_entries;
    SZONE_UNLOCK(szone);
    /* check for counter wrap */
    if ((num_huge_entries + 1) < num_huge_entries)
    /* stale allocation from last time around the loop? */
    if (new_huge_entries)
        szone_free(szone, new_huge_entries);
    new_huge_entries = szone_malloc(szone, (num_huge_entries + 1) * sizeof(huge_entry_t));
    if (new_huge_entries == NULL)
    if (num_huge_entries == szone->num_huge_entries) {
        // No change - our malloc still applies
        old_huge_entries = szone->huge_entries;
        if (num_huge_entries) {
            memcpy(new_huge_entries, old_huge_entries, num_huge_entries * sizeof(huge_entry_t));
        new_huge_entries[szone->num_huge_entries++] = huge;
        szone->huge_entries = new_huge_entries;
        SZONE_UNLOCK(szone);
        szone_free(szone, old_huge_entries);
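
/*
 * Illustrative sketch of the locking dance in huge_entry_append above: the
 * zone lock cannot be held across an allocation that may re-enter the zone,
 * so the code drops the lock, allocates, retakes the lock, and retries if the
 * entry count changed in the meantime. The lock and allocator calls below are
 * stand-ins (pthread mutex, plain malloc), not the zone's own primitives, and
 * the block is #if 0'd out of the build.
 */
#if 0
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static int example_append(pthread_mutex_t *lock, unsigned *count, int **array, int value)
{
    int *fresh = NULL;
    for (;;) {
        pthread_mutex_lock(lock);
        unsigned seen = *count;
        pthread_mutex_unlock(lock);            /* never allocate under the lock */
        free(fresh);                            /* stale buffer from a lost race */
        fresh = malloc((seen + 1) * sizeof(int));
        if (!fresh)
            return 0;
        pthread_mutex_lock(lock);
        if (seen == *count) {                  /* nothing changed: commit */
            if (seen)
                memcpy(fresh, *array, seen * sizeof(int));
            fresh[(*count)++] = value;
            int *old = *array;
            *array = fresh;
            pthread_mutex_unlock(lock);
            free(old);
            return 1;
        }
        pthread_mutex_unlock(lock);            /* raced: loop and try again */
    }
}
#endif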
static kern_return_t
huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder)
    huge_entry_t *entries;

    err = reader(task, huge_entries_address, sizeof(huge_entry_t) * num_entries, (void **)&entries);
    recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, entries, num_entries);
large_and_huge_malloc(szone_t *szone, unsigned num_pages)
    vm_range_t range_to_deallocate;
    huge_entry_t huge_entry;
    large_entry_t large_entry;

        num_pages = 1; // minimal allocation size for this szone
    size = (size_t)num_pages << vm_page_shift;
    range_to_deallocate.size = 0;
    if (num_pages >= (1 << vm_page_shift)) {
        addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_HUGE));
        huge_entry.size = size;
        huge_entry.address = (vm_address_t)addr;
        if (!huge_entry_append(szone, huge_entry))
            return NULL; // we are leaking the allocation here
        szone->num_bytes_in_huge_objects += size;
        addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE));
        if (LOG(szone, addr))
            malloc_printf("in szone_malloc true large allocation at %p for %y\n", (void *)addr, size);
        SZONE_UNLOCK(szone);
        if (large_entry_for_pointer_no_lock(szone, addr)) {
            malloc_printf("freshly allocated is already in use: %p\n", addr);
            large_debug_print(szone);
        if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) {
            // density of hash table too high; grow table
            // we do that under lock to avoid a race
            large_entries_grow_no_lock(szone, &range_to_deallocate);
        large_entry.address_and_num_pages = (uintptr_t)addr | num_pages;
        if (large_entry_for_pointer_no_lock(szone, addr)) {
            malloc_printf("entry about to be added already in use: %p\n", addr);
            large_debug_print(szone);
        large_entry_insert_no_lock(szone, large_entry);
        if (!large_entry_for_pointer_no_lock(szone, (void *)addr)) {
            malloc_printf("can't find entry just added\n");
            large_debug_print(szone);
        szone->num_large_objects_in_use++;
        szone->num_bytes_in_large_objects += size;
    SZONE_UNLOCK(szone);
    if (range_to_deallocate.size) {
        deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0); // we deallocate outside the lock
    return (void *)addr;
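
/*
 * Illustrative sketch of the large-entry encoding used above: a page-aligned
 * allocation address and its page count share one word, the count living in
 * the low bits that page alignment guarantees are zero. A 4KB page (12-bit
 * shift) and the helper names are assumptions for this example (#if 0'd out).
 */
#if 0
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SHIFT 12   /* assumed 4KB pages */

static inline uintptr_t example_large_pack(uintptr_t page_aligned_addr, unsigned num_pages)
{
    /* num_pages must be < (1 << EXAMPLE_PAGE_SHIFT) to fit in the low bits */
    return page_aligned_addr | (uintptr_t)num_pages;
}

static inline uintptr_t example_large_address(uintptr_t packed)
{
    return packed & ~(((uintptr_t)1 << EXAMPLE_PAGE_SHIFT) - 1);
}

static inline size_t example_large_size(uintptr_t packed)
{
    return (size_t)(packed & (((uintptr_t)1 << EXAMPLE_PAGE_SHIFT) - 1)) << EXAMPLE_PAGE_SHIFT;
}
#endif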
free_large_or_huge(szone_t *szone, void *ptr)
    // We have established ptr is page-aligned and not tiny nor small
    large_entry_t *entry;
    vm_range_t vm_range_to_deallocate;

    entry = large_entry_for_pointer_no_lock(szone, ptr);
    vm_range_to_deallocate = large_free_no_lock(szone, entry);
    if (large_entry_for_pointer_no_lock(szone, ptr)) {
        malloc_printf("*** just after freeing %p still in use num_large_entries=%d\n", ptr, szone->num_large_entries);
        large_debug_print(szone);
    } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) {
        vm_range_to_deallocate = *huge;
        *huge = szone->huge_entries[--szone->num_huge_entries]; // last entry fills that spot
        szone->num_bytes_in_huge_objects -= (size_t)vm_range_to_deallocate.size;
        large_debug_print(szone);
        szone_error(szone, "pointer being freed was not allocated", ptr);
    SZONE_UNLOCK(szone); // we release the lock asap
    CHECK(szone, __PRETTY_FUNCTION__);
    // we deallocate_pages, including guard pages
    if (vm_range_to_deallocate.address) {
        if (large_entry_for_pointer_no_lock(szone, (void *)vm_range_to_deallocate.address)) {
            malloc_printf("*** invariant broken: %p still in use num_large_entries=%d\n", vm_range_to_deallocate.address, szone->num_large_entries);
            large_debug_print(szone);
        deallocate_pages(szone, (void *)vm_range_to_deallocate.address, (size_t)vm_range_to_deallocate.size, 0);
try_realloc_large_or_huge_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
    vm_address_t addr = (vm_address_t)ptr + old_size;
    large_entry_t *large_entry, saved_entry;
    huge_entry_t *huge_entry, huge;

    if (old_size != ((old_size >> vm_page_shift) << vm_page_shift)) {
        malloc_printf("*** old_size is %d\n", old_size);
    large_entry = large_entry_for_pointer_no_lock(szone, (void *)addr);
    SZONE_UNLOCK(szone);
        return 0; // large pointer already exists in table - extension is not going to work
    new_size = round_page(new_size);
    /*
     * Ask for allocation at a specific address, and mark as realloc
     * to request coalescing with previous realloc'ed extensions.
     */
    err = vm_allocate(mach_task_self(), &addr, new_size - old_size, VM_MAKE_TAG(VM_MEMORY_REALLOC));
    if (err != KERN_SUCCESS) {
    /*
     * If the new size is still under the large/huge threshold, we can just
     * extend the existing large block.
     *
     * Note: this logic is predicated on the understanding that an allocated
     * block can never really shrink, so that the new size will always be
     * larger than the old size.
     *
     * Note: the use of 1 << vm_page_shift here has to do with the subdivision
     * of the bits in the large_entry_t, and not the size of a page (directly).
     */
    if ((new_size >> vm_page_shift) < (1 << vm_page_shift)) {
        /* extend existing large entry */
        large_entry = large_entry_for_pointer_no_lock(szone, ptr);
            szone_error(szone, "large entry reallocated is not properly in table", ptr);
            /* XXX will cause fault on next reference to entry */
        large_entry->address_and_num_pages = (uintptr_t)ptr | (new_size >> vm_page_shift);
        szone->num_bytes_in_large_objects += new_size - old_size;
    } else if ((old_size >> vm_page_shift) >= (1 << vm_page_shift)) {
        /* extend existing huge entry */
        huge_entry = huge_entry_for_pointer_no_lock(szone, ptr);
            szone_error(szone, "huge entry reallocated is not properly in table", ptr);
            /* XXX will cause fault on next reference to huge_entry */
        huge_entry->size = new_size;
        szone->num_bytes_in_huge_objects += new_size - old_size;
        /* need to convert large entry to huge entry */

        /* release large entry, note we still have the VM allocation */
        large_entry = large_entry_for_pointer_no_lock(szone, ptr);
        saved_entry = *large_entry; // in case we need to put it back
        large_free_no_lock(szone, large_entry);
        szone->num_bytes_in_large_objects -= old_size;

        /* and get a huge entry */
        huge.address = (vm_address_t)ptr;
        huge.size = new_size; /* fix up size */
        SZONE_UNLOCK(szone);
        if (huge_entry_append(szone, huge)) {
            szone->num_bytes_in_huge_objects += new_size;
            return 1; // success!
        // we leak memory (the extra space appended) but data structures are correct
        large_entry_insert_no_lock(szone, saved_entry); // this will reinsert the large entry
    SZONE_UNLOCK(szone); // we release the lock asap
/********************* Zone call backs ************************/

szone_free(szone_t *szone, void *ptr)
    tiny_region_t *tiny_region;
    small_region_t *small_region;

    if (LOG(szone, ptr))
        malloc_printf("in szone_free with %p\n", ptr);

    /*
     * Try to free to a tiny region.
     */
    if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) {
        szone_error(szone, "Non-aligned pointer being freed", ptr);
    if ((tiny_region = tiny_region_for_ptr_no_lock(szone, ptr)) != NULL) {
        free_tiny(szone, ptr, tiny_region);

    /*
     * Try to free to a small region.
     */
    if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) {
        szone_error(szone, "Non-aligned pointer being freed (2)", ptr);
    if ((small_region = small_region_for_ptr_no_lock(szone, ptr)) != NULL) {
        free_small(szone, ptr, small_region);

    /* check that it's a legal large/huge allocation */
    if ((uintptr_t)ptr & (vm_page_size - 1)) {
        szone_error(szone, "non-page-aligned, non-allocated pointer being freed", ptr);
    free_large_or_huge(szone, ptr);
static INLINE void *
szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested)
    if (size <= 31*TINY_QUANTUM) {
        msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
        ptr = tiny_malloc_should_clear(szone, msize, cleared_requested);
    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
        msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
        if (! msize) msize = 1;
        ptr = small_malloc_should_clear(szone, msize, cleared_requested);
        num_pages = round_page(size) >> vm_page_shift;
        if (num_pages == 0) /* Overflowed */
        ptr = large_and_huge_malloc(szone, num_pages);
    if (LOG(szone, ptr))
        malloc_printf("szone_malloc returned %p\n", ptr);

    /*
     * If requested, scribble on allocated memory.
     */
    if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && ptr && !cleared_requested && size)
        memset(ptr, 0xaa, size);
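
/*
 * Illustrative sketch of the size routing just above: a request goes to the
 * tiny allocator, the small allocator, or the page-level path depending only
 * on the requested byte count. The threshold constants here (16-byte tiny
 * quantum, 15KB large threshold) are assumptions made for the example, not
 * the build's real values; the block is #if 0'd out.
 */
#if 0
#include <stddef.h>

enum example_route { EXAMPLE_TINY, EXAMPLE_SMALL, EXAMPLE_LARGE };

static enum example_route example_route_for_size(size_t size)
{
    const size_t tiny_max = 31 * 16;             /* assumed: 31 quanta of 16 bytes */
    const size_t large_threshold = 15 * 1024;    /* assumed threshold */
    if (size <= tiny_max)
        return EXAMPLE_TINY;
    if (size < large_threshold)
        return EXAMPLE_SMALL;
    return EXAMPLE_LARGE;
}
#endif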
szone_malloc(szone_t *szone, size_t size) {
    return szone_malloc_should_clear(szone, size, 0);

szone_calloc(szone_t *szone, size_t num_items, size_t size)
    return szone_malloc_should_clear(szone, num_items * size, 1);

szone_valloc(szone_t *szone, size_t size)
    num_pages = round_page(size) >> vm_page_shift;
    ptr = large_and_huge_malloc(szone, num_pages);
    if (LOG(szone, ptr))
        malloc_printf("szone_valloc returned %p\n", ptr);
szone_size(szone_t *szone, const void *ptr)
    msize_t msize, msize_and_free;
    large_entry_t *entry;

    if (LOG(szone, ptr)) {
        malloc_printf("in szone_size for %p (szone=%p)\n", ptr, szone);

    /*
     * Look for it in a tiny region.
     */
    if ((uintptr_t)ptr & (TINY_QUANTUM - 1))
    if (tiny_region_for_ptr_no_lock(szone, ptr)) {
        msize = get_tiny_meta_header(ptr, &is_free);
        return (is_free) ? 0 : TINY_BYTES_FOR_MSIZE(msize);

    /*
     * Look for it in a small region.
     */
    if ((uintptr_t)ptr & (SMALL_QUANTUM - 1))
    if (small_region_for_ptr_no_lock(szone, ptr)) {
        msize_and_free = *SMALL_METADATA_FOR_PTR(ptr);
        return (msize_and_free & SMALL_IS_FREE) ? 0 : SMALL_BYTES_FOR_MSIZE(msize_and_free);

    /*
     * If not page-aligned, it cannot have come from a large or huge allocation.
     */
    if ((uintptr_t)ptr & (vm_page_size - 1))

    /*
     * Look for it in a large or huge entry.
     */
    entry = large_entry_for_pointer_no_lock(szone, ptr);
    size = LARGE_ENTRY_SIZE(*entry);
    } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) {
    SZONE_UNLOCK(szone);
    if (LOG(szone, ptr)) {
        malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)size);
szone_realloc(szone_t *szone, void *ptr, size_t new_size)
    if (LOG(szone, ptr)) {
        malloc_printf("in szone_realloc for %p, %d\n", ptr, (unsigned)new_size);
    ptr = szone_malloc(szone, new_size);
    old_size = szone_size(szone, ptr);
    szone_error(szone, "pointer being reallocated was not allocated", ptr);
    /* we never shrink an allocation */
    if (old_size >= new_size)

    /*
     * If the old and new sizes both suit the tiny allocator, try to reallocate in-place.
     */
    if ((new_size + TINY_QUANTUM - 1) <= 31 * TINY_QUANTUM) {
        if (try_realloc_tiny_in_place(szone, ptr, old_size, new_size)) {

    /*
     * If the old and new sizes both suit the small allocator, and we're not protecting the
     * small allocations, try to reallocate in-place.
     */
    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) &&
        ((new_size + SMALL_QUANTUM - 1) < LARGE_THRESHOLD) &&
        (old_size > 31 * TINY_QUANTUM)) {
        if (try_realloc_small_in_place(szone, ptr, old_size, new_size)) {

    /*
     * If the allocation's a large or huge allocation, try to reallocate in-place there.
     */
    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (old_size > LARGE_THRESHOLD)) {
        if (try_realloc_large_or_huge_in_place(szone, ptr, old_size, new_size)) {

    /*
     * Can't reallocate in place for whatever reason; allocate a new buffer and copy.
     */
    new_ptr = szone_malloc(szone, new_size);
    if (new_ptr == NULL)

    /*
     * If the allocation's large enough, try to copy using VM. If that fails, or
     * if it's too small, just copy by hand.
     */
    if ((old_size < VM_COPY_THRESHOLD) ||
        vm_copy(mach_task_self(), (vm_address_t)ptr, old_size, (vm_address_t)new_ptr))
        memcpy(new_ptr, ptr, old_size);
    szone_free(szone, ptr);
    if (LOG(szone, ptr)) {
        malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size);
// given a size, returns pointers capable of holding that size
// returns the number of pointers allocated
// may return 0 - this function will do best attempts, but just that
szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count)
    msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
    size_t chunk_size = TINY_BYTES_FOR_MSIZE(msize);
    free_list_t **free_list = szone->tiny_free_list + msize - 1;
    free_list_t *ptr = *free_list;

    if (size > 31*TINY_QUANTUM)
        return 0; // only bother implementing this for tiny
    CHECK(szone, __PRETTY_FUNCTION__);
    SZONE_LOCK(szone); // might as well lock right here to avoid concurrency issues
    while (found < count) {
        set_tiny_meta_header_in_use(ptr, msize);
        ptr->previous = NULL;
        free_list_set_checksum(szone, ptr);
    // Note that we could allocate from the free lists for larger msize
    // But that may un-necessarily fragment - so we might as well let the client do that
    // We could also allocate from szone->tiny_bytes_free_at_end
    // But that means we'll "eat-up" the untouched area faster, increasing the working set
    // So we just return what we have and just that
    szone->num_tiny_objects += found;
    szone->num_bytes_in_tiny_objects += chunk_size * found;
    SZONE_UNLOCK(szone);
szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count)
    tiny_region_t *tiny_region;

    // frees all the pointers in to_be_freed
    // note that to_be_freed may be overwritten during the process
    CHECK(szone, __PRETTY_FUNCTION__);
    while (cc < count) {
        ptr = to_be_freed[cc];
        /* XXX this really slows us down */
        tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
        // this is a tiny pointer
        msize = get_tiny_meta_header(ptr, &is_free);
            break; // a double free; let the standard free deal with it
        tiny_free_no_lock(szone, tiny_region, ptr, msize);
        to_be_freed[cc] = NULL;
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    ptr = to_be_freed[count];
    szone_free(szone, ptr);
szone_destroy(szone_t *szone)
    small_region_t pended_region = 0;
    large_entry_t *large;
    vm_range_t range_to_deallocate;
    tiny_region_t tiny_region;
    small_region_t small_region;

    /* destroy large entries */
    index = szone->num_large_entries;
        large = szone->large_entries + index;
        if (!LARGE_ENTRY_IS_EMPTY(*large)) {
            // we deallocate_pages, including guard pages
            deallocate_pages(szone, (void *)LARGE_ENTRY_ADDRESS(*large), LARGE_ENTRY_SIZE(*large), szone->debug_flags);
    if (szone->num_large_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD) {
        // we do not free in the small chunk case
        large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate);
        if (range_to_deallocate.size)
            deallocate_pages(szone, (void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0);

    /* destroy huge entries */
    index = szone->num_huge_entries;
        huge = szone->huge_entries + index;
        deallocate_pages(szone, (void *)huge->address, huge->size, szone->debug_flags);

    /* destroy tiny regions */
    index = szone->num_tiny_regions;
        tiny_region = szone->tiny_regions[index];
        deallocate_pages(szone, TINY_REGION_ADDRESS(tiny_region), TINY_REGION_SIZE, 0);

    /* destroy small regions; region 0 must go last as it contains the szone */
    index = szone->num_small_regions;
        small_region = szone->small_regions[index];
        /*
         * If we've allocated more than the basic set of small regions, avoid destroying the
         * region that contains the array.
         */
        (SMALL_REGION_FOR_PTR(szone->small_regions) == SMALL_REGION_ADDRESS(small_region))) {
            pended_region = small_region;
            deallocate_pages(szone, (void *)SMALL_REGION_ADDRESS(small_region), SMALL_REGION_SIZE, 0);
    deallocate_pages(NULL, (void *)SMALL_REGION_ADDRESS(pended_region), SMALL_REGION_SIZE, 0);
szone_good_size(szone_t *szone, size_t size)
    if (size <= 31 * TINY_QUANTUM) {
        msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
        if (! msize) msize = 1;
        return TINY_BYTES_FOR_MSIZE(msize);
    if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
        msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
        if (! msize) msize = 1;
        return SMALL_BYTES_FOR_MSIZE(msize);
    num_pages = round_page(size) >> vm_page_shift;
        num_pages = 1; // minimal allocation size for this
    return num_pages << vm_page_shift;
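
/*
 * Illustrative sketch of the good-size computation above: a request is rounded
 * up to a whole number of quanta (or pages, for big requests), which is the
 * size the allocator would actually hand back. The quantum, page size, and
 * threshold are assumed values for this example only (#if 0'd out).
 */
#if 0
#include <stddef.h>

static size_t example_good_size(size_t size)
{
    const size_t quantum = 512;                /* assumed small quantum */
    const size_t page = 4096;                  /* assumed page size */
    const size_t large_threshold = 15 * 1024;  /* assumed threshold */
    if (size < large_threshold) {
        size_t q = (size + quantum - 1) / quantum;
        if (!q)
            q = 1;                             /* never hand back zero bytes */
        return q * quantum;
    }
    return ((size + page - 1) / page) * page;  /* big requests round to pages */
}
#endif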
unsigned szone_check_counter = 0;
unsigned szone_check_start = 0;
unsigned szone_check_modulo = 1;

szone_check_all(szone_t *szone, const char *function)
    tiny_region_t *tiny;
    small_region_t *small;

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);

    /* check tiny regions - could check region count */
    for (index = szone->num_tiny_regions - 1, tiny = szone->tiny_regions;
        if (!tiny_check_region(szone, tiny)) {
            SZONE_UNLOCK(szone);
            szone->debug_flags &= ~ CHECK_REGIONS;
            malloc_printf("*** tiny region %d incorrect szone_check_all(%s) counter=%d\n",
                szone->num_tiny_regions - index, function, szone_check_counter);
            szone_error(szone, "check: tiny region incorrect", NULL);
    for (index = NUM_TINY_SLOTS - 1; index >= 0; index--) {
        if (!tiny_free_list_check(szone, index)) {
            SZONE_UNLOCK(szone);
            szone->debug_flags &= ~ CHECK_REGIONS;
            malloc_printf("*** tiny free list incorrect (slot=%d) szone_check_all(%s) counter=%d\n",
                index, function, szone_check_counter);
            szone_error(szone, "check: tiny free list incorrect", NULL);

    /* check small regions - could check region count */
    for (index = szone->num_small_regions - 1, small = szone->small_regions;
        if (!szone_check_small_region(szone, small)) {
            SZONE_UNLOCK(szone);
            szone->debug_flags &= ~ CHECK_REGIONS;
            malloc_printf("*** small region %d incorrect szone_check_all(%s) counter=%d\n",
                szone->num_small_regions - index, function, szone_check_counter);
            szone_error(szone, "check: small region incorrect", NULL);
    for (index = NUM_SMALL_SLOTS - 1; index >= 0; index--) {
        if (!small_free_list_check(szone, index)) {
            SZONE_UNLOCK(szone);
            szone->debug_flags &= ~ CHECK_REGIONS;
            malloc_printf("*** small free list incorrect (grain=%d) szone_check_all(%s) counter=%d\n", index, function, szone_check_counter);
            szone_error(szone, "check: small free list incorrect", NULL);
    SZONE_UNLOCK(szone);
    // szone_print(szone, 1);
szone_check(szone_t *szone)
    if ((++szone_check_counter % 10000) == 0)
        malloc_printf("at szone_check counter=%d\n", szone_check_counter);
    if (szone_check_counter < szone_check_start)
    if (szone_check_counter % szone_check_modulo)
    return szone_check_all(szone, "");
static kern_return_t
szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder)
    if (!reader) reader = _szone_default_reader;
    err = reader(task, zone_address, sizeof(szone_t), (void **)&szone);
    if (err) return err;
    err = tiny_in_use_enumerator(task, context, type_mask,
        (vm_address_t)szone->tiny_regions, szone->num_tiny_regions, szone->tiny_bytes_free_at_end, reader, recorder);
    if (err) return err;
    err = small_in_use_enumerator(task, context, type_mask,
        (vm_address_t)szone->small_regions, szone->num_small_regions, szone->small_bytes_free_at_end, reader, recorder);
    if (err) return err;
    err = large_in_use_enumerator(task, context, type_mask,
        (vm_address_t)szone->large_entries, szone->num_large_entries, reader, recorder);
    if (err) return err;
    err = huge_in_use_enumerator(task, context, type_mask,
        (vm_address_t)szone->huge_entries, szone->num_huge_entries, reader, recorder);
// Following method is deprecated: use scalable_zone_statistics instead
scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count)
    szone_t *szone = (void *)zone;

    // We do not lock to facilitate debug
    info[4] = szone->num_tiny_objects;
    info[5] = szone->num_bytes_in_tiny_objects;
    info[6] = szone->num_small_objects;
    info[7] = szone->num_bytes_in_small_objects;
    info[8] = szone->num_large_objects_in_use;
    info[9] = szone->num_bytes_in_large_objects;
    info[10] = szone->num_huge_entries;
    info[11] = szone->num_bytes_in_huge_objects;
    info[12] = szone->debug_flags;
    info[0] = info[4] + info[6] + info[8] + info[10];
    info[1] = info[5] + info[7] + info[9] + info[11];
    info[3] = szone->num_tiny_regions * TINY_REGION_SIZE + szone->num_small_regions * SMALL_REGION_SIZE + info[9] + info[11];
    info[2] = info[3] - szone->tiny_bytes_free_at_end - szone->small_bytes_free_at_end;
    memcpy(info_to_fill, info, sizeof(unsigned)*count);
szone_print(szone_t *szone, boolean_t verbose)
    tiny_region_t *region;

    scalable_zone_info((void *)szone, info, 13);
    malloc_printf("Scalable zone %p: inUse=%d(%y) touched=%y allocated=%y flags=%d\n",
        szone, info[0], info[1], info[2], info[3], info[12]);
    malloc_printf("\ttiny=%d(%y) small=%d(%y) large=%d(%y) huge=%d(%y)\n",
        info[4], info[5], info[6], info[7], info[8], info[9], info[10], info[11]);
    malloc_printf("%d tiny regions: \n", szone->num_tiny_regions);
    while (index < szone->num_tiny_regions) {
        region = szone->tiny_regions + index;
        print_tiny_region(verbose, *region, (index == szone->num_tiny_regions - 1) ? szone->tiny_bytes_free_at_end : 0);
    if (verbose) print_tiny_free_list(szone);
    malloc_printf("%d small regions: \n", szone->num_small_regions);
    while (index < szone->num_small_regions) {
        region = szone->small_regions + index;
        print_small_region(szone, verbose, region, (index == szone->num_small_regions - 1) ? szone->small_bytes_free_at_end : 0);
    print_small_free_list(szone);
    SZONE_UNLOCK(szone);
static void
szone_log(malloc_zone_t *zone, void *log_address)
{
    szone_t	*szone = (szone_t *)zone;

    szone->log_address = log_address;
}
static void
szone_force_lock(szone_t *szone)
{
    SZONE_LOCK(szone);
}

static void
szone_force_unlock(szone_t *szone)
{
    SZONE_UNLOCK(szone);
}
boolean_t
scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone)
{
    szone_t	*szone = (szone_t *)zone;

    switch (subzone) {
        case 0:
            stats->blocks_in_use = szone->num_tiny_objects;
            stats->size_in_use = szone->num_bytes_in_tiny_objects;
            stats->size_allocated = szone->num_tiny_regions * TINY_REGION_SIZE;
            stats->max_size_in_use = stats->size_allocated - szone->tiny_bytes_free_at_end;
            return 1;
        case 1:
            stats->blocks_in_use = szone->num_small_objects;
            stats->size_in_use = szone->num_bytes_in_small_objects;
            stats->size_allocated = szone->num_small_regions * SMALL_REGION_SIZE;
            stats->max_size_in_use = stats->size_allocated - szone->small_bytes_free_at_end;
            return 1;
        case 2:
            stats->blocks_in_use = szone->num_large_objects_in_use;
            stats->size_in_use = szone->num_bytes_in_large_objects;
            stats->max_size_in_use = stats->size_allocated = stats->size_in_use;
            return 1;
        case 3:
            stats->blocks_in_use = szone->num_huge_entries;
            stats->size_in_use = szone->num_bytes_in_huge_objects;
            stats->max_size_in_use = stats->size_allocated = stats->size_in_use;
            return 1;
    }
    return 0;
}
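
/*
 * Usage sketch for scalable_zone_statistics(), referenced by the deprecation
 * note on scalable_zone_info() above.  This is a minimal illustration, not
 * part of the zone implementation, so it is kept out of the build with #if 0;
 * the helper name is arbitrary.  Subzones 0..3 are tiny, small, large, huge.
 */
#if 0
static void
print_subzone_statistics(malloc_zone_t *zone)
{
    static const char	*subzone_names[] = { "tiny", "small", "large", "huge" };
    malloc_statistics_t	stats;
    unsigned		subzone;

    for (subzone = 0; subzone < 4; subzone++) {
        if (!scalable_zone_statistics(zone, &stats, subzone))
            continue;	/* unknown subzone index */
        malloc_printf("%s: blocks_in_use=%d size_in_use=%y size_allocated=%y\n",
            subzone_names[subzone], stats.blocks_in_use, stats.size_in_use, stats.size_allocated);
    }
}
#endif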
static void
szone_statistics(szone_t *szone, malloc_statistics_t *stats)
{
    size_t	big_and_huge;

    stats->blocks_in_use =
        szone->num_tiny_objects +
        szone->num_small_objects +
        szone->num_large_objects_in_use +
        szone->num_huge_entries;
    big_and_huge = szone->num_bytes_in_large_objects + szone->num_bytes_in_huge_objects;
    stats->size_in_use = szone->num_bytes_in_tiny_objects + szone->num_bytes_in_small_objects + big_and_huge;
    stats->max_size_in_use = stats->size_allocated =
        szone->num_tiny_regions * TINY_REGION_SIZE +
        szone->num_small_regions * SMALL_REGION_SIZE +
        big_and_huge;

    // Now we account for the untouched areas
    stats->max_size_in_use -= szone->tiny_bytes_free_at_end;
    stats->max_size_in_use -= szone->small_bytes_free_at_end;
}
static const struct malloc_introspection_t szone_introspect = {
    (void *)szone_ptr_in_use_enumerator,
    (void *)szone_good_size,
    (void *)szone_check,
    (void *)szone_print,
    szone_log,
    (void *)szone_force_lock,
    (void *)szone_force_unlock,
    (void *)szone_statistics
}; // marked as const to spare the DATA section
malloc_zone_t *
create_scalable_zone(size_t initial_size, unsigned debug_flags)
{
    szone_t	*szone;
    msize_t	msize;
    msize_t	msize_used;
    msize_t	free_msize;

    /*
     * Sanity-check our build-time assumptions about the size of a page.
     * Since we have sized various things assuming the default page size,
     * attempting to determine it dynamically is not useful.
     */
    if ((vm_page_size != _vm_page_size) || (vm_page_shift != _vm_page_shift)) {
        malloc_printf("*** FATAL ERROR - machine page size does not match our assumptions.\n");
        exit(-1);
    }

    /* get memory for the zone */
    szone = allocate_pages(NULL, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC));
    if (!szone)
        return NULL;

    /* set up the szone structure */
    szone->tiny_regions = szone->initial_tiny_regions;
    szone->small_regions = szone->initial_small_regions;
    szone->basic_zone.version = 3;
    szone->basic_zone.size = (void *)szone_size;
    szone->basic_zone.malloc = (void *)szone_malloc;
    szone->basic_zone.calloc = (void *)szone_calloc;
    szone->basic_zone.valloc = (void *)szone_valloc;
    szone->basic_zone.free = (void *)szone_free;
    szone->basic_zone.realloc = (void *)szone_realloc;
    szone->basic_zone.destroy = (void *)szone_destroy;
    szone->basic_zone.batch_malloc = (void *)szone_batch_malloc;
    szone->basic_zone.batch_free = (void *)szone_batch_free;
    szone->basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;
    szone->debug_flags = debug_flags;

    /* as the szone is allocated out of the first small region, reflect that allocation */
    szone->small_regions[0] = szone;
    szone->num_small_regions = 1;
    msize = SMALL_MSIZE_FOR_BYTES(sizeof(szone_t) + SMALL_QUANTUM - 1);
    free_msize = NUM_SMALL_BLOCKS - msize;
    msize_used = msize; szone->num_small_objects++;	/* the szone header itself counts as a small allocation */
    *SMALL_METADATA_FOR_PTR(szone) = msize;
    *(SMALL_METADATA_FOR_PTR(szone) + msize) = free_msize;
    szone->small_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(free_msize);

    LOCK_INIT(szone->lock);
#if 0
#warning CHECK_REGIONS enabled
    szone->debug_flags |= CHECK_REGIONS;
#endif
#if 0
#warning LOG enabled
    szone->log_address = ~0;
#endif
    CHECK(szone, __PRETTY_FUNCTION__);
    return (malloc_zone_t *)szone;
}
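
/*
 * Usage sketch for create_scalable_zone(): how a client might stand up an
 * additional scalable zone and route allocations through it.  Illustration
 * only, kept out of the build with #if 0; the helper and zone names are
 * arbitrary, while malloc_zone_register(), malloc_set_zone_name(),
 * malloc_zone_malloc() and malloc_zone_free() are the standard malloc zone
 * APIs from <malloc/malloc.h>.
 */
#if 0
static malloc_zone_t *
create_and_register_private_zone(void)
{
    malloc_zone_t *zone = create_scalable_zone(0, 0 /* no debug flags */);

    if (!zone)
        return NULL;
    malloc_set_zone_name(zone, "MyPrivateZone");
    malloc_zone_register(zone);
    return zone;
}

/* Allocate and release a block through the private zone. */
static void
private_zone_example(malloc_zone_t *zone)
{
    void *p = malloc_zone_malloc(zone, 128);

    malloc_zone_free(zone, p);
}
#endif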
/********* Support code for emacs unexec ************/

/* History of freezedry version numbers:
 *
 * 1) Old malloc (before the scalable malloc implementation in this file
 *    existed).
 * 2) Original freezedrying code for scalable malloc.  This code was apparently
 *    based on the old freezedrying code and was fundamentally flawed in its
 *    assumption that tracking allocated memory regions was adequate to fake
 *    operations on freezedried memory.  This doesn't work, since scalable
 *    malloc does not store flags in front of large page-aligned allocations.
 * 3) Original szone-based freezedrying code.
 * 4) Fresher malloc with tiny zone
 * 5) 32/64-bit compatible malloc
 *
 * No backward compatibility is provided between versions, but the version
 * number does make it possible for malloc_jumpstart() to return an error if
 * the application was freezedried with an older version of malloc.
 */
#define MALLOC_FREEZEDRY_VERSION 5
typedef struct {
    unsigned	version;
    unsigned	nszones;
    szone_t	*szones;
} malloc_frozen;

static void *
frozen_malloc(szone_t *zone, size_t new_size)
{
    return malloc(new_size);
}

static void *
frozen_calloc(szone_t *zone, size_t num_items, size_t size)
{
    return calloc(num_items, size);
}

static void *
frozen_valloc(szone_t *zone, size_t new_size)
{
    return valloc(new_size);
}

static void *
frozen_realloc(szone_t *zone, void *ptr, size_t new_size)
{
    size_t	old_size = szone_size(zone, ptr);
    void	*new_ptr;

    if (new_size <= old_size) {
        return ptr;
    }
    new_ptr = malloc(new_size);
    if (old_size > 0) {
        memcpy(new_ptr, ptr, old_size);
    }
    return new_ptr;
}

static void
frozen_free(szone_t *zone, void *ptr)
{
}

static void
frozen_destroy(szone_t *zone)
{
}
/********* Pseudo-private API for emacs unexec ************/

/*
 * malloc_freezedry() records all of the szones in use, so that they can be
 * partially reconstituted by malloc_jumpstart().  Due to the differences
 * between reconstituted memory regions and those created by the szone code,
 * care is taken not to reallocate from the freezedried memory, except in the
 * case of a non-growing realloc().
 *
 * Due to the flexibility provided by the zone registration mechanism, it is
 * impossible to implement generic freezedrying for any zone type.  This code
 * only handles applications that use the szone allocator, so malloc_freezedry()
 * returns 0 (error) if any non-szone zones are encountered.
 */
uintptr_t
malloc_freezedry(void)
{
    extern unsigned malloc_num_zones;
    extern malloc_zone_t **malloc_zones;
    malloc_frozen *data;
    unsigned i;

    /* Allocate space in which to store the freezedry state. */
    data = (malloc_frozen *) malloc(sizeof(malloc_frozen));

    /* Set freezedry version number so that malloc_jumpstart() can check for compatibility. */
    data->version = MALLOC_FREEZEDRY_VERSION;

    /* Allocate the array of szone pointers. */
    data->nszones = malloc_num_zones;
    data->szones = (szone_t *) calloc(malloc_num_zones, sizeof(szone_t));

    /*
     * Fill in the array of szone structures.  They are copied rather than
     * referenced, since the originals are likely to be clobbered during malloc
     * initialization.
     */
    for (i = 0; i < malloc_num_zones; i++) {
        if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) {
            /* Unknown zone type. */
            free(data->szones);
            free(data);
            return 0;
        }
        memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t));
    }

    return((uintptr_t)data);
}
int
malloc_jumpstart(uintptr_t cookie)
{
    malloc_frozen *data = (malloc_frozen *)cookie;
    unsigned i;

    if (data->version != MALLOC_FREEZEDRY_VERSION) {
        /* Unsupported freezedry version. */
        return 1;
    }

    for (i = 0; i < data->nszones; i++) {
        /* Set function pointers.  Even the functions that stay the same must be
         * set, since there are no guarantees that they will be mapped to the
         * same addresses. */
        data->szones[i].basic_zone.size = (void *) szone_size;
        data->szones[i].basic_zone.malloc = (void *) frozen_malloc;
        data->szones[i].basic_zone.calloc = (void *) frozen_calloc;
        data->szones[i].basic_zone.valloc = (void *) frozen_valloc;
        data->szones[i].basic_zone.free = (void *) frozen_free;
        data->szones[i].basic_zone.realloc = (void *) frozen_realloc;
        data->szones[i].basic_zone.destroy = (void *) frozen_destroy;
        data->szones[i].basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;

        /* Register the freezedried zone. */
        malloc_zone_register(&data->szones[i].basic_zone);
    }

    return 0;
}
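
/*
 * Usage sketch for the pseudo-private API above (the emacs unexec flow).
 * Illustration only, kept out of the build with #if 0; the global and helper
 * names are arbitrary.  The dumper calls malloc_freezedry() just before
 * writing the executable image and arranges for the returned cookie to be
 * preserved in that image; at startup, the dumped executable hands the cookie
 * back to malloc_jumpstart().
 */
#if 0
static uintptr_t saved_malloc_cookie;	/* preserved in the dumped image */

static int
freezedry_at_dump_time(void)
{
    saved_malloc_cookie = malloc_freezedry();
    /* malloc_freezedry() returns 0 if it found a zone it cannot handle */
    return saved_malloc_cookie ? 0 : -1;
}

static int
jumpstart_at_startup(void)
{
    /* Returns 0 on success, nonzero if the cookie came from an incompatible
     * (older) freezedry version. */
    return malloc_jumpstart(saved_malloc_cookie);
}
#endif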