1 /*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25
26 /* Author: Bertrand Serlet, August 1999 */
27
28 #include "scalable_malloc.h"
29
30 #include <pthread_internals.h>
31
32 #include <unistd.h>
33 #include <libc.h>
34 #include <mach/vm_statistics.h>
35 #include <mach/mach_init.h>
36
37 /********************* DEFINITIONS ************************/
38
39 #define DEBUG_MALLOC 0 // set to one to debug malloc itself
40
41 #define DEBUG_CLIENT 0 // set to one to help debug a nasty memory smasher
42
43 #if DEBUG_MALLOC
44 #warning DEBUG_MALLOC ENABLED
45 # define INLINE
46 # define CHECK_LOCKED(szone, fun) \
47 do { \
48 if (__is_threaded && TRY_LOCK(szone->lock)) { \
49 malloc_printf("*** lock was not set %p in %s\n", szone->lock, fun); \
50 } \
51 } while (0)
52 #else
53 # define INLINE __inline__
54 # define CHECK_LOCKED(szone, fun) {}
55 #endif
56
57 /*
58 * Access to global variables is slow, so optimise our handling of vm_page_size
59 * and vm_page_shift.
60 */
61 #define _vm_page_size vm_page_size /* to get to the originals */
62 #define _vm_page_shift vm_page_shift
63 #define vm_page_size 4096 /* our normal working sizes */
64 #define vm_page_shift 12
65
66 typedef unsigned short msize_t; // a size in multiples of TINY_QUANTUM or SMALL_QUANTUM
67
68 /*
69 * Note that in the LP64 case, this is 24 bytes, necessitating the 32-byte tiny grain size.
70 */
71 typedef struct {
72 uintptr_t checksum;
73 void *previous;
74 void *next;
75 } free_list_t;
76
77 typedef struct {
78 uintptr_t address_and_num_pages;
79 // this type represents both an address and a number of pages
80 // the low bits are the number of pages; the high bits are the address
81 // note that the exact number of bits used for the page count depends on the page size
82 // also, this cannot represent pointers larger than 1 << (vm_page_shift * 2)
83 } compact_range_t;
84
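/*
 * Worked example (illustrative, 4KB pages): a 3-page block at 0x00a00000 is stored as
 *   address_and_num_pages = 0x00a00000 | 3 = 0x00a00003
 * The LARGE_ENTRY_ADDRESS()/LARGE_ENTRY_NUM_PAGES() macros further down recover the two halves.
 */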
85 typedef unsigned char grain_t;
86
87 #define CHECK_REGIONS (1 << 31)
88
89 #ifdef __LP64__
90 # define CHECKSUM_MAGIC 0xdeadbeef0badc0de
91 #else
92 # define CHECKSUM_MAGIC 0x357B
93 #endif
94
95 #define MAX_RECORDER_BUFFER 256
96
97 /********************* DEFINITIONS for tiny ************************/
98
99 /*
100 * Memory in the Tiny range is allocated from regions (heaps) pointed to by the szone's tiny_regions
101 * pointer.
102 *
103 * Each region is laid out as a heap (1MB in 32-bit mode, 2MB in 64-bit mode), followed by a header
104 * block. The header block is arranged:
105 *
106 * 0x0
107 * header bits
108 * 0x2000
109 * 0xffffffff pad word
110 * 0x2004
111 * in-use bits
112 * 0x4004
113 * pad word (not written)
114 *
115 * Each bitfield comprises NUM_TINY_BLOCKS bits, and refers to the corresponding TINY_QUANTUM block
116 * within the heap.
117 *
118 * The bitfields are used to encode the state of memory within the heap. The header bit indicates
119 * that the corresponding quantum is the first quantum in a block (either in use or free). The
120 * in-use bit is set for the header if the block has been handed out (allocated). If the header
121 * bit is not set, the in-use bit is invalid.
122 *
123 * The szone maintains an array of 32 freelists, each of which is used to hold free objects
124 * of the corresponding quantum size.
125 *
126 * A free block is laid out as:
127 *
128 * Offset (32-bit mode) (64-bit mode)
129 * 0x0 0x0
130 * checksum
131 * 0x4 0x08
132 * previous
133 * 0x8 0x10
134 * next
135 * 0xc 0x18
136 * size (in quantum counts)
137 * end - 2 end - 2
138 * size (in quantum counts)
139 * end end
140 *
141 * All fields are pointer-sized, except for the size which is an unsigned short.
142 *
143 */
144
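/*
 * Worked example of the header offsets above (illustrative, 32-bit case):
 *   heap        = NUM_TINY_BLOCKS * TINY_QUANTUM = 65536 * 16 = 1MB
 *   header bits = NUM_TINY_BLOCKS / 8 = 8192 bytes, so they end at 0x2000
 *   pad word    = 4 bytes, bringing the in-use bits to 0x2004
 *   in-use bits = another 8192 bytes, ending at 0x4004
 * TINY_REGION_SIZE below rounds the total up to a whole number of pages.
 */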
145 #ifdef __LP64__
146 # define SHIFT_TINY_QUANTUM 5 // Required to fit free_list_t
147 #else
148 # define SHIFT_TINY_QUANTUM 4 // Required for AltiVec
149 #endif
150 #define TINY_QUANTUM (1 << SHIFT_TINY_QUANTUM)
151
152 #define FOLLOWING_TINY_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_TINY_QUANTUM))
153
154 #define NUM_TINY_SLOTS 32 // number of slots for free-lists
155
156 #define SHIFT_NUM_TINY_BLOCKS 16
157 #define NUM_TINY_BLOCKS (1 << SHIFT_NUM_TINY_BLOCKS)
158 #define TINY_BLOCKS_ALIGN (SHIFT_NUM_TINY_BLOCKS + SHIFT_TINY_QUANTUM)
159
160 /*
161 * Enough room for the data, followed by the bit arrays (2-bits per block) plus 2 words of padding
162 * as our bitmap operators overflow, plus rounding to the nearest page.
163 */
164 #define TINY_REGION_SIZE ((NUM_TINY_BLOCKS * TINY_QUANTUM + (NUM_TINY_BLOCKS >> 2) + 8 + vm_page_size - 1) & ~ (vm_page_size - 1))
165
166 /*
167 * Obtain the size of a free tiny block (in msize_t units).
168 */
169 #ifdef __LP64__
170 # define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[14])
171 #else
172 # define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[6])
173 #endif
174 /*
175 * The size is also kept at the very end of a free block.
176 */
177 #define TINY_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]
178
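/*
 * Worked example (illustrative, 32-bit): for a free block at ptr, the leading size is the
 * 7th short, i.e. byte offset 12, matching the 0xc slot in the layout above; the trailing
 * copy occupies the last two bytes of the block, so the following block can reach it as
 * TINY_PREVIOUS_MSIZE(follower).
 */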
179 /*
180 * Beginning and end pointers for a region's heap.
181 */
182 #define TINY_REGION_ADDRESS(region) ((void *)(region))
183 #define TINY_REGION_END(region) (TINY_REGION_ADDRESS(region) + (1 << TINY_BLOCKS_ALIGN))
184
185 /*
186 * Locate the heap base for a pointer known to be within a tiny region.
187 */
188 #define TINY_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << TINY_BLOCKS_ALIGN) - 1)))
189
190 /*
191 * Convert between byte and msize units.
192 */
193 #define TINY_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_TINY_QUANTUM)
194 #define TINY_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_TINY_QUANTUM)
195
196 /*
197 * Locate the block header for a pointer known to be within a tiny region.
198 */
199 #define TINY_BLOCK_HEADER_FOR_PTR(_p) ((void *)(((((uintptr_t)(_p)) >> TINY_BLOCKS_ALIGN) + 1) << TINY_BLOCKS_ALIGN))
200
201 /*
202 * Locate the inuse map for a given block header pointer.
203 */
204 #define TINY_INUSE_FOR_HEADER(_h) ((void *)((uintptr_t)(_h) + (NUM_TINY_BLOCKS >> 3) + 4))
205
206 /*
207 * Compute the bitmap index for a pointer known to be within a tiny region.
208 */
209 #define TINY_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1))
210
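/*
 * Illustrative sketch (not part of the original source): how the macros above cooperate to
 * locate the metadata for a tiny pointer. example_describe_tiny_ptr is a hypothetical name.
 */
#if 0 /* illustration only */
static void
example_describe_tiny_ptr(const void *ptr)
{
    void *region = TINY_REGION_FOR_PTR(ptr); // heap base, (1 << TINY_BLOCKS_ALIGN) aligned
    unsigned char *header = TINY_BLOCK_HEADER_FOR_PTR(ptr); // bitmaps start right after the heap
    unsigned char *inuse = TINY_INUSE_FOR_HEADER(header); // second bitmap, past the pad word
    msize_t index = TINY_INDEX_FOR_PTR(ptr); // which TINY_QUANTUM within the region

    malloc_printf("%p: region=%p header=%p inuse=%p index=%d\n", ptr, region, header, inuse, index);
}
#endif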
211 typedef void *tiny_region_t;
212
213 #define INITIAL_NUM_TINY_REGIONS 24 // must be even for szone to be aligned
214
215 #define TINY_CACHE 1 // This governs a last-free cache of 1 that bypasses the free-list
216
217 #if ! TINY_CACHE
218 #warning TINY_CACHE turned off
219 #endif
220
221 /********************* DEFINITIONS for small ************************/
222
223 /*
224 * Memory in the Small range is allocated from regions (heaps) pointed to by the szone's small_regions
225 * pointer.
226 *
227 * Each region is laid out as a heap (8MB in 32-bit and 64-bit mode), followed by the metadata array.
228 * The array is arranged as an array of shorts, one for each SMALL_QUANTUM in the heap.
229 *
230 * The MSB of each short is set for the first quantum in a free block. The low 15 bits encode the
231 * block size (in SMALL_QUANTUM units), or are zero if the quantum is not the first in a block.
232 *
233 * The szone maintains an array of 32 freelists, each of which is used to hold free objects
234 * of the corresponding quantum size.
235 *
236 * A free block is laid out as:
237 *
238 * Offset (32-bit mode) (64-bit mode)
239 * 0x0 0x0
240 * checksum
241 * 0x4 0x08
242 * previous
243 * 0x8 0x10
244 * next
245 * 0xc 0x18
246 * size (in quantum counts)
247 * end - 2 end - 2
248 * size (in quantum counts)
249 * end end
250 *
251 * All fields are pointer-sized, except for the size which is an unsigned short.
252 *
253 */
254
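/*
 * Worked example of the layout above (illustrative):
 *   heap     = NUM_SMALL_BLOCKS * SMALL_QUANTUM = 16384 * 512 = 8MB
 *   metadata = NUM_SMALL_BLOCKS shorts          = 16384 * 2   = 32KB
 * so SMALL_REGION_SIZE below works out to 8MB + 32KB.
 */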
255 #define SMALL_IS_FREE (1 << 15)
256
257 #define SHIFT_SMALL_QUANTUM (SHIFT_TINY_QUANTUM + 5) // 9
258 #define SMALL_QUANTUM (1 << SHIFT_SMALL_QUANTUM) // 512 bytes
259
260 #define FOLLOWING_SMALL_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_SMALL_QUANTUM))
261
262 #define NUM_SMALL_SLOTS 32 // number of slots for free-lists
263
264 /*
265 * We can only represent up to 1<<15 for msize; but we choose to stay even below that to avoid the
266 * convention msize=0 => msize = (1<<15)
267 */
268 #define SHIFT_NUM_SMALL_BLOCKS 14
269 #define NUM_SMALL_BLOCKS (1 << SHIFT_NUM_SMALL_BLOCKS)
270 #define SMALL_BLOCKS_ALIGN (SHIFT_NUM_SMALL_BLOCKS + SHIFT_SMALL_QUANTUM) // 23
271 #define SMALL_REGION_SIZE (NUM_SMALL_BLOCKS * SMALL_QUANTUM + NUM_SMALL_BLOCKS * 2) // data + meta data
272
273 #define SMALL_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]
274
275 /*
276 * Convert between byte and msize units.
277 */
278 #define SMALL_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_SMALL_QUANTUM)
279 #define SMALL_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_SMALL_QUANTUM)
280
281
282 #define SMALL_REGION_ADDRESS(region) ((unsigned char *)region)
283 #define SMALL_REGION_END(region) (SMALL_REGION_ADDRESS(region) + (1 << SMALL_BLOCKS_ALIGN))
284
285 /*
286 * Locate the heap base for a pointer known to be within a small region.
287 */
288 #define SMALL_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << SMALL_BLOCKS_ALIGN) - 1)))
289
290 /*
291 * Locate the metadata base for a pointer known to be within a small region.
292 */
293 #define SMALL_META_HEADER_FOR_PTR(_p) ((msize_t *)(((((uintptr_t)(_p)) >> SMALL_BLOCKS_ALIGN) + 1) << SMALL_BLOCKS_ALIGN))
294
295 /*
296 * Compute the metadata index for a pointer known to be within a small region.
297 */
298 #define SMALL_META_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_SMALL_QUANTUM) & (NUM_SMALL_BLOCKS - 1))
299
300 /*
301 * Find the metadata word for a pointer known to be within a small region.
302 */
303 #define SMALL_METADATA_FOR_PTR(_p) (SMALL_META_HEADER_FOR_PTR(_p) + SMALL_META_INDEX_FOR_PTR(_p))
304
305 /*
306 * Determine whether a pointer known to be within a small region points to memory which is free.
307 */
308 #define SMALL_PTR_IS_FREE(_p) (*SMALL_METADATA_FOR_PTR(_p) & SMALL_IS_FREE)
309
310 /*
311 * Extract the msize value for a pointer known to be within a small region.
312 */
313 #define SMALL_PTR_SIZE(_p) (*SMALL_METADATA_FOR_PTR(_p) & ~SMALL_IS_FREE)
314
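/*
 * Illustrative sketch (not part of the original source): decoding the metadata word for a
 * small pointer with the macros above. example_small_block_info is a hypothetical name.
 */
#if 0 /* illustration only */
static void
example_small_block_info(const void *ptr)
{
    msize_t word = *SMALL_METADATA_FOR_PTR(ptr); // one short per SMALL_QUANTUM

    // the MSB flags a free block; the low 15 bits give the size in SMALL_QUANTUM units
    malloc_printf("%p: %s, %d quanta (%d bytes)\n", ptr,
        (word & SMALL_IS_FREE) ? "free" : "in use",
        word & ~SMALL_IS_FREE, SMALL_BYTES_FOR_MSIZE(word & ~SMALL_IS_FREE));
}
#endif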
315 typedef void * small_region_t;
316
317 #define INITIAL_NUM_SMALL_REGIONS 6 // must be even for szone to be aligned
318
319 #define PROTECT_SMALL 0 // Should be 0: 1 is too slow for normal use
320
321 #define SMALL_CACHE 1
322 #if !SMALL_CACHE
323 #warning SMALL_CACHE turned off
324 #endif
325
326 /********************* DEFINITIONS for large ************************/
327
328 #define LARGE_THRESHOLD (15 * 1024) // at or above this use "large"
329
330 #if (LARGE_THRESHOLD > NUM_SMALL_SLOTS * SMALL_QUANTUM)
331 #error LARGE_THRESHOLD should never exceed NUM_SMALL_SLOTS * SMALL_QUANTUM
332 #endif
333
334 #define VM_COPY_THRESHOLD (40 * 1024)
335 // When all memory is touched after a copy, vm_copy() is always a loss
336 // But if the memory is only read, vm_copy() wins over memmove() at 3 or 4 pages (on a G3/300MHz)
337 // This must be larger than LARGE_THRESHOLD
338
339 /*
340 * Given a large_entry, return the address of the allocated block.
341 */
342 #define LARGE_ENTRY_ADDRESS(entry) \
343 (void *)(((entry).address_and_num_pages >> vm_page_shift) << vm_page_shift)
344
345 /*
346 * Given a large entry, return the number of pages or bytes in the allocated block.
347 */
348 #define LARGE_ENTRY_NUM_PAGES(entry) \
349 ((entry).address_and_num_pages & (vm_page_size - 1))
350 #define LARGE_ENTRY_SIZE(entry) \
351 (LARGE_ENTRY_NUM_PAGES(entry) << vm_page_shift)
352
353 /*
354 * Compare a pointer with a large entry.
355 */
356 #define LARGE_ENTRY_MATCHES(entry,ptr) \
357 ((((entry).address_and_num_pages - (uintptr_t)(ptr)) >> vm_page_shift) == 0)
358
359 #define LARGE_ENTRY_IS_EMPTY(entry) (((entry).address_and_num_pages) == 0)
360
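/*
 * Illustrative sketch (not part of the original source): packing a freshly allocated block
 * into a large_entry_t and reading it back with the macros above. example_make_large_entry
 * is a hypothetical name.
 */
#if 0 /* illustration only */
static large_entry_t
example_make_large_entry(void *addr, unsigned num_pages)
{
    large_entry_t entry;

    // addr is page aligned and num_pages < vm_page_size, so the count fits in the low bits
    entry.address_and_num_pages = (uintptr_t)addr | num_pages;
    // LARGE_ENTRY_ADDRESS(entry)   == addr
    // LARGE_ENTRY_NUM_PAGES(entry) == num_pages
    // LARGE_ENTRY_SIZE(entry)      == num_pages << vm_page_shift
    return entry;
}
#endif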
361 typedef compact_range_t large_entry_t;
362
363 /********************* DEFINITIONS for huge ************************/
364
365 typedef vm_range_t huge_entry_t;
366
367 /********************* zone itself ************************/
368
369 typedef struct {
370 malloc_zone_t basic_zone;
371 pthread_lock_t lock;
372 unsigned debug_flags;
373 void *log_address;
374
375 /* Regions for tiny objects */
376 unsigned num_tiny_regions;
377 tiny_region_t *tiny_regions;
378 void *last_tiny_free; // low SHIFT_TINY_QUANTUM bits indicate the msize
379 unsigned tiny_bitmap; // cache of the 32 free lists
380 free_list_t *tiny_free_list[NUM_TINY_SLOTS]; // 31 free lists for 1*TINY_QUANTUM to 31*TINY_QUANTUM plus 1 for blocks of 32*TINY_QUANTUM and larger
381 size_t tiny_bytes_free_at_end; // the free space at the end of the last region is treated as a big in-use block that is not accounted for
382 unsigned num_tiny_objects;
383 size_t num_bytes_in_tiny_objects;
384
385 /* Regions for small objects */
386 unsigned num_small_regions;
387 small_region_t *small_regions;
388 void *last_small_free; // low SHIFT_SMALL_QUANTUM bits indicate the msize
389 unsigned small_bitmap; // cache of the free list
390 free_list_t *small_free_list[NUM_SMALL_SLOTS];
391 size_t small_bytes_free_at_end; // the free space at the end of the last region is treated as a big in-use block that is not accounted for
392 unsigned num_small_objects;
393 size_t num_bytes_in_small_objects;
394
395 /* large objects: vm_page_shift <= log2(size) < 2 *vm_page_shift */
396 unsigned num_large_objects_in_use;
397 unsigned num_large_entries;
398 large_entry_t *large_entries; // hashed by location; null entries don't count
399 size_t num_bytes_in_large_objects;
400
401 /* huge objects: log2(size) >= 2 *vm_page_shift */
402 unsigned num_huge_entries;
403 huge_entry_t *huge_entries;
404 size_t num_bytes_in_huge_objects;
405
406 /* Initial region list */
407 tiny_region_t initial_tiny_regions[INITIAL_NUM_TINY_REGIONS];
408 small_region_t initial_small_regions[INITIAL_NUM_SMALL_REGIONS];
409 } szone_t;
410
411 #if DEBUG_MALLOC || DEBUG_CLIENT
412 static void szone_sleep(void);
413 #endif
414 static void szone_error(szone_t *szone, const char *msg, const void *ptr);
415 static void protect(szone_t *szone, void *address, size_t size, unsigned protection, unsigned debug_flags);
416 static void *allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label);
417 static void deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags);
418 static kern_return_t _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr);
419
420 static INLINE void free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg);
421 static INLINE void free_list_set_checksum(szone_t *szone, free_list_t *ptr);
422 static unsigned free_list_count(const free_list_t *ptr);
423
424 static INLINE msize_t get_tiny_meta_header(const void *ptr, boolean_t *is_free);
425 static INLINE void set_tiny_meta_header_in_use(const void *ptr, msize_t msize);
426 static INLINE void set_tiny_meta_header_middle(const void *ptr);
427 static INLINE void set_tiny_meta_header_free(const void *ptr, msize_t msize);
428 static INLINE boolean_t tiny_meta_header_is_free(const void *ptr);
429 static INLINE void *tiny_previous_preceding_free(void *ptr, msize_t *prev_msize);
430 static INLINE void tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize);
431 static INLINE void tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize);
432 static INLINE tiny_region_t *tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr);
433 static INLINE void tiny_free_no_lock(szone_t *szone, tiny_region_t *region, void *ptr, msize_t msize);
434 static void *tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
435 static INLINE boolean_t try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size);
436 static boolean_t tiny_check_region(szone_t *szone, tiny_region_t *region);
437 static kern_return_t tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t tiny_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder);
438 static INLINE void *tiny_malloc_from_free_list(szone_t *szone, msize_t msize);
439 static INLINE void *tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested);
440 static INLINE void free_tiny(szone_t *szone, void *ptr, tiny_region_t *tiny_region);
441 static void print_tiny_free_list(szone_t *szone);
442 static void print_tiny_region(boolean_t verbose, tiny_region_t region, size_t bytes_at_end);
443 static boolean_t tiny_free_list_check(szone_t *szone, grain_t slot);
444
445 static INLINE void small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize);
446 static INLINE void small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize);
447 static INLINE void small_meta_header_set_middle(msize_t *meta_headers, msize_t index);
448 static void small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize);
449 static void small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize);
450 static INLINE small_region_t *small_region_for_ptr_no_lock(szone_t *szone, const void *ptr);
451 static INLINE void small_free_no_lock(szone_t *szone, small_region_t *region, void *ptr, msize_t msize);
452 static void *small_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
453 static INLINE boolean_t try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size);
454 static boolean_t szone_check_small_region(szone_t *szone, small_region_t *region);
455 static kern_return_t small_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t small_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder);
456 static INLINE void *small_malloc_from_free_list(szone_t *szone, msize_t msize);
457 static INLINE void *small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested);
458 static INLINE void *small_malloc_cleared_no_lock(szone_t *szone, msize_t msize);
459 static INLINE void free_small(szone_t *szone, void *ptr, small_region_t *small_region);
460 static void print_small_free_list(szone_t *szone);
461 static void print_small_region(szone_t *szone, boolean_t verbose, small_region_t *region, size_t bytes_at_end);
462 static boolean_t small_free_list_check(szone_t *szone, grain_t grain);
463
464 #if DEBUG_MALLOC
465 static void large_debug_print(szone_t *szone);
466 #endif
467 static large_entry_t *large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
468 static void large_entry_insert_no_lock(szone_t *szone, large_entry_t range);
469 static INLINE void large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry);
470 static INLINE large_entry_t *large_entries_alloc_no_lock(szone_t *szone, unsigned num);
471 static void large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate);
472 static void large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate);
473 static vm_range_t large_free_no_lock(szone_t *szone, large_entry_t *entry);
474 static kern_return_t large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder);
475 static huge_entry_t *huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
476 static boolean_t huge_entry_append(szone_t *szone, huge_entry_t huge);
477 static kern_return_t huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder);
478 static void *large_and_huge_malloc(szone_t *szone, unsigned num_pages);
479 static INLINE void free_large_or_huge(szone_t *szone, void *ptr);
480 static INLINE int try_realloc_large_or_huge_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size);
481
482 static void szone_free(szone_t *szone, void *ptr);
483 static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested);
484 static void *szone_malloc(szone_t *szone, size_t size);
485 static void *szone_calloc(szone_t *szone, size_t num_items, size_t size);
486 static void *szone_valloc(szone_t *szone, size_t size);
487 static size_t szone_size(szone_t *szone, const void *ptr);
488 static void *szone_realloc(szone_t *szone, void *ptr, size_t new_size);
489 static unsigned szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count);
490 static void szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count);
491 static void szone_destroy(szone_t *szone);
492 static size_t szone_good_size(szone_t *szone, size_t size);
493
494 static boolean_t szone_check_all(szone_t *szone, const char *function);
495 static boolean_t szone_check(szone_t *szone);
496 static kern_return_t szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder);
497 static void szone_print(szone_t *szone, boolean_t verbose);
498 static void szone_log(malloc_zone_t *zone, void *log_address);
499 static void szone_force_lock(szone_t *szone);
500 static void szone_force_unlock(szone_t *szone);
501
502 static void szone_statistics(szone_t *szone, malloc_statistics_t *stats);
503
504 static void *frozen_malloc(szone_t *zone, size_t new_size);
505 static void *frozen_calloc(szone_t *zone, size_t num_items, size_t size);
506 static void *frozen_valloc(szone_t *zone, size_t new_size);
507 static void *frozen_realloc(szone_t *zone, void *ptr, size_t new_size);
508 static void frozen_free(szone_t *zone, void *ptr);
509 static void frozen_destroy(szone_t *zone);
510
511 #if DEBUG_MALLOC
512 # define LOG(szone,ptr) \
513 (szone->log_address && (((uintptr_t)szone->log_address == -1) || (szone->log_address == (void *)(ptr))))
514 #else
515 # define LOG(szone,ptr) 0
516 #endif
517
518 #define SZONE_LOCK(szone) \
519 do { \
520 LOCK(szone->lock); \
521 } while (0)
522
523 #define SZONE_UNLOCK(szone) \
524 do { \
525 UNLOCK(szone->lock); \
526 } while (0)
527
528 #define LOCK_AND_NOTE_LOCKED(szone,locked) \
529 do { \
530 CHECK(szone, __PRETTY_FUNCTION__); \
531 locked = 1; SZONE_LOCK(szone); \
532 } while (0)
533
534 #if DEBUG_MALLOC || DEBUG_CLIENT
535 # define CHECK(szone,fun) \
536 if ((szone)->debug_flags & CHECK_REGIONS) szone_check_all(szone, fun)
537 #else
538 # define CHECK(szone,fun) do {} while (0)
539 #endif
540
541 /********************* VERY LOW LEVEL UTILITIES ************************/
542
543 #if DEBUG_MALLOC || DEBUG_CLIENT
544 static void
545 szone_sleep(void)
546 {
547
548 if (getenv("MallocErrorSleep")) {
549 malloc_printf("*** sleeping to help debug\n");
550 sleep(3600); // to help debug
551 }
552 }
553 #endif
554
555 static void
556 szone_error(szone_t *szone, const char *msg, const void *ptr)
557 {
558
559 if (szone) SZONE_UNLOCK(szone);
560 if (ptr) {
561 malloc_printf("*** error for object %p: %s\n", ptr, msg);
562 } else {
563 malloc_printf("*** error: %s\n", msg);
564 }
565 #if DEBUG_MALLOC
566 szone_print(szone, 1);
567 szone_sleep();
568 #endif
569 #if DEBUG_CLIENT
570 szone_sleep();
571 #endif
572 }
573
574 static void
575 protect(szone_t *szone, void *address, size_t size, unsigned protection, unsigned debug_flags)
576 {
577 kern_return_t err;
578
579 if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) {
580 err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address - vm_page_size, vm_page_size, 0, protection);
581 if (err) {
582 malloc_printf("*** can't protect(%p) region for prelude guard page at %p\n",
583 protection, address - (1 << vm_page_shift));
584 }
585 }
586 if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) {
587 err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address + size, vm_page_size, 0, protection);
588 if (err) {
589 malloc_printf("*** can't protect(%p) region for postlude guard page at %p\n",
590 protection, address + size);
591 }
592 }
593 }
594
595 static void *
596 allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label)
597 {
598 // align specifies a desired alignment (as a log) or 0 if no alignment requested
599 kern_return_t err;
600 vm_address_t vm_addr;
601 uintptr_t addr, aligned_address;
602 boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
603 size_t allocation_size = round_page(size);
604 size_t delta;
605
606 if (align) add_guard_pages = 0; // too cumbersome to deal with that
607 if (!allocation_size) allocation_size = 1 << vm_page_shift;
608 if (add_guard_pages) allocation_size += 2 * (1 << vm_page_shift);
609 if (align) allocation_size += (size_t)1 << align;
610 err = vm_allocate(mach_task_self(), &vm_addr, allocation_size, vm_page_label | 1);
611 if (err) {
612 malloc_printf("*** vm_allocate(size=%lld) failed (error code=%d)\n", (long long)size, err);
613 szone_error(szone, "can't allocate region", NULL);
614 return NULL;
615 }
616 addr = (uintptr_t)vm_addr;
617 if (align) {
618 aligned_address = (addr + ((uintptr_t)1 << align) - 1) & ~ (((uintptr_t)1 << align) - 1);
619 if (aligned_address != addr) {
620 delta = aligned_address - addr;
621 err = vm_deallocate(mach_task_self(), (vm_address_t)addr, delta);
622 if (err)
623 malloc_printf("*** freeing unaligned header failed with %d\n", err);
624 addr = aligned_address;
625 allocation_size -= delta;
626 }
627 if (allocation_size > size) {
628 err = vm_deallocate(mach_task_self(), (vm_address_t)addr + size, allocation_size - size);
629 if (err)
630 malloc_printf("*** freeing unaligned footer failed with %d\n", err);
631 }
632 }
633 if (add_guard_pages) {
634 addr += (uintptr_t)1 << vm_page_shift;
635 protect(szone, (void *)addr, size, 0, debug_flags);
636 }
637 return (void *)addr;
638 }
639
640 static void
641 deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags)
642 {
643 kern_return_t err;
644 boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
645
646 if (add_guard_pages) {
647 addr -= 1 << vm_page_shift;
648 size += 2 * (1 << vm_page_shift);
649 }
650 err = vm_deallocate(mach_task_self(), (vm_address_t)addr, size);
651 if (err && szone)
652 szone_error(szone, "Can't deallocate_pages region", addr);
653 }
654
655 static kern_return_t
656 _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr)
657 {
658 *ptr = (void *)address;
659 return 0;
660 }
661
662 static INLINE void
663 free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg)
664 {
665 // We always checksum, as testing whether to do it (based on szone->debug_flags) is as fast as
666 // doing it
667 // XXX not necessarily true for LP64 case
668 if (ptr->checksum != (((uintptr_t)ptr->previous) ^ ((uintptr_t)ptr->next) ^ CHECKSUM_MAGIC)) {
669 #if DEBUG_MALLOC
670 malloc_printf("*** incorrect checksum: %s\n", msg);
671 #endif
672 szone_error(szone, "incorrect checksum for freed object "
673 "- object was probably modified after being freed, break at szone_error to debug", ptr);
674 }
675 }
676
677 static INLINE void
678 free_list_set_checksum(szone_t *szone, free_list_t *ptr)
679 {
680 // We always set checksum, as testing whether to do it (based on
681 // szone->debug_flags) is slower than just doing it
682 // XXX not necessarily true for LP64 case
683 ptr->checksum = ((uintptr_t)ptr->previous) ^ ((uintptr_t)ptr->next) ^ CHECKSUM_MAGIC;
684 }
685
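/*
 * Illustrative sketch (not part of the original source): the invariant maintained by the two
 * routines above for every queued free block. example_checksum_holds is a hypothetical name.
 */
#if 0 /* illustration only */
static boolean_t
example_checksum_holds(const free_list_t *ptr)
{
    // previous/next are only ever changed together with a free_list_set_checksum() call,
    // so a mismatch means the block was written to after being freed
    return ptr->checksum == (((uintptr_t)ptr->previous) ^ ((uintptr_t)ptr->next) ^ CHECKSUM_MAGIC);
}
#endif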
686 static unsigned
687 free_list_count(const free_list_t *ptr)
688 {
689 unsigned count = 0;
690
691 while (ptr) {
692 count++;
693 ptr = ptr->next;
694 }
695 return count;
696 }
697
698 /* XXX inconsistent use of BITMAP32 and BITARRAY operations could be cleaned up */
699
700 #define BITMAP32_SET(bitmap,bit) (bitmap |= 1 << (bit))
701 #define BITMAP32_CLR(bitmap,bit) (bitmap &= ~ (1 << (bit)))
702 #define BITMAP32_BIT(bitmap,bit) ((bitmap >> (bit)) & 1)
703
704 #define BITMAP32_FFS(bitmap) (ffs(bitmap))
705 // returns bit # of first bit that's one, starting at 1 (returns 0 if !bitmap)
706
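/*
 * Illustrative sketch (not part of the original source): how the per-szone bitmap is used to
 * find the first free list able to satisfy a request, mirroring tiny_malloc_from_free_list()
 * below. example_first_usable_slot is a hypothetical name.
 */
#if 0 /* illustration only */
static int
example_first_usable_slot(unsigned bitmap, grain_t slot)
{
    bitmap &= ~((1 << slot) - 1); // mask off the slots that are too small
    if (!bitmap)
        return -1; // no cached free block is big enough
    return BITMAP32_FFS(bitmap) - 1; // ffs() numbers bits starting at 1
}
#endif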
707 /********************* TINY FREE LIST UTILITIES ************************/
708
709 // We encode the meta-headers as follows:
710 // Each quantum has an associated set of 2 bits:
711 // block_header when 1 says this block is the beginning of a block
712 // in_use when 1 says this block is in use
713 // so a block in use of size 3 is 1-1 0-X 0-X
714 // for a free block TINY_FREE_SIZE(ptr) carries the size and the bits are 1-0 X-X X-X
715 // for a block middle the bits are 0-0
716
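// Worked example of the encoding above (illustrative): a 3-quantum in-use block starting at
// index i has block_header bits 1 0 0 (bit i+3 belongs to the following block) and in_use
// bit i set; if the same block is free, in_use bit i is clear and TINY_FREE_SIZE(ptr)
// records the 3.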
717 // Attention: these macros evaluate their arguments more than once
718 #define BITARRAY_SET(bits,index) (bits[index>>3] |= (1 << (index & 7)))
719 #define BITARRAY_CLR(bits,index) (bits[index>>3] &= ~(1 << (index & 7)))
720 #define BITARRAY_BIT(bits,index) (((bits[index>>3]) >> (index & 7)) & 1)
721
722 // Following is for start<8 and end<=start+32
723 #define BITARRAY_MCLR_LESS_32(bits,start,end) \
724 do { \
725 unsigned char *_bits = (bits); \
726 unsigned _end = (end); \
727 switch (_end >> 3) { \
728 case 4: _bits[4] &= ~ ((1 << (_end - 32)) - 1); _end = 32; \
729 case 3: _bits[3] &= ~ ((1 << (_end - 24)) - 1); _end = 24; \
730 case 2: _bits[2] &= ~ ((1 << (_end - 16)) - 1); _end = 16; \
731 case 1: _bits[1] &= ~ ((1 << (_end - 8)) - 1); _end = 8; \
732 case 0: _bits[0] &= ~ ((1 << _end) - (1 << (start))); \
733 } \
734 } while (0)
735
736 #if 0 // Simple but slow version
737 #warning Slow version in effect
738 #define BITARRAY_MCLR(bits,index,num) \
739 do { \
740 unsigned _ctr = (num); \
741 unsigned _cur = (index); \
742 \
743 while (_ctr--) {BITARRAY_CLR(bits,_cur); _cur++; } \
744 } while (0)
745 #else
746
747 // Following is for num <= 32
748 #define BITARRAY_MCLR(bits,index,num) \
749 do { \
750 unsigned _index = (index); \
751 unsigned char *_rebased = (bits) + (_index >> 3); \
752 \
753 _index &= 7; \
754 BITARRAY_MCLR_LESS_32(_rebased, _index, _index + (num)); \
755 } while (0)
756 #endif
757
758 static INLINE msize_t
759 get_tiny_meta_header(const void *ptr, boolean_t *is_free)
760 {
761 // returns msize and is_free
762 // may return 0 for the msize component (meaning 65536)
763 unsigned char *block_header;
764 unsigned char *in_use;
765 msize_t index;
766 unsigned byte_index;
767
768 block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
769 index = TINY_INDEX_FOR_PTR(ptr);
770 byte_index = index >> 3;
771
772 block_header += byte_index;
773 index &= 7;
774 *is_free = 0;
775 if (!BITMAP32_BIT(*block_header, index))
776 return 0;
777 in_use = TINY_INUSE_FOR_HEADER(block_header);
778 if (!BITMAP32_BIT(*in_use, index)) {
779 *is_free = 1;
780 return TINY_FREE_SIZE(ptr);
781 }
782 uint32_t *addr = (uint32_t *)((uintptr_t)block_header & ~3);
783 uint32_t word0 = OSReadLittleInt32(addr, 0) >> index;
784 uint32_t word1 = OSReadLittleInt32(addr, 4) << (8 - index);
785 uint32_t bits = (((uintptr_t)block_header & 3) * 8); // precision loss on LP64 OK here
786 uint32_t word = (word0 >> bits) | (word1 << (24 - bits));
787 uint32_t result = ffs(word >> 1);
788 return result;
789 }
790
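/*
 * Illustrative sketch (not part of the original source): typical use of get_tiny_meta_header(),
 * along the lines of what szone_size() does for tiny pointers. example_tiny_block_size is a
 * hypothetical name.
 */
#if 0 /* illustration only */
static size_t
example_tiny_block_size(const void *ptr)
{
    boolean_t is_free;
    msize_t msize = get_tiny_meta_header(ptr, &is_free);

    if (is_free)
        return 0; // freed blocks report no usable size
    return TINY_BYTES_FOR_MSIZE(msize); // msize is in TINY_QUANTUM units (0 encodes 65536 and is not expected for an in-use block)
}
#endif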
791 static INLINE void
792 set_tiny_meta_header_in_use(const void *ptr, msize_t msize)
793 {
794 unsigned char *block_header;
795 unsigned char *in_use;
796 msize_t index;
797 unsigned byte_index;
798 msize_t clr_msize;
799 unsigned end_bit;
800
801 block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
802 index = TINY_INDEX_FOR_PTR(ptr);
803 byte_index = index >> 3;
804
805 #if DEBUG_MALLOC
806 if (msize >= 32)
807 malloc_printf("set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize);
808 if ((unsigned)index + (unsigned)msize > 0x10000)
809 malloc_printf("set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize);
810 #endif
811 block_header += byte_index;
812 index &= 7;
813 BITMAP32_SET(*block_header, index);
814 in_use = TINY_INUSE_FOR_HEADER(block_header);
815 BITMAP32_SET(*in_use, index);
816 index++;
817 clr_msize = msize-1;
818 if (clr_msize) {
819 byte_index = index >> 3;
820 block_header += byte_index; in_use += byte_index;
821 index &= 7;
822 end_bit = index + clr_msize;
823 BITARRAY_MCLR_LESS_32(block_header, index, end_bit);
824 BITARRAY_MCLR_LESS_32(in_use, index, end_bit);
825 }
826 BITARRAY_SET(block_header, index+clr_msize); // we set the block_header bit for the following block to reaffirm next block is a block
827 #if DEBUG_MALLOC
828 {
829 boolean_t ff;
830 msize_t mf;
831
832 mf = get_tiny_meta_header(ptr, &ff);
833 if (msize != mf) {
834 malloc_printf("setting header for tiny in_use %p : %d\n", ptr, msize);
835 malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
836 }
837 }
838 #endif
839 }
840
841 static INLINE void
842 set_tiny_meta_header_middle(const void *ptr)
843 {
844 // indicates this block is in the middle of an in use block
845 unsigned char *block_header;
846 unsigned char *in_use;
847 msize_t index;
848
849 block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
850 in_use = TINY_INUSE_FOR_HEADER(block_header);
851 index = TINY_INDEX_FOR_PTR(ptr);
852
853 BITARRAY_CLR(block_header, index);
854 BITARRAY_CLR(in_use, index);
855 TINY_FREE_SIZE(ptr) = 0;
856 }
857
858 static INLINE void
859 set_tiny_meta_header_free(const void *ptr, msize_t msize)
860 {
861 // !msize is acceptable and means 65536
862 unsigned char *block_header;
863 unsigned char *in_use;
864 msize_t index;
865
866 block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
867 in_use = TINY_INUSE_FOR_HEADER(block_header);
868 index = TINY_INDEX_FOR_PTR(ptr);
869
870 #if DEBUG_MALLOC
871 if ((unsigned)index + (unsigned)msize > 0x10000) {
872 malloc_printf("setting header for tiny free %p msize too large: %d\n", ptr, msize);
873 }
874 #endif
875 BITARRAY_SET(block_header, index); BITARRAY_CLR(in_use, index);
876 TINY_FREE_SIZE(ptr) = msize;
877 // mark the end of this block
878 if (msize) { // msize==0 => the whole region is free
879 void *follower = FOLLOWING_TINY_PTR(ptr, msize);
880 TINY_PREVIOUS_MSIZE(follower) = msize;
881 }
882 #if DEBUG_MALLOC
883 boolean_t ff;
884 msize_t mf = get_tiny_meta_header(ptr, &ff);
885 if ((msize != mf) || !ff) {
886 malloc_printf("setting header for tiny free %p : %d\n", ptr, msize);
887 malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
888 }
889 #endif
890 }
891
892 static INLINE boolean_t
893 tiny_meta_header_is_free(const void *ptr)
894 {
895 // returns whether the block at ptr is free
896 // (unlike get_tiny_meta_header(), the msize is not returned)
897 unsigned char *block_header;
898 unsigned char *in_use;
899 msize_t index;
900
901 block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
902 in_use = TINY_INUSE_FOR_HEADER(block_header);
903 index = TINY_INDEX_FOR_PTR(ptr);
904 if (!BITARRAY_BIT(block_header, index))
905 return 0;
906 return !BITARRAY_BIT(in_use, index);
907 }
908
909 static INLINE void *
910 tiny_previous_preceding_free(void *ptr, msize_t *prev_msize)
911 {
912 // returns the previous block, assuming and verifying it's free
913 unsigned char *block_header;
914 unsigned char *in_use;
915 msize_t index;
916 msize_t previous_msize;
917 msize_t previous_index;
918 void *previous_ptr;
919
920 block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
921 in_use = TINY_INUSE_FOR_HEADER(block_header);
922 index = TINY_INDEX_FOR_PTR(ptr);
923
924 if (!index)
925 return NULL;
926 if ((previous_msize = TINY_PREVIOUS_MSIZE(ptr)) > index)
927 return NULL;
928
929 previous_index = index - previous_msize;
930 previous_ptr = (void *)(TINY_REGION_FOR_PTR(ptr) + TINY_BYTES_FOR_MSIZE(previous_index));
931 if (TINY_FREE_SIZE(previous_ptr) != previous_msize)
932 return NULL;
933
934 if (!BITARRAY_BIT(block_header, previous_index))
935 return NULL;
936 if (BITARRAY_BIT(in_use, previous_index))
937 return NULL;
938
939 // conservative check did match true check
940 *prev_msize = previous_msize;
941 return previous_ptr;
942 }
943
944 static INLINE void
945 tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize)
946 {
947 // Adds an item to the proper free list
948 // Also marks the meta-header of the block properly
949 // Assumes szone has been locked
950 grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
951 free_list_t *free_ptr = ptr;
952 free_list_t *free_head = szone->tiny_free_list[slot];
953
954 #if DEBUG_MALLOC
955 if (LOG(szone,ptr)) {
956 malloc_printf("in tiny_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
957 }
958 if (((unsigned)ptr) & (TINY_QUANTUM - 1)) {
959 szone_error(szone, "tiny_free_list_add_ptr: Unaligned ptr", ptr);
960 }
961 #endif
962 set_tiny_meta_header_free(ptr, msize);
963 if (free_head) {
964 free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
965 #if DEBUG_MALLOC
966 if (free_head->previous) {
967 malloc_printf("ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, free_head, free_head->previous);
968 szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
969 }
970 if (! tiny_meta_header_is_free(free_head)) {
971 malloc_printf("ptr=%p slot=%d free_head=%p\n", ptr, slot, free_head);
972 szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
973 }
974 #endif
975 free_head->previous = free_ptr;
976 free_list_set_checksum(szone, free_head);
977 } else {
978 BITMAP32_SET(szone->tiny_bitmap, slot);
979 }
980 free_ptr->previous = NULL;
981 free_ptr->next = free_head;
982 free_list_set_checksum(szone, free_ptr);
983 szone->tiny_free_list[slot] = free_ptr;
984 }
985
986 static INLINE void
987 tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize)
988 {
989 // Removes item in the proper free list
990 // msize could be read, but all callers have it so we pass it in
991 // Assumes szone has been locked
992 grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
993 free_list_t *free_ptr = ptr;
994 free_list_t *next = free_ptr->next;
995 free_list_t *previous = free_ptr->previous;
996
997 #if DEBUG_MALLOC
998 if (LOG(szone,ptr)) {
999 malloc_printf("In tiny_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
1000 }
1001 #endif
1002 free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
1003 if (!previous) {
1004 // The block to remove is the head of the free list
1005 #if DEBUG_MALLOC
1006 if (szone->tiny_free_list[slot] != ptr) {
1007 malloc_printf("ptr=%p slot=%d msize=%d szone->tiny_free_list[slot]=%p\n",
1008 ptr, slot, msize, szone->tiny_free_list[slot]);
1009 szone_error(szone, "tiny_free_list_remove_ptr: Internal invariant broken (szone->tiny_free_list[slot])", ptr);
1010 return;
1011 }
1012 #endif
1013 szone->tiny_free_list[slot] = next;
1014 if (!next) BITMAP32_CLR(szone->tiny_bitmap, slot);
1015 } else {
1016 previous->next = next;
1017 free_list_set_checksum(szone, previous);
1018 }
1019 if (next) {
1020 next->previous = previous;
1021 free_list_set_checksum(szone, next);
1022 }
1023 }
1024
1025 /*
1026 * Find the tiny region containing (ptr) (if any).
1027 *
1028 * We take advantage of the knowledge that tiny regions are always (1 << TINY_BLOCKS_ALIGN) aligned.
1029 */
1030 static INLINE tiny_region_t *
1031 tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
1032 {
1033 tiny_region_t *region;
1034 tiny_region_t rbase;
1035 int i;
1036
1037 /* mask off irrelevant lower bits */
1038 rbase = TINY_REGION_FOR_PTR(ptr);
1039 /* iterate over allocated regions - XXX not terribly efficient for large number of regions */
1040 for (i = szone->num_tiny_regions, region = szone->tiny_regions; i > 0; i--, region++)
1041 if (rbase == *region)
1042 return(region);
1043 return(NULL);
1044 }
1045
1046 static INLINE void
1047 tiny_free_no_lock(szone_t *szone, tiny_region_t *region, void *ptr, msize_t msize)
1048 {
1049 size_t original_size = TINY_BYTES_FOR_MSIZE(msize);
1050 void *next_block = ((char *)ptr + original_size);
1051 msize_t previous_msize;
1052 void *previous;
1053 msize_t next_msize;
1054 free_list_t *big_free_block;
1055 free_list_t *after_next_block;
1056 free_list_t *before_next_block;
1057
1058 #if DEBUG_MALLOC
1059 if (LOG(szone,ptr)) {
1060 malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
1061 }
1062 if (! msize) {
1063 malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
1064 szone_error(szone, "trying to free tiny block that is too small", ptr);
1065 }
1066 #endif
1067 // We try to coalesce this block with the preceding one
1068 previous = tiny_previous_preceding_free(ptr, &previous_msize);
1069 if (previous) {
1070 #if DEBUG_MALLOC
1071 if (LOG(szone, ptr) || LOG(szone,previous)) {
1072 malloc_printf("in tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
1073 }
1074 #endif
1075 tiny_free_list_remove_ptr(szone, previous, previous_msize);
1076 ptr = previous;
1077 msize += previous_msize;
1078 }
1079 // We try to coalesce with the next block
1080 if ((next_block < TINY_REGION_END(*region)) && tiny_meta_header_is_free(next_block)) {
1081 // The next block is free, we coalesce
1082 next_msize = TINY_FREE_SIZE(next_block);
1083 #if DEBUG_MALLOC
1084 if (LOG(szone, ptr) || LOG(szone, next_block)) {
1085 malloc_printf("in tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n",
1086 ptr, msize, next_block, next_msize);
1087 }
1088 #endif
1089 if (next_msize >= NUM_TINY_SLOTS) {
1090 // we take a short cut here to avoid removing next_block from the slot 31 freelist and then adding ptr back to slot 31
1091 msize += next_msize;
1092 big_free_block = (free_list_t *)next_block;
1093 after_next_block = big_free_block->next;
1094 before_next_block = big_free_block->previous;
1095 free_list_checksum(szone, big_free_block, __PRETTY_FUNCTION__);
1096 if (!before_next_block) {
1097 szone->tiny_free_list[NUM_TINY_SLOTS-1] = ptr;
1098 } else {
1099 before_next_block->next = ptr;
1100 free_list_set_checksum(szone, before_next_block);
1101 }
1102 if (after_next_block) {
1103 after_next_block->previous = ptr;
1104 free_list_set_checksum(szone, after_next_block);
1105 }
1106 ((free_list_t *)ptr)->previous = before_next_block;
1107 ((free_list_t *)ptr)->next = after_next_block;
1108 free_list_set_checksum(szone, ptr);
1109 set_tiny_meta_header_free(ptr, msize);
1110 set_tiny_meta_header_middle(big_free_block); // clear the meta_header to enable coalescing backwards
1111 goto tiny_free_ending;
1112 }
1113 tiny_free_list_remove_ptr(szone, next_block, next_msize);
1114 set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
1115 msize += next_msize;
1116 }
1117 if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize) {
1118 memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize));
1119 }
1120 tiny_free_list_add_ptr(szone, ptr, msize);
1121 tiny_free_ending:
1122 // When in proper debug mode we write on the memory to help debug memory smashers
1123 szone->num_tiny_objects--;
1124 szone->num_bytes_in_tiny_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
1125 }
1126
1127 static void *
1128 tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize)
1129 {
1130 tiny_region_t last_region, *new_regions;
1131 void *last_block, *ptr, *aligned_address;
1132
1133 // Allocates from the last region or a freshly allocated region
1134 // Before anything we transform the tiny_bytes_free_at_end - if any - to a regular free block
1135 if (szone->tiny_bytes_free_at_end) {
1136 last_region = szone->tiny_regions[szone->num_tiny_regions-1];
1137 last_block = TINY_REGION_END(last_region) - szone->tiny_bytes_free_at_end;
1138 tiny_free_list_add_ptr(szone, last_block, TINY_MSIZE_FOR_BYTES(szone->tiny_bytes_free_at_end));
1139 szone->tiny_bytes_free_at_end = 0;
1140 }
1141 // time to create a new region
1142 aligned_address = allocate_pages(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_TINY));
1143 if (!aligned_address) {
1144 // out of memory!
1145 return NULL;
1146 }
1147 // We set the pad word after the block_header bits to all ones
1148 ((uint32_t *)(aligned_address + (1 << TINY_BLOCKS_ALIGN) + (NUM_TINY_BLOCKS >> 3)))[0] = ~0;
1149 if (szone->num_tiny_regions == INITIAL_NUM_TINY_REGIONS) {
1150 // XXX logic here fails after initial reallocation of tiny regions is exhausted (approx 4GB of
1151 // tiny allocations)
1152 new_regions = small_malloc_from_region_no_lock(szone, 16); // 16 * 512 bytes is plenty of tiny regions (more than 4,000)
1153 if (!new_regions) return NULL;
1154 memcpy(new_regions, szone->tiny_regions, INITIAL_NUM_TINY_REGIONS * sizeof(tiny_region_t));
1155 szone->tiny_regions = new_regions; // we set the pointer after it's all ready to enable enumeration from another thread without locking
1156 }
1157 szone->tiny_regions[szone->num_tiny_regions] = aligned_address;
1158 szone->num_tiny_regions ++; // we set the number after the pointer is all ready to enable enumeration from another thread without taking the lock
1159 ptr = aligned_address;
1160 set_tiny_meta_header_in_use(ptr, msize);
1161 szone->num_tiny_objects++;
1162 szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(msize);
1163 // We put a header on the last block so that it appears in use (for coalescing, etc...)
1164 set_tiny_meta_header_in_use(ptr + TINY_BYTES_FOR_MSIZE(msize), 1);
1165 szone->tiny_bytes_free_at_end = TINY_BYTES_FOR_MSIZE(NUM_TINY_BLOCKS - msize);
1166 #if DEBUG_MALLOC
1167 if (LOG(szone,ptr)) {
1168 malloc_printf("in tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
1169 }
1170 #endif
1171 return ptr;
1172 }
1173
1174 static INLINE boolean_t
1175 try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
1176 {
1177 // returns 1 on success
1178 msize_t index;
1179 msize_t old_msize;
1180 unsigned next_index;
1181 void *next_block;
1182 boolean_t is_free;
1183 msize_t next_msize, coalesced_msize, leftover_msize;
1184 void *leftover;
1185
1186 index = TINY_INDEX_FOR_PTR(ptr);
1187 old_msize = TINY_MSIZE_FOR_BYTES(old_size);
1188 next_index = index + old_msize;
1189
1190 if (next_index >= NUM_TINY_BLOCKS) {
1191 return 0;
1192 }
1193 next_block = (char *)ptr + old_size;
1194 SZONE_LOCK(szone);
1195 is_free = tiny_meta_header_is_free(next_block);
1196 if (!is_free) {
1197 SZONE_UNLOCK(szone);
1198 return 0; // next_block is in use;
1199 }
1200 next_msize = TINY_FREE_SIZE(next_block);
1201 if (old_size + TINY_BYTES_FOR_MSIZE(next_msize) < new_size) { // next_msize is in quantum units; compare in bytes
1202 SZONE_UNLOCK(szone);
1203 return 0; // even with next block, not enough
1204 }
1205 tiny_free_list_remove_ptr(szone, next_block, next_msize);
1206 set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
1207 coalesced_msize = TINY_MSIZE_FOR_BYTES(new_size - old_size + TINY_QUANTUM - 1);
1208 leftover_msize = next_msize - coalesced_msize;
1209 if (leftover_msize) {
1210 leftover = next_block + TINY_BYTES_FOR_MSIZE(coalesced_msize);
1211 tiny_free_list_add_ptr(szone, leftover, leftover_msize);
1212 }
1213 set_tiny_meta_header_in_use(ptr, old_msize + coalesced_msize);
1214 #if DEBUG_MALLOC
1215 if (LOG(szone,ptr)) {
1216 malloc_printf("in try_realloc_tiny_in_place(), ptr=%p, msize=%d\n", ptr, old_msize + coalesced_msize);
1217 }
1218 #endif
1219 szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(coalesced_msize);
1220 SZONE_UNLOCK(szone);
1221 CHECK(szone, __PRETTY_FUNCTION__);
1222 return 1;
1223 }
1224
1225 static boolean_t
1226 tiny_check_region(szone_t *szone, tiny_region_t *region)
1227 {
1228 uintptr_t start, ptr, region_end, follower;
1229 boolean_t prev_free = 0;
1230 boolean_t is_free;
1231 msize_t msize;
1232 free_list_t *free_head;
1233
1234 /* establish region limits */
1235 start = (uintptr_t)TINY_REGION_ADDRESS(*region);
1236 ptr = start;
1237 region_end = (uintptr_t)TINY_REGION_END(*region);
1238
1239 /*
1240 * The last region may have a trailing chunk which has not been converted into inuse/freelist
1241 * blocks yet.
1242 */
1243 if (region == szone->tiny_regions + szone->num_tiny_regions - 1)
1244 region_end -= szone->tiny_bytes_free_at_end;
1245
1246
1247 /*
1248 * Scan blocks within the region.
1249 */
1250 while (ptr < region_end) {
1251 /*
1252 * If the first block is free, and its size is 65536 (msize = 0) then the entire region is
1253 * free.
1254 */
1255 msize = get_tiny_meta_header((void *)ptr, &is_free);
1256 if (is_free && !msize && (ptr == start)) {
1257 return 1;
1258 }
1259
1260 /*
1261 * If the block's size is 65536 (msize = 0) and this is not the first block in the region,
1262 * the size must be corrupt.
1263 */
1264 if (!msize) {
1265 malloc_printf("*** invariant broken for tiny block %p this msize=%d - size is too small\n",
1266 ptr, msize);
1267 return 0;
1268 }
1269
1270 if (!is_free) {
1271 /*
1272 * In use blocks cannot be more than 31 quanta large.
1273 */
1274 prev_free = 0;
1275 if (msize > 31) {
1276 malloc_printf("*** invariant broken for %p this tiny msize=%d[%p] - size is too large\n",
1277 ptr, msize, msize);
1278 return 0;
1279 }
1280 /* move to next block */
1281 ptr += TINY_BYTES_FOR_MSIZE(msize);
1282 } else {
1283 /*
1284 * Free blocks must have been coalesced, we cannot have a free block following another
1285 * free block.
1286 */
1287 if (prev_free) {
1288 malloc_printf("*** invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n",
1289 ptr, msize);
1290 return 0;
1291 }
1292 prev_free = 1;
1293 /*
1294 * Check the integrity of this block's entry in its freelist.
1295 */
1296 free_head = (free_list_t *)ptr;
1297 free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
1298 if (free_head->previous && !tiny_meta_header_is_free(free_head->previous)) {
1299 malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n",
1300 ptr, free_head->previous);
1301 return 0;
1302 }
1303 if (free_head->next && !tiny_meta_header_is_free(free_head->next)) {
1304 malloc_printf("*** invariant broken for %p (next in free list %p is not a free pointer)\n",
1305 ptr, free_head->next);
1306 return 0;
1307 }
1308 /*
1309 * Check the free block's trailing size value.
1310 */
1311 follower = (uintptr_t)FOLLOWING_TINY_PTR(ptr, msize);
1312 if ((follower != region_end) && (TINY_PREVIOUS_MSIZE(follower) != msize)) {
1313 malloc_printf("*** invariant broken for tiny free %p followed by %p in region [%p-%p] "
1314 "(end marker incorrect) should be %d; in fact %d\n",
1315 ptr, follower, TINY_REGION_ADDRESS(*region), region_end, msize, TINY_PREVIOUS_MSIZE(follower));
1316 return 0;
1317 }
1318 /* move to next block */
1319 ptr = follower;
1320 }
1321 }
1322 /*
1323 * Ensure that we scanned the entire region
1324 */
1325 if (ptr != region_end) {
1326 malloc_printf("*** invariant broken for region end %p - %p\n", ptr, region_end);
1327 return 0;
1328 }
1329 /*
1330 * Check the trailing block's integrity.
1331 */
1332 if (region == szone->tiny_regions + szone->num_tiny_regions - 1) {
1333 if (szone->tiny_bytes_free_at_end) {
1334 msize = get_tiny_meta_header((void *)ptr, &is_free);
1335 if (is_free || (msize != 1)) {
1336 malloc_printf("*** invariant broken for blocker block %p - %d %d\n", ptr, msize, is_free);
1337 }
1338 }
1339 }
1340 return 1;
1341 }
1342
1343 static kern_return_t
1344 tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t tiny_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder)
1345 {
1346 tiny_region_t *regions;
1347 unsigned index = 0;
1348 vm_range_t buffer[MAX_RECORDER_BUFFER];
1349 unsigned count = 0;
1350 kern_return_t err;
1351 tiny_region_t region;
1352 vm_range_t range;
1353 vm_range_t admin_range;
1354 vm_range_t ptr_range;
1355 unsigned char *mapped_region;
1356 unsigned char *block_header;
1357 unsigned char *in_use;
1358 unsigned block_index;
1359 unsigned block_limit;
1360 boolean_t is_free;
1361 msize_t msize;
1362 void *mapped_ptr;
1363 unsigned bit;
1364
1365 err = reader(task, region_address, sizeof(tiny_region_t) * num_regions, (void **)&regions);
1366 if (err) return err;
1367 while (index < num_regions) {
1368 // unsigned num_in_use = 0;
1369 // unsigned num_free = 0;
1370 region = regions[index];
1371 range.address = (vm_address_t)TINY_REGION_ADDRESS(region);
1372 range.size = (vm_size_t)TINY_REGION_SIZE;
1373 if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
1374 admin_range.address = range.address + (1 << TINY_BLOCKS_ALIGN);
1375 admin_range.size = range.size - (1 << TINY_BLOCKS_ALIGN);
1376 recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
1377 }
1378 if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
1379 ptr_range.address = range.address;
1380 ptr_range.size = 1 << TINY_BLOCKS_ALIGN;
1381 recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
1382 }
1383 if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
1384 err = reader(task, range.address, range.size, (void **)&mapped_region);
1385 if (err)
1386 return err;
1387 block_header = (unsigned char *)(mapped_region + (1 << TINY_BLOCKS_ALIGN));
1388 in_use = block_header + (NUM_TINY_BLOCKS >> 3) + 4;
1389 block_index = 0;
1390 block_limit = NUM_TINY_BLOCKS;
1391 if (index == num_regions - 1)
1392 block_limit -= TINY_MSIZE_FOR_BYTES(tiny_bytes_free_at_end);
1393 while (block_index < block_limit) {
1394 is_free = !BITARRAY_BIT(in_use, block_index);
1395 if (is_free) {
1396 mapped_ptr = mapped_region + TINY_BYTES_FOR_MSIZE(block_index);
1397 msize = TINY_FREE_SIZE(mapped_ptr);
1398 if (!msize)
1399 break;
1400 } else {
1401 msize = 1;
1402 bit = block_index + 1;
1403 while (! BITARRAY_BIT(block_header, bit)) {
1404 bit++;
1405 msize ++;
1406 }
1407 buffer[count].address = range.address + TINY_BYTES_FOR_MSIZE(block_index);
1408 buffer[count].size = TINY_BYTES_FOR_MSIZE(msize);
1409 count++;
1410 if (count >= MAX_RECORDER_BUFFER) {
1411 recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
1412 count = 0;
1413 }
1414 }
1415 block_index += msize;
1416 }
1417 }
1418 index++;
1419 }
1420 if (count) {
1421 recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
1422 }
1423 return 0;
1424 }
1425
1426 static INLINE void *
1427 tiny_malloc_from_free_list(szone_t *szone, msize_t msize)
1428 {
1429 // Assumes we've locked the region
1430 free_list_t *ptr;
1431 msize_t this_msize;
1432 grain_t slot = msize - 1;
1433 free_list_t **free_list = szone->tiny_free_list;
1434 free_list_t **the_slot = free_list + slot;
1435 free_list_t *next;
1436 free_list_t **limit;
1437 unsigned bitmap;
1438 msize_t leftover_msize;
1439 free_list_t *leftover_ptr;
1440
1441 /*
1442 * Look for an exact match by checking the freelist for this msize.
1443 */
1444 ptr = *the_slot;
1445 if (ptr) {
1446 next = ptr->next;
1447 if (next) {
1448 next->previous = NULL;
1449 free_list_set_checksum(szone, next);
1450 }
1451 *the_slot = next;
1452 this_msize = msize;
1453 #if DEBUG_MALLOC
1454 if (LOG(szone, ptr)) {
1455 malloc_printf("in tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize);
1456 }
1457 #endif
1458 goto return_tiny_alloc;
1459 }
1460
1461 /*
1462 * Iterate over freelists for larger blocks looking for the next-largest block.
1463 */
1464 bitmap = szone->tiny_bitmap & ~ ((1 << slot) - 1);
1465 if (!bitmap)
1466 goto try_tiny_malloc_from_end;
1467 slot = BITMAP32_FFS(bitmap) - 1;
1468 limit = free_list + NUM_TINY_SLOTS - 1;
1469 free_list += slot;
1470 while (free_list < limit) {
1471 // try bigger grains
1472 ptr = *free_list;
1473 if (ptr) {
1474 next = ptr->next;
1475 if (next) {
1476 next->previous = NULL;
1477 free_list_set_checksum(szone, next);
1478 }
1479 *free_list = next;
1480 this_msize = TINY_FREE_SIZE(ptr);
1481 #if DEBUG_MALLOC
1482 if (LOG(szone, ptr)) {
1483 malloc_printf("in tiny_malloc_from_free_list(), bigger grain ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
1484 }
1485 #endif
1486 goto add_leftover_and_proceed;
1487 }
1488 free_list++;
1489 }
1490 // we are now looking at the last slot (31)
1491 ptr = *limit;
1492 if (ptr) {
1493 this_msize = TINY_FREE_SIZE(ptr);
1494 next = ptr->next;
1495 if (this_msize - msize >= NUM_TINY_SLOTS) {
1496 // the leftover will go back to the free list, so we optimize by modifying the free list rather than removing the head and then adding back
1497 leftover_msize = this_msize - msize;
1498 leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
1499 *limit = leftover_ptr;
1500 if (next) {
1501 next->previous = leftover_ptr;
1502 free_list_set_checksum(szone, next);
1503 }
1504 leftover_ptr->next = next;
1505 leftover_ptr->previous = NULL;
1506 free_list_set_checksum(szone, leftover_ptr);
1507 set_tiny_meta_header_free(leftover_ptr, leftover_msize);
1508 #if DEBUG_MALLOC
1509 if (LOG(szone,ptr)) {
1510 malloc_printf("in tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
1511 }
1512 #endif
1513 this_msize = msize;
1514 goto return_tiny_alloc;
1515 }
1516 *limit = next;
1517 if (next) {
1518 next->previous = NULL;
1519 free_list_set_checksum(szone, next);
1520 }
1521 goto add_leftover_and_proceed;
1522 }
1523 try_tiny_malloc_from_end:
1524 // Let's see if we can use szone->tiny_bytes_free_at_end
1525 if (szone->tiny_bytes_free_at_end >= TINY_BYTES_FOR_MSIZE(msize)) {
1526 ptr = (free_list_t *)(TINY_REGION_END(szone->tiny_regions[szone->num_tiny_regions-1]) - szone->tiny_bytes_free_at_end);
1527 szone->tiny_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(msize);
1528 if (szone->tiny_bytes_free_at_end) {
1529 // let's add an in use block after ptr to serve as boundary
1530 set_tiny_meta_header_in_use((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize), 1);
1531 }
1532 this_msize = msize;
1533 #if DEBUG_MALLOC
1534 if (LOG(szone, ptr)) {
1535 malloc_printf("in tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize);
1536 }
1537 #endif
1538 goto return_tiny_alloc;
1539 }
1540 return NULL;
1541 add_leftover_and_proceed:
1542 if (!this_msize || (this_msize > msize)) {
1543 leftover_msize = this_msize - msize;
1544 leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
1545 #if DEBUG_MALLOC
1546 if (LOG(szone,ptr)) {
1547 malloc_printf("in tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
1548 }
1549 #endif
1550 tiny_free_list_add_ptr(szone, leftover_ptr, leftover_msize);
1551 this_msize = msize;
1552 }
1553 return_tiny_alloc:
1554 szone->num_tiny_objects++;
1555 szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(this_msize);
1556 #if DEBUG_MALLOC
1557 if (LOG(szone,ptr)) {
1558 malloc_printf("in tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
1559 }
1560 #endif
1561 set_tiny_meta_header_in_use(ptr, this_msize);
1562 return ptr;
1563 }
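/*
 * Illustrative walk-through of the slot search above (numbers are an example
 * only; the invariant used is slot = msize - 1 with 32 tiny slots):
 *
 *   request msize = 3        -> the_slot = tiny_free_list + 2
 *   tiny_free_list[2] empty  -> bitmap = tiny_bitmap & ~((1 << 2) - 1)
 *   BITMAP32_FFS(bitmap) - 1 -> say slot 5 is the first non-empty larger slot
 *   the 6-quantum head of slot 5 is unlinked, the caller gets 3 quanta, and
 *   the 3-quantum remainder goes back on the list via tiny_free_list_add_ptr().
 */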
1564
1565 static INLINE void *
1566 tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested)
1567 {
1568 boolean_t locked = 0;
1569 void *ptr;
1570
1571 #if DEBUG_MALLOC
1572 if (!msize) {
1573 szone_error(szone, "invariant broken (!msize) in allocation (region)", NULL);
1574 return(NULL);
1575 }
1576 #endif
1577 #if TINY_CACHE
1578 ptr = szone->last_tiny_free;
1579 if ((((uintptr_t)ptr) & (TINY_QUANTUM - 1)) == msize) {
1580 // we have a candidate - let's lock to make sure
1581 LOCK_AND_NOTE_LOCKED(szone, locked);
1582 if (ptr == szone->last_tiny_free) {
1583 szone->last_tiny_free = NULL;
1584 SZONE_UNLOCK(szone);
1585 CHECK(szone, __PRETTY_FUNCTION__);
1586 ptr = (void *)((uintptr_t)ptr & ~ (TINY_QUANTUM - 1));
1587 if (cleared_requested) {
1588 memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
1589 }
1590 #if DEBUG_MALLOC
1591 if (LOG(szone,ptr)) {
1592 malloc_printf("in tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize);
1593 }
1594 #endif
1595 return ptr;
1596 }
1597 }
1598 #endif
1599 // Except in rare occasions where we need to add a new region, we are going to end up locking, so we might as well lock right away to avoid doing unnecessary optimistic probes
1600 if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked);
1601 ptr = tiny_malloc_from_free_list(szone, msize);
1602 if (ptr) {
1603 SZONE_UNLOCK(szone);
1604 CHECK(szone, __PRETTY_FUNCTION__);
1605 if (cleared_requested) {
1606 memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
1607 }
1608 return ptr;
1609 }
1610 ptr = tiny_malloc_from_region_no_lock(szone, msize);
1611 // we don't clear because this freshly allocated space is pristine
1612 SZONE_UNLOCK(szone);
1613 CHECK(szone, __PRETTY_FUNCTION__);
1614 return ptr;
1615 }
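/*
 * Sketch of the last_tiny_free cache encoding used above, assuming the usual
 * 16-byte tiny quantum (LP64 uses a larger quantum, so the numbers differ):
 * a freed block's msize is OR'ed into the low bits of its quantum-aligned
 * address, e.g. a 3-quantum block at 0x12340 is cached as 0x12343.  A later
 * request with msize == 3 matches those low bits, the bits are masked off,
 * and 0x12340 is returned without consulting the free lists.
 */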
1616
1617 static INLINE void
1618 free_tiny(szone_t *szone, void *ptr, tiny_region_t *tiny_region)
1619 {
1620 msize_t msize;
1621 boolean_t is_free;
1622 #if TINY_CACHE
1623 void *ptr2;
1624 #endif
1625
1626 // ptr is known to be in tiny_region
1627 SZONE_LOCK(szone);
1628 #if TINY_CACHE
1629 ptr2 = szone->last_tiny_free;
1630 /* check that we don't already have this pointer in the cache */
1631 if (ptr == (void *)((uintptr_t)ptr2 & ~ (TINY_QUANTUM - 1))) {
1632 szone_error(szone, "double free", ptr);
1633 return;
1634 }
1635 #endif /* TINY_CACHE */
1636 msize = get_tiny_meta_header(ptr, &is_free);
1637 if (is_free) {
1638 szone_error(szone, "double free", ptr);
1639 return;
1640 }
1641 #if DEBUG_MALLOC
1642 if (!msize) {
1643 malloc_printf("*** szone_free() block in use is too large: %p\n", ptr);
1644 return;
1645 }
1646 #endif
1647 #if TINY_CACHE
1648 if (msize < TINY_QUANTUM) { // to see if the bits fit in the last 4 bits
1649 szone->last_tiny_free = (void *)(((uintptr_t)ptr) | msize);
1650 if (!ptr2) {
1651 SZONE_UNLOCK(szone);
1652 CHECK(szone, __PRETTY_FUNCTION__);
1653 return;
1654 }
1655 msize = (uintptr_t)ptr2 & (TINY_QUANTUM - 1);
1656 ptr = (void *)(((uintptr_t)ptr2) & ~(TINY_QUANTUM - 1));
1657 tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
1658 if (!tiny_region) {
1659 szone_error(szone, "double free (tiny cache)", ptr);
1660 }
1661 }
1662 #endif
1663 tiny_free_no_lock(szone, tiny_region, ptr, msize);
1664 SZONE_UNLOCK(szone);
1665 CHECK(szone, __PRETTY_FUNCTION__);
1666 }
1667
1668 static void
1669 print_tiny_free_list(szone_t *szone)
1670 {
1671 grain_t slot = 0;
1672 free_list_t *ptr;
1673
1674 malloc_printf("tiny free sizes: ");
1675 while (slot < NUM_TINY_SLOTS) {
1676 ptr = szone->tiny_free_list[slot];
1677 if (ptr) {
1678 malloc_printf("%s%y[%d]; ", (slot == NUM_TINY_SLOTS-1) ? ">=" : "", (slot+1)*TINY_QUANTUM, free_list_count(ptr));
1679 }
1680 slot++;
1681 }
1682 }
1683
1684 static void
1685 print_tiny_region(boolean_t verbose, tiny_region_t region, size_t bytes_at_end)
1686 {
1687 unsigned counts[1024];
1688 unsigned in_use = 0;
1689 uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(region);
1690 uintptr_t current = start;
1691 uintptr_t limit = (uintptr_t)TINY_REGION_END(region) - bytes_at_end;
1692 boolean_t is_free;
1693 msize_t msize;
1694 unsigned ci;
1695
1696 memset(counts, 0, 1024 * sizeof(unsigned));
1697 while (current < limit) {
1698 msize = get_tiny_meta_header((void *)current, &is_free);
1699 if (is_free && !msize && (current == start)) {
1700 // first block is all free
1701 break;
1702 }
1703 if (!msize) {
1704 malloc_printf("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize);
1705 break;
1706 }
1707 if (!is_free) {
1708 // block in use
1709 if (msize > 32)
1710 malloc_printf("*** error at %p msize for in_use is %d\n", (void *)current, msize);
1711 if (msize < 1024)
1712 counts[msize]++;
1713 in_use++;
1714 }
1715 current += TINY_BYTES_FOR_MSIZE(msize);
1716 }
1717 malloc_printf("Tiny region [%p-%p, %y]\t", (void *)start, TINY_REGION_END(region), (int)TINY_REGION_SIZE);
1718 malloc_printf("In_use=%d ", in_use);
1719 if (bytes_at_end) malloc_printf("untouched=%y ", bytes_at_end);
1720 if (verbose && in_use) {
1721 malloc_printf("\tSizes in use: ");
1722 for (ci = 0; ci < 1024; ci++)
1723 if (counts[ci])
1724 malloc_printf("%d[%d]", TINY_BYTES_FOR_MSIZE(ci), counts[ci]);
1725 }
1726 malloc_printf("\n");
1727 }
1728
1729 static boolean_t
1730 tiny_free_list_check(szone_t *szone, grain_t slot)
1731 {
1732 unsigned count = 0;
1733 free_list_t *ptr = szone->tiny_free_list[slot];
1734 free_list_t *previous = NULL;
1735 boolean_t is_free;
1736
1737 CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
1738 while (ptr) {
1739 free_list_checksum(szone, ptr, __PRETTY_FUNCTION__);
1740 is_free = tiny_meta_header_is_free(ptr);
1741 if (! is_free) {
1742 malloc_printf("*** in-use ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
1743 return 0;
1744 }
1745 if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
1746 malloc_printf("*** unaligned ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
1747 return 0;
1748 }
1749 if (!tiny_region_for_ptr_no_lock(szone, ptr)) {
1750 malloc_printf("*** ptr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr);
1751 return 0;
1752 }
1753 if (ptr->previous != previous) {
1754 malloc_printf("*** previous incorrectly set slot=%d count=%d ptr=%p\n", slot, count, ptr);
1755 return 0;
1756 }
1757 previous = ptr;
1758 ptr = ptr->next;
1759 count++;
1760 }
1761 return 1;
1762 }
1763
1764 /********************* SMALL FREE LIST UTILITIES ************************/
1765
1766 /*
1767 * Mark a block as free. Only the first quantum of a block is marked thusly,
1768 * the remainder are marked "middle".
1769 */
1770 static INLINE void
1771 small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize)
1772 {
1773
1774 meta_headers[index] = msize | SMALL_IS_FREE;
1775 }
1776
1777 /*
1778 * Mark a block as in use. Only the first quantum of a block is marked thusly,
1779 * the remainder are marked "middle".
1780 */
1781 static INLINE void
1782 small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize)
1783 {
1784
1785 meta_headers[index] = msize;
1786 }
1787
1788 /*
1789 * Mark a quantum as being the second or later in a block.
1790 */
1791 static INLINE void
1792 small_meta_header_set_middle(msize_t *meta_headers, msize_t index)
1793 {
1794
1795 meta_headers[index] = 0;
1796 }
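/*
 * Example of the resulting meta-header layout (indices are small quanta):
 * an in-use block of 5 quanta starting at index 10 has
 *
 *   meta_headers[10]     = 5
 *   meta_headers[11..14] = 0               (middle)
 *
 * and becomes meta_headers[10] = 5 | SMALL_IS_FREE once it is freed.
 */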
1797
1798 // Adds an item to the proper free list
1799 // Also marks the header of the block properly
1800 // Assumes szone has been locked
1801 static void
1802 small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize)
1803 {
1804 grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
1805 free_list_t *free_ptr = ptr;
1806 free_list_t *free_head = szone->small_free_list[grain];
1807 void *follower;
1808
1809 CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
1810 #if DEBUG_MALLOC
1811 if (LOG(szone,ptr)) {
1812 malloc_printf("in small_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
1813 }
1814 if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) {
1815 szone_error(szone, "small_free_list_add_ptr: Unaligned ptr", ptr);
1816 }
1817 #endif
1818 if (free_head) {
1819 free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
1820 #if DEBUG_MALLOC
1821 if (free_head->previous) {
1822 malloc_printf("ptr=%p grain=%d free_head=%p previous=%p\n",
1823 ptr, grain, free_head, free_head->previous);
1824 szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
1825 }
1826 if (!SMALL_PTR_IS_FREE(free_head)) {
1827 malloc_printf("ptr=%p grain=%d free_head=%p\n", ptr, grain, free_head);
1828 szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
1829 }
1830 #endif
1831 free_head->previous = free_ptr;
1832 free_list_set_checksum(szone, free_head);
1833 } else {
1834 BITMAP32_SET(szone->small_bitmap, grain);
1835 }
1836 free_ptr->previous = NULL;
1837 free_ptr->next = free_head;
1838 free_list_set_checksum(szone, free_ptr);
1839 szone->small_free_list[grain] = free_ptr;
1840 follower = ptr + SMALL_BYTES_FOR_MSIZE(msize);
1841 SMALL_PREVIOUS_MSIZE(follower) = msize;
1842 }
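/*
 * The trailer written above (SMALL_PREVIOUS_MSIZE(follower) = msize) is what
 * makes backward coalescing in small_free_no_lock() possible.  Sketch: a free
 * block of 4 quanta occupying indices 20..23 records "4" where the block at
 * index 24 can see it; when index 24 is later freed, SMALL_PREVIOUS_MSIZE
 * leads back to index 20 and the two blocks are merged.
 */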
1843
1844 // Removes item in the proper free list
1845 // msize could be read, but all callers have it so we pass it in
1846 // Assumes szone has been locked
1847 static void
1848 small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize)
1849 {
1850 grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
1851 free_list_t *free_ptr = ptr;
1852 free_list_t *next = free_ptr->next;
1853 free_list_t *previous = free_ptr->previous;
1854
1855 CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
1856 #if DEBUG_MALLOC
1857 if (LOG(szone,ptr)) {
1858 malloc_printf("in small_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
1859 }
1860 #endif
1861 free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
1862 if (!previous) {
1863 #if DEBUG_MALLOC
1864 if (szone->small_free_list[grain] != ptr) {
1865 malloc_printf("ptr=%p grain=%d msize=%d szone->small_free_list[grain]=%p\n",
1866 ptr, grain, msize, szone->small_free_list[grain]);
1867 szone_error(szone, "small_free_list_remove_ptr: Internal invariant broken (szone->small_free_list[grain])", ptr);
1868 return;
1869 }
1870 #endif
1871 szone->small_free_list[grain] = next;
1872 if (!next) BITMAP32_CLR(szone->small_bitmap, grain);
1873 } else {
1874 previous->next = next;
1875 free_list_set_checksum(szone, previous);
1876 }
1877 if (next) {
1878 next->previous = previous;
1879 free_list_set_checksum(szone, next);
1880 }
1881 }
1882
1883 static INLINE small_region_t *
1884 small_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
1885 {
1886 small_region_t *region;
1887 small_region_t rbase;
1888 int i;
1889
1890 /* find assumed heap/region base */
1891 rbase = SMALL_REGION_FOR_PTR(ptr);
1892
1893 /* scan existing regions for a match */
1894 for (i = szone->num_small_regions, region = szone->small_regions; i > 0; i--, region++)
1895 if (rbase == *region)
1896 return(region);
1897 return(NULL);
1898 }
1899
1900 static INLINE void
1901 small_free_no_lock(szone_t *szone, small_region_t *region, void *ptr, msize_t msize)
1902 {
1903 msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
1904 unsigned index = SMALL_META_INDEX_FOR_PTR(ptr);
1905 size_t original_size = SMALL_BYTES_FOR_MSIZE(msize);
1906 unsigned char *next_block = ((unsigned char *)ptr + original_size);
1907 msize_t next_index = index + msize;
1908 msize_t previous_msize, next_msize;
1909 void *previous;
1910
1911 // Assumes locked
1912 CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
1913 #if DEBUG_MALLOC
1914 if (LOG(szone,ptr)) {
1915 malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
1916 }
1917 if (! msize) {
1918 malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
1919 szone_error(szone, "trying to free small block that is too small", ptr);
1920 }
1921 #endif
1922 // We try to coalesce this block with the preceding one
1923 if (index && (SMALL_PREVIOUS_MSIZE(ptr) <= index)) {
1924 previous_msize = SMALL_PREVIOUS_MSIZE(ptr);
1925 if (meta_headers[index - previous_msize] == (previous_msize | SMALL_IS_FREE)) {
1926 previous = ptr - SMALL_BYTES_FOR_MSIZE(previous_msize);
1927 // previous is really to be coalesced
1928 #if DEBUG_MALLOC
1929 if (LOG(szone, ptr) || LOG(szone,previous)) {
1930 malloc_printf("in small_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
1931 }
1932 #endif
1933 small_free_list_remove_ptr(szone, previous, previous_msize);
1934 small_meta_header_set_middle(meta_headers, index);
1935 ptr = previous;
1936 msize += previous_msize;
1937 index -= previous_msize;
1938 }
1939 }
1940 // We try to coalesce with the next block
1941 if ((next_block < SMALL_REGION_END(*region)) && (meta_headers[next_index] & SMALL_IS_FREE)) {
1942 // next block is free, we coalesce
1943 next_msize = meta_headers[next_index] & ~ SMALL_IS_FREE;
1944 #if DEBUG_MALLOC
1945 if (LOG(szone,ptr)) malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", ptr, msize, next_block, next_msize);
1946 #endif
1947 small_free_list_remove_ptr(szone, next_block, next_msize);
1948 small_meta_header_set_middle(meta_headers, next_index);
1949 msize += next_msize;
1950 }
1951 if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) {
1952 if (!msize) {
1953 szone_error(szone, "incorrect size information - block header was damaged", ptr);
1954 } else {
1955 memset(ptr, 0x55, SMALL_BYTES_FOR_MSIZE(msize));
1956 }
1957 }
1958 small_free_list_add_ptr(szone, ptr, msize);
1959 small_meta_header_set_is_free(meta_headers, index, msize);
1960 szone->num_small_objects--;
1961 szone->num_bytes_in_small_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
1962 }
1963
1964 static void *
1965 small_malloc_from_region_no_lock(szone_t *szone, msize_t msize)
1966 {
1967 small_region_t last_region;
1968 void *last_block;
1969 void *ptr;
1970 void *new_address;
1971 msize_t *meta_headers;
1972 msize_t index;
1973 size_t region_capacity;
1974 msize_t new_msize;
1975 small_region_t *new_regions;
1976 msize_t msize_left;
1977
1978 // Allocates from the last region or a freshly allocated region
1979 CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
1980 // Before anything we transform the small_bytes_free_at_end - if any - to a regular free block
1981 if (szone->small_bytes_free_at_end) {
1982 last_region = szone->small_regions[szone->num_small_regions - 1];
1983 last_block = (void *)(SMALL_REGION_END(last_region) - szone->small_bytes_free_at_end);
1984 small_free_list_add_ptr(szone, last_block, SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end));
1985 *SMALL_METADATA_FOR_PTR(last_block) = SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end) | SMALL_IS_FREE;
1986 szone->small_bytes_free_at_end = 0;
1987 }
1988 // time to create a new region
1989 new_address = allocate_pages(szone, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_SMALL));
1990 if (!new_address) {
1991 // out of memory!
1992 return NULL;
1993 }
1994 ptr = new_address;
1995 meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
1996 index = 0;
1997 if (szone->num_small_regions == INITIAL_NUM_SMALL_REGIONS) {
1998 // time to grow the number of regions
1999 region_capacity = (1 << (32 - SMALL_BLOCKS_ALIGN)) - 20; // that is for sure the maximum number of small regions we can have
2000 new_msize = (region_capacity * sizeof(small_region_t) + SMALL_QUANTUM - 1) / SMALL_QUANTUM;
2001 new_regions = ptr;
2002 small_meta_header_set_in_use(meta_headers, index, new_msize);
2003 szone->num_small_objects++;
2004 szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(new_msize);
2005 memcpy(new_regions, szone->small_regions, INITIAL_NUM_SMALL_REGIONS * sizeof(small_region_t));
2006 // We intentionally leak the previous regions pointer to avoid multi-threading crashes if
2007 // another thread was reading it (unlocked) while we are changing it.
2008 szone->small_regions = new_regions; // note we set this pointer after it's all set
2009 ptr += SMALL_BYTES_FOR_MSIZE(new_msize);
2010 index = new_msize;
2011 }
2012 szone->small_regions[szone->num_small_regions] = new_address;
2013 // we bump the number of regions AFTER we have changed the regions pointer to enable finding a
2014 // small region without taking the lock
2015 // XXX naively assumes memory ordering coherence between this and other CPUs
2016 szone->num_small_regions++;
2017 small_meta_header_set_in_use(meta_headers, index, msize);
2018 msize_left = NUM_SMALL_BLOCKS - index;
2019 szone->num_small_objects++;
2020 szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(msize);
2021 // add a big free block
2022 index += msize; msize_left -= msize;
2023 meta_headers[index] = msize_left;
2024 szone->small_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(msize_left);
2025 return ptr;
2026 }
2027
2028 static INLINE boolean_t
2029 try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
2030 {
2031 // returns 1 on success
2032 msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
2033 unsigned index = SMALL_META_INDEX_FOR_PTR(ptr);
2034 msize_t old_msize = SMALL_MSIZE_FOR_BYTES(old_size);
2035 msize_t new_msize = SMALL_MSIZE_FOR_BYTES(new_size + SMALL_QUANTUM - 1);
2036 void *next_block = (char *)ptr + old_size;
2037 unsigned next_index = index + old_msize;
2038 msize_t next_msize_and_free;
2039 msize_t next_msize;
2040 msize_t leftover_msize;
2041 void *leftover;
2042 unsigned leftover_index;
2043
2044 if (next_index >= NUM_SMALL_BLOCKS) {
2045 return 0;
2046 }
2047 #if DEBUG_MALLOC
2048 if ((uintptr_t)next_block & (SMALL_QUANTUM - 1)) {
2049 szone_error(szone, "internal invariant broken in realloc(next_block)", next_block);
2050 }
2051 if (meta_headers[index] != old_msize)
2052 malloc_printf("*** try_realloc_small_in_place incorrect old %d %d\n",
2053 meta_headers[index], old_msize);
2054 #endif
2055 SZONE_LOCK(szone);
2056 /*
2057 * Look for a free block immediately afterwards. If it's large enough, we can consume (part of)
2058 * it.
2059 */
2060 next_msize_and_free = meta_headers[next_index];
2061 next_msize = next_msize_and_free & ~ SMALL_IS_FREE;
2062 if (!(next_msize_and_free & SMALL_IS_FREE) || (old_msize + next_msize < new_msize)) {
2063 SZONE_UNLOCK(szone);
2064 return 0;
2065 }
2066 /*
2067 * The following block is big enough; pull it from its freelist and chop off enough to satisfy
2068 * our needs.
2069 */
2070 small_free_list_remove_ptr(szone, next_block, next_msize);
2071 small_meta_header_set_middle(meta_headers, next_index);
2072 leftover_msize = old_msize + next_msize - new_msize;
2073 if (leftover_msize) {
2074 /* there's some left, so put the remainder back */
2075 leftover = (unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(new_msize);
2076 small_free_list_add_ptr(szone, leftover, leftover_msize);
2077 leftover_index = index + new_msize;
2078 small_meta_header_set_is_free(meta_headers, leftover_index, leftover_msize);
2079 }
2080 #if DEBUG_MALLOC
2081 if (SMALL_BYTES_FOR_MSIZE(new_msize) >= LARGE_THRESHOLD) {
2082 malloc_printf("*** realloc in place for %p exceeded msize=%d\n", ptr, new_msize);
2083 }
2084 #endif
2085 small_meta_header_set_in_use(meta_headers, index, new_msize);
2086 #if DEBUG_MALLOC
2087 if (LOG(szone,ptr)) {
2088 malloc_printf("in szone_realloc(), ptr=%p, msize=%d\n", ptr, *SMALL_METADATA_FOR_PTR(ptr));
2089 }
2090 #endif
2091 szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(new_msize - old_msize);
2092 SZONE_UNLOCK(szone);
2093 CHECK(szone, __PRETTY_FUNCTION__);
2094 return 1;
2095 }
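/*
 * Worked example of the in-place growth above (all sizes in small quanta):
 * old_msize = 4, the following block is free with next_msize = 6, and the
 * caller needs new_msize = 7.  Since 4 + 6 >= 7 the free block is pulled off
 * its list, the block header becomes 7, and the leftover 4 + 6 - 7 = 3 quanta
 * are re-added to the free list at index + 7.
 */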
2096
2097 static boolean_t
2098 szone_check_small_region(szone_t *szone, small_region_t *region)
2099 {
2100 unsigned char *ptr = SMALL_REGION_ADDRESS(*region);
2101 msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
2102 unsigned char *region_end = SMALL_REGION_END(*region);
2103 msize_t prev_free = 0;
2104 unsigned index;
2105 msize_t msize_and_free;
2106 msize_t msize;
2107 free_list_t *free_head;
2108 msize_t *follower;
2109
2110 CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
2111 if (region == szone->small_regions + szone->num_small_regions - 1) region_end -= szone->small_bytes_free_at_end;
2112 while (ptr < region_end) {
2113 index = SMALL_META_INDEX_FOR_PTR(ptr);
2114 msize_and_free = meta_headers[index];
2115 if (!(msize_and_free & SMALL_IS_FREE)) {
2116 // block is in use
2117 msize = msize_and_free;
2118 if (!msize) {
2119 malloc_printf("*** invariant broken: null msize ptr=%p region#=%d num_small_regions=%d end=%p\n",
2120 ptr, region - szone->small_regions, szone->num_small_regions, (void *)region_end);
2121 return 0;
2122 }
2123 if (msize > (LARGE_THRESHOLD / SMALL_QUANTUM)) {
2124 malloc_printf("*** invariant broken for %p this small msize=%d - size is too large\n",
2125 ptr, msize_and_free);
2126 return 0;
2127 }
2128 ptr += SMALL_BYTES_FOR_MSIZE(msize);
2129 prev_free = 0;
2130 } else {
2131 // free pointer
2132 msize = msize_and_free & ~ SMALL_IS_FREE;
2133 free_head = (free_list_t *)ptr;
2134 follower = (msize_t *)FOLLOWING_SMALL_PTR(ptr, msize);
2135 if (!msize) {
2136 malloc_printf("*** invariant broken for free block %p this msize=%d\n", ptr, msize);
2137 return 0;
2138 }
2139 if (prev_free) {
2140 malloc_printf("*** invariant broken for %p (2 free in a row)\n", ptr);
2141 return 0;
2142 }
2143 free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
2144 if (free_head->previous && !SMALL_PTR_IS_FREE(free_head->previous)) {
2145 malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n",
2146 ptr, free_head->previous);
2147 return 0;
2148 }
2149 if (free_head->next && !SMALL_PTR_IS_FREE(free_head->next)) {
2150 malloc_printf("*** invariant broken for %p (next is not a free pointer)\n", ptr);
2151 return 0;
2152 }
2153 if (SMALL_PREVIOUS_MSIZE(follower) != msize) {
2154 malloc_printf("*** invariant broken for small free %p followed by %p in region [%p-%p] "
2155 "(end marker incorrect) should be %d; in fact %d\n",
2156 ptr, follower, SMALL_REGION_ADDRESS(*region), region_end, msize, SMALL_PREVIOUS_MSIZE(follower));
2157 return 0;
2158 }
2159 ptr = (unsigned char *)follower;
2160 prev_free = SMALL_IS_FREE;
2161 }
2162 }
2163 return 1;
2164 }
2165
2166 static kern_return_t
2167 small_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t small_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder)
2168 {
2169 small_region_t *regions;
2170 unsigned index = 0;
2171 vm_range_t buffer[MAX_RECORDER_BUFFER];
2172 unsigned count = 0;
2173 kern_return_t err;
2174 small_region_t region;
2175 vm_range_t range;
2176 vm_range_t admin_range;
2177 vm_range_t ptr_range;
2178 unsigned char *mapped_region;
2179 msize_t *block_header;
2180 unsigned block_index;
2181 unsigned block_limit;
2182 msize_t msize_and_free;
2183 msize_t msize;
2184
2185 err = reader(task, region_address, sizeof(small_region_t) * num_regions, (void **)&regions);
2186 if (err) return err;
2187 while (index < num_regions) {
2188 region = regions[index];
2189 range.address = (vm_address_t)SMALL_REGION_ADDRESS(region);
2190 range.size = SMALL_REGION_SIZE;
2191 if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
2192 admin_range.address = range.address + (1 << SMALL_BLOCKS_ALIGN);
2193 admin_range.size = range.size - (1 << SMALL_BLOCKS_ALIGN);
2194 recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
2195 }
2196 if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
2197 ptr_range.address = range.address;
2198 ptr_range.size = 1 << SMALL_BLOCKS_ALIGN;
2199 recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
2200 }
2201 if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
2202 err = reader(task, range.address, range.size, (void **)&mapped_region);
2203 if (err) return err;
2204 block_header = (msize_t *)(mapped_region + (1 << SMALL_BLOCKS_ALIGN));
2205 block_index = 0;
2206 block_limit = NUM_SMALL_BLOCKS;
2207 if (index == num_regions - 1)
2208 block_limit -= SMALL_MSIZE_FOR_BYTES(small_bytes_free_at_end);
2209 while (block_index < block_limit) {
2210 msize_and_free = block_header[block_index];
2211 msize = msize_and_free & ~ SMALL_IS_FREE;
2212 if (! (msize_and_free & SMALL_IS_FREE)) {
2213 // Block in use
2214 buffer[count].address = range.address + SMALL_BYTES_FOR_MSIZE(block_index);
2215 buffer[count].size = SMALL_BYTES_FOR_MSIZE(msize);
2216 count++;
2217 if (count >= MAX_RECORDER_BUFFER) {
2218 recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
2219 count = 0;
2220 }
2221 }
2222 block_index += msize;
2223 }
2224 }
2225 index++;
2226 }
2227 if (count) {
2228 recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
2229 }
2230 return 0;
2231 }
2232
2233 static INLINE void *
2234 small_malloc_from_free_list(szone_t *szone, msize_t msize)
2235 {
2236 grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
2237 unsigned bitmap = szone->small_bitmap & ~ ((1 << grain) - 1);
2238 void *ptr;
2239 msize_t this_msize;
2240 free_list_t **free_list;
2241 free_list_t **limit;
2242 free_list_t *next;
2243 msize_t leftover_msize;
2244 void *leftover_ptr;
2245 msize_t *meta_headers;
2246 unsigned leftover_index;
2247
2248 // Assumes locked
2249 CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
2250
2251 if (!bitmap) goto try_small_from_end;
2252 grain = BITMAP32_FFS(bitmap) - 1;
2253 // first try the small grains
2254 limit = szone->small_free_list + NUM_SMALL_SLOTS - 1;
2255 free_list = szone->small_free_list + grain;
2256 while (free_list < limit) {
2257 // try bigger grains
2258 ptr = *free_list;
2259 if (ptr) {
2260 next = ((free_list_t *)ptr)->next;
2261 if (next) {
2262 next->previous = NULL;
2263 free_list_set_checksum(szone, next);
2264 }
2265 *free_list = next;
2266 this_msize = SMALL_PTR_SIZE(ptr);
2267 goto add_leftover_and_proceed;
2268 }
2269 free_list++;
2270 }
2271 // We now check the large grains for one that is big enough
2272 ptr = *free_list;
2273 while (ptr) {
2274 this_msize = SMALL_PTR_SIZE(ptr);
2275 if (this_msize >= msize) {
2276 small_free_list_remove_ptr(szone, ptr, this_msize);
2277 goto add_leftover_and_proceed;
2278 }
2279 ptr = ((free_list_t *)ptr)->next;
2280 }
2281 try_small_from_end:
2282 // Let's see if we can use szone->small_bytes_free_at_end
2283 if (szone->small_bytes_free_at_end >= SMALL_BYTES_FOR_MSIZE(msize)) {
2284 ptr = (void *)(SMALL_REGION_END(szone->small_regions[szone->num_small_regions-1]) - szone->small_bytes_free_at_end);
2285 szone->small_bytes_free_at_end -= SMALL_BYTES_FOR_MSIZE(msize);
2286 if (szone->small_bytes_free_at_end) {
2287 // let's mark this block as in use to serve as boundary
2288 *SMALL_METADATA_FOR_PTR(ptr + SMALL_BYTES_FOR_MSIZE(msize)) = SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end);
2289 }
2290 this_msize = msize;
2291 goto return_small_alloc;
2292 }
2293 return NULL;
2294 add_leftover_and_proceed:
2295 if (this_msize > msize) {
2296 leftover_msize = this_msize - msize;
2297 leftover_ptr = ptr + SMALL_BYTES_FOR_MSIZE(msize);
2298 #if DEBUG_MALLOC
2299 if (LOG(szone,ptr)) {
2300 malloc_printf("in small_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
2301 }
2302 #endif
2303 small_free_list_add_ptr(szone, leftover_ptr, leftover_msize);
2304 meta_headers = SMALL_META_HEADER_FOR_PTR(leftover_ptr);
2305 leftover_index = SMALL_META_INDEX_FOR_PTR(leftover_ptr);
2306 small_meta_header_set_is_free(meta_headers, leftover_index, leftover_msize);
2307 this_msize = msize;
2308 }
2309 return_small_alloc:
2310 szone->num_small_objects++;
2311 szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(this_msize);
2312 #if DEBUG_MALLOC
2313 if (LOG(szone,ptr)) {
2314 malloc_printf("in small_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
2315 }
2316 #endif
2317 *SMALL_METADATA_FOR_PTR(ptr) = this_msize;
2318 return ptr;
2319 }
2320
2321 static INLINE void *
2322 small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested)
2323 {
2324 boolean_t locked = 0;
2326 void *ptr; /* used on the free-list path below even when SMALL_CACHE is disabled */
2328
2329 #if SMALL_CACHE
2330 ptr = (void *)szone->last_small_free;
2331 if ((((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) == msize) {
2332 // we have a candidate - let's lock to make sure
2333 LOCK_AND_NOTE_LOCKED(szone, locked);
2334 if (ptr == (void *)szone->last_small_free) {
2335 szone->last_small_free = NULL;
2336 SZONE_UNLOCK(szone);
2337 CHECK(szone, __PRETTY_FUNCTION__);
2338 ptr = (void *)((uintptr_t)ptr & ~ (SMALL_QUANTUM - 1));
2339 if (cleared_requested) {
2340 memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
2341 }
2342 return ptr;
2343 }
2344 }
2345 #endif
2346 // Except in rare occasions where we need to add a new region, we are going to end up locking,
2347 // so we might as well lock right away to avoid doing unnecessary optimistic probes
2348 if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked);
2349 ptr = small_malloc_from_free_list(szone, msize);
2350 if (ptr) {
2351 SZONE_UNLOCK(szone);
2352 CHECK(szone, __PRETTY_FUNCTION__);
2353 if (cleared_requested) {
2354 memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
2355 }
2356 return ptr;
2357 }
2358 ptr = small_malloc_from_region_no_lock(szone, msize);
2359 // we don't clear because this freshly allocated space is pristine
2360 SZONE_UNLOCK(szone);
2361 CHECK(szone, __PRETTY_FUNCTION__);
2362 return ptr;
2363 }
2364
2365 // tries to allocate a small, cleared block
2366 static INLINE void *
2367 small_malloc_cleared_no_lock(szone_t *szone, msize_t msize)
2368 {
2369 void *ptr;
2370
2371 // Assumes already locked
2372 CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
2373 ptr = small_malloc_from_free_list(szone, msize);
2374 if (ptr) {
2375 memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
2376 return ptr;
2377 } else {
2378 ptr = small_malloc_from_region_no_lock(szone, msize);
2379 // we don't clear because this freshly allocated space is pristine
2380 }
2381 return ptr;
2382 }
2383
2384 static INLINE void
2385 free_small(szone_t *szone, void *ptr, small_region_t *small_region)
2386 {
2387 msize_t msize_and_free;
2388 #if SMALL_CACHE
2389 void *ptr2;
2390 #endif
2391
2392 // ptr is known to be in small_region
2393 msize_and_free = *SMALL_METADATA_FOR_PTR(ptr);
2394 if (msize_and_free & SMALL_IS_FREE) {
2395 szone_error(szone, "Object already freed being freed", ptr);
2396 return;
2397 }
2398 CHECK(szone, __PRETTY_FUNCTION__);
2399 SZONE_LOCK(szone);
2400 #if SMALL_CACHE
2401 ptr2 = szone->last_small_free;
2402 szone->last_small_free = (void *)(((uintptr_t)ptr) | msize_and_free);
2403 if (!ptr2) {
2404 SZONE_UNLOCK(szone);
2405 CHECK(szone, __PRETTY_FUNCTION__);
2406 return;
2407 }
2408 msize_and_free = (uintptr_t)ptr2 & (SMALL_QUANTUM - 1);
2409 ptr = (void *)(((uintptr_t)ptr2) & ~ (SMALL_QUANTUM - 1));
2410 small_region = small_region_for_ptr_no_lock(szone, ptr);
2411 if (!small_region) {
2412 szone_error(szone, "double free (small cache)", ptr);
2413 return;
2414 }
2415 #endif
2416 small_free_no_lock(szone, small_region, ptr, msize_and_free);
2417 SZONE_UNLOCK(szone);
2418 CHECK(szone, __PRETTY_FUNCTION__);
2419 }
2420
2421 static void
2422 print_small_free_list(szone_t *szone)
2423 {
2424 grain_t grain = 0;
2425 free_list_t *ptr;
2426
2427 malloc_printf("small free sizes: ");
2428 while (grain < NUM_SMALL_SLOTS) {
2429 ptr = szone->small_free_list[grain];
2430 if (ptr) {
2431 malloc_printf("%s%y[%d]; ", (grain == NUM_SMALL_SLOTS-1) ? ">=" : "", (grain + 1) * SMALL_QUANTUM, free_list_count(ptr));
2432 }
2433 grain++;
2434 }
2435 malloc_printf("\n");
2436 }
2437
2438 static void
2439 print_small_region(szone_t *szone, boolean_t verbose, small_region_t *region, size_t bytes_at_end)
2440 {
2441 unsigned counts[1024];
2442 unsigned in_use = 0;
2443 void *start = SMALL_REGION_ADDRESS(*region);
2444 void *limit = SMALL_REGION_END(*region) - bytes_at_end;
2445 msize_t msize_and_free;
2446 msize_t msize;
2447 unsigned ci;
2448
2449 memset(counts, 0, 1024 * sizeof(unsigned));
2450 while (start < limit) {
2451 msize_and_free = *SMALL_METADATA_FOR_PTR(start);
2452 msize = msize_and_free & ~ SMALL_IS_FREE;
2453 if (!(msize_and_free & SMALL_IS_FREE)) {
2454 // block in use
2455 if (msize < 1024)
2456 counts[msize]++;
2457 in_use++;
2458 }
2459 start += SMALL_BYTES_FOR_MSIZE(msize);
2460 }
2461 malloc_printf("Small region [%p-%p, %y]\tIn_use=%d ",
2462 SMALL_REGION_ADDRESS(*region), SMALL_REGION_END(*region), (int)SMALL_REGION_SIZE, in_use);
2463 if (bytes_at_end)
2464 malloc_printf("Untouched=%y ", bytes_at_end);
2465 if (verbose && in_use) {
2466 malloc_printf("\n\tSizes in use: ");
2467 for (ci = 0; ci < 1024; ci++)
2468 if (counts[ci])
2469 malloc_printf("%d[%d] ", SMALL_BYTES_FOR_MSIZE(ci), counts[ci]);
2470 }
2471 malloc_printf("\n");
2472 }
2473
2474 static boolean_t
2475 small_free_list_check(szone_t *szone, grain_t grain)
2476 {
2477 unsigned count = 0;
2478 free_list_t *ptr = szone->small_free_list[grain];
2479 free_list_t *previous = NULL;
2480 msize_t msize_and_free;
2481
2482 CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
2483 while (ptr) {
2484 msize_and_free = *SMALL_METADATA_FOR_PTR(ptr);
2485 count++;
2486 if (!(msize_and_free & SMALL_IS_FREE)) {
2487 malloc_printf("*** in-use ptr in free list grain=%d count=%d ptr=%p\n", grain, count, ptr);
2488 return 0;
2489 }
2490 if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) {
2491 malloc_printf("*** unaligned ptr in free list grain=%d count=%d ptr=%p\n", grain, count, ptr);
2492 return 0;
2493 }
2494 if (!small_region_for_ptr_no_lock(szone, ptr)) {
2495 malloc_printf("*** ptr not in szone grain=%d count=%d ptr=%p\n", grain, count, ptr);
2496 return 0;
2497 }
2498 free_list_checksum(szone, ptr, __PRETTY_FUNCTION__);
2499 if (ptr->previous != previous) {
2500 malloc_printf("*** previous incorrectly set grain=%d count=%d ptr=%p\n", grain, count, ptr);
2501 return 0;
2502 }
2503 previous = ptr;
2504 ptr = ptr->next;
2505 }
2506 return 1;
2507 }
2508
2509 /********************* LARGE ENTRY UTILITIES ************************/
2510
2511 #if DEBUG_MALLOC
2512
2513 static void
2514 large_debug_print(szone_t *szone)
2515 {
2516 unsigned num_large_entries = szone->num_large_entries;
2517 unsigned index = num_large_entries;
2518 large_entry_t *range;
2519
2520 for (index = 0, range = szone->large_entries; index < szone->num_large_entries; index++, range++)
2521 if (!LARGE_ENTRY_IS_EMPTY(*range))
2522 malloc_printf("%d: %p(%y); ", index, LARGE_ENTRY_ADDRESS(*range), LARGE_ENTRY_SIZE(*range));
2523
2524 malloc_printf("\n");
2525 }
2526 #endif
2527
2528 /*
2529 * Scan the hash ring looking for an entry for the given pointer.
2530 */
2531 static large_entry_t *
2532 large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr)
2533 {
2534 // result only valid with lock held
2535 unsigned num_large_entries = szone->num_large_entries;
2536 unsigned hash_index;
2537 unsigned index;
2538 large_entry_t *range;
2539
2540 if (!num_large_entries)
2541 return NULL;
2542 hash_index = ((uintptr_t)ptr >> vm_page_shift) % num_large_entries;
2543 index = hash_index;
2544 do {
2545 range = szone->large_entries + index;
2546 if (LARGE_ENTRY_MATCHES(*range, ptr))
2547 return range;
2548 if (LARGE_ENTRY_IS_EMPTY(*range))
2549 return NULL; // end of chain
2550 index++;
2551 if (index == num_large_entries)
2552 index = 0;
2553 } while (index != hash_index);
2554 return NULL;
2555 }
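/*
 * Probe-sequence example (vm_page_shift is 12 here, so the key is the page
 * number): with num_large_entries = 63 and ptr = 0x2000000, hash_index =
 * (0x2000000 >> 12) % 63 = 8192 % 63 = 2.  Slot 2 is checked first; on a
 * mismatch the scan moves to 3, 4, ... (wrapping back to 0 at 63) and stops
 * at either the matching entry or the first empty slot.
 */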
2556
2557 static void
2558 large_entry_insert_no_lock(szone_t *szone, large_entry_t range)
2559 {
2560 unsigned num_large_entries = szone->num_large_entries;
2561 unsigned hash_index = (range.address_and_num_pages >> vm_page_shift) % num_large_entries;
2562 unsigned index = hash_index;
2563 large_entry_t *entry;
2564
2565 do {
2566 entry = szone->large_entries + index;
2567 if (LARGE_ENTRY_IS_EMPTY(*entry)) {
2568 *entry = range;
2569 return; // end of chain
2570 }
2571 index++;
2572 if (index == num_large_entries)
2573 index = 0;
2574 } while (index != hash_index);
2575 }
2576
2577 static INLINE void
2578 large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry)
2579 {
2580 unsigned num_large_entries = szone->num_large_entries;
2581 unsigned hash_index = entry - szone->large_entries;
2582 unsigned index = hash_index;
2583 large_entry_t range;
2584
2585 do {
2586 index++;
2587 if (index == num_large_entries)
2588 index = 0;
2589 range = szone->large_entries[index];
2590 if (LARGE_ENTRY_IS_EMPTY(range))
2591 return;
2592 szone->large_entries[index].address_and_num_pages = 0;
2593 large_entry_insert_no_lock(szone, range); // this will reinsert in the
2594 // proper place
2595 } while (index != hash_index);
2596 }
2597
2598 static INLINE large_entry_t *
2599 large_entries_alloc_no_lock(szone_t *szone, unsigned num)
2600 {
2601 size_t size = num * sizeof(large_entry_t);
2602 boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;
2603
2604 if (is_vm_allocation) {
2605 // Note that we allocate memory (via a system call) under a spin lock
2606 // That is certainly evil, however it's very rare in the lifetime of a process
2607 // The alternative would slow down the normal case
2608 return (void *)allocate_pages(szone, round_page(size), 0, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE));
2609 } else {
2610 return small_malloc_cleared_no_lock(szone, SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1));
2611 }
2612 }
2613
2614 static void
2615 large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate)
2616 {
2617 // returns range to deallocate
2618 size_t size = num * sizeof(large_entry_t);
2619 boolean_t is_vm_allocation = size >= LARGE_THRESHOLD;
2620 small_region_t *region;
2621 msize_t msize_and_free;
2622
2623 if (is_vm_allocation) {
2624 range_to_deallocate->address = (vm_address_t)entries;
2625 range_to_deallocate->size = round_page(size);
2626 } else {
2627 range_to_deallocate->size = 0;
2628 region = small_region_for_ptr_no_lock(szone, entries);
2629 msize_and_free = *SMALL_METADATA_FOR_PTR(entries);
2630 if (msize_and_free & SMALL_IS_FREE) {
2631 szone_error(szone, "object already freed being freed", entries);
2632 return;
2633 }
2634 small_free_no_lock(szone, region, entries, msize_and_free);
2635 }
2636 }
2637
2638 static void
2639 large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate)
2640 {
2641 // sets range_to_deallocate
2642 unsigned old_num_entries = szone->num_large_entries;
2643 large_entry_t *old_entries = szone->large_entries;
2644 unsigned new_num_entries = (old_num_entries) ? old_num_entries * 2 + 1 : 63; // always an odd number for good hashing
2645 large_entry_t *new_entries = large_entries_alloc_no_lock(szone, new_num_entries);
2646 unsigned index = old_num_entries;
2647 large_entry_t oldRange;
2648
2649 szone->num_large_entries = new_num_entries;
2650 szone->large_entries = new_entries;
2651
2652 /* rehash entries into the new list */
2653 while (index--) {
2654 oldRange = old_entries[index];
2655 if (!LARGE_ENTRY_IS_EMPTY(oldRange)) {
2656 large_entry_insert_no_lock(szone, oldRange);
2657 }
2658 }
2659 if (old_entries) {
2660 large_entries_free_no_lock(szone, old_entries, old_num_entries, range_to_deallocate);
2661 } else {
2662 range_to_deallocate->size = 0;
2663 }
2664 }
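/*
 * Growth schedule implied above: 0 -> 63 -> 127 -> 255 -> 511 entries, i.e.
 * 2n + 1 at every step so the table size stays odd.  The caller grows the
 * table once the in-use count would exceed a quarter of the entries (see
 * large_and_huge_malloc), keeping probe chains short.
 */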
2665
2666 // frees the specific entry in the size table
2667 // returns a range to truly deallocate
2668 static vm_range_t
2669 large_free_no_lock(szone_t *szone, large_entry_t *entry)
2670 {
2671 vm_range_t range;
2672
2673 range.address = (vm_address_t)LARGE_ENTRY_ADDRESS(*entry);
2674 range.size = (vm_size_t)LARGE_ENTRY_SIZE(*entry);
2675 szone->num_large_objects_in_use--;
2676 szone->num_bytes_in_large_objects -= range.size;
2677 if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
2678 protect(szone, (void *)range.address, range.size, VM_PROT_READ | VM_PROT_WRITE, szone->debug_flags);
2679 range.address -= vm_page_size;
2680 range.size += 2 * vm_page_size;
2681 }
2682 entry->address_and_num_pages = 0;
2683 large_entries_rehash_after_entry_no_lock(szone, entry);
2684 #if DEBUG_MALLOC
2685 if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) {
2686 malloc_printf("*** freed entry %p still in use; num_large_entries=%d\n",
2687 range.address, szone->num_large_entries);
2688 large_debug_print(szone);
2689 szone_sleep();
2690 }
2691 #endif
2692 return range;
2693 }
2694
2695 static kern_return_t
2696 large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder)
2697 {
2698 unsigned index = 0;
2699 vm_range_t buffer[MAX_RECORDER_BUFFER];
2700 unsigned count = 0;
2701 large_entry_t *entries;
2702 kern_return_t err;
2703 vm_range_t range;
2704 large_entry_t entry;
2705
2706 err = reader(task, large_entries_address, sizeof(large_entry_t) * num_entries, (void **)&entries);
2707 if (err)
2708 return err;
2709 index = num_entries;
2710 if ((type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) &&
2711 (num_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD)) {
2712 range.address = large_entries_address;
2713 range.size = round_page(num_entries * sizeof(large_entry_t));
2714 recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &range, 1);
2715 }
2716 if (type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE))
2717 while (index--) {
2718 entry = entries[index];
2719 if (!LARGE_ENTRY_IS_EMPTY(entry)) {
2720 range.address = (vm_address_t)LARGE_ENTRY_ADDRESS(entry);
2721 range.size = (vm_size_t)LARGE_ENTRY_SIZE(entry);
2722 buffer[count++] = range;
2723 if (count >= MAX_RECORDER_BUFFER) {
2724 recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count);
2725 count = 0;
2726 }
2727 }
2728 }
2729 if (count) {
2730 recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE
2731 | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count);
2732 }
2733 return 0;
2734 }
2735
2736 /********************* HUGE ENTRY UTILITIES ************************/
2737
2738 static huge_entry_t *
2739 huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr)
2740 {
2741 unsigned index;
2742 huge_entry_t *huge;
2743
2744 for (index = szone->num_huge_entries, huge = szone->huge_entries;
2745 index > 0;
2746 index--, huge++) {
2747
2748 if ((void *)huge->address == ptr)
2749 return huge;
2750 }
2751 return NULL;
2752 }
2753
2754 static boolean_t
2755 huge_entry_append(szone_t *szone, huge_entry_t huge)
2756 {
2757 huge_entry_t *new_huge_entries = NULL, *old_huge_entries;
2758 unsigned num_huge_entries;
2759
2760 // We do a little dance with locking because doing allocation (even in the
2761 // default szone) may cause something to get freed in this szone, with a
2762 // deadlock
2763 // Returns 1 on success
2764 SZONE_LOCK(szone);
2765 for (;;) {
2766 num_huge_entries = szone->num_huge_entries;
2767 SZONE_UNLOCK(szone);
2768 /* check for counter wrap */
2769 if ((num_huge_entries + 1) < num_huge_entries)
2770 return 0;
2771 /* stale allocation from last time around the loop? */
2772 if (new_huge_entries)
2773 szone_free(szone, new_huge_entries);
2774 new_huge_entries = szone_malloc(szone, (num_huge_entries + 1) * sizeof(huge_entry_t));
2775 if (new_huge_entries == NULL)
2776 return 0;
2777 SZONE_LOCK(szone);
2778 if (num_huge_entries == szone->num_huge_entries) {
2779 // No change - our malloc still applies
2780 old_huge_entries = szone->huge_entries;
2781 if (num_huge_entries) {
2782 memcpy(new_huge_entries, old_huge_entries, num_huge_entries * sizeof(huge_entry_t));
2783 }
2784 new_huge_entries[szone->num_huge_entries++] = huge;
2785 szone->huge_entries = new_huge_entries;
2786 SZONE_UNLOCK(szone);
2787 szone_free(szone, old_huge_entries);
2788 return 1;
2789 }
2790 // try again!
2791 }
2792 }
2793
2794 static kern_return_t
2795 huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder)
2796 {
2797 huge_entry_t *entries;
2798 kern_return_t err;
2799
2800 err = reader(task, huge_entries_address, sizeof(huge_entry_t) * num_entries, (void **)&entries);
2801 if (err)
2802 return err;
2803 if (num_entries)
2804 recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, entries, num_entries);
2805
2806 return 0;
2807 }
2808
2809 static void *
2810 large_and_huge_malloc(szone_t *szone, unsigned num_pages)
2811 {
2812 void *addr;
2813 vm_range_t range_to_deallocate;
2814 huge_entry_t huge_entry;
2815 size_t size;
2816 large_entry_t large_entry;
2817
2818 if (!num_pages)
2819 num_pages = 1; // minimal allocation size for this szone
2820 size = (size_t)num_pages << vm_page_shift;
2821 range_to_deallocate.size = 0;
2822 if (num_pages >= (1 << vm_page_shift)) {
2823 addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_HUGE));
2824 if (addr == NULL)
2825 return NULL;
2826 huge_entry.size = size;
2827 huge_entry.address = (vm_address_t)addr;
2828 if (!huge_entry_append(szone, huge_entry))
2829 return NULL; // we are leaking the allocation here
2830 SZONE_LOCK(szone);
2831 szone->num_bytes_in_huge_objects += size;
2832 } else {
2833
2834 addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE));
2835 #if DEBUG_MALLOC
2836 if (LOG(szone, addr))
2837 malloc_printf("in szone_malloc true large allocation at %p for %y\n", (void *)addr, size);
2838 #endif
2839 SZONE_LOCK(szone);
2840 if (addr == NULL) {
2841 SZONE_UNLOCK(szone);
2842 return NULL;
2843 }
2844 #if DEBUG_MALLOC
2845 if (large_entry_for_pointer_no_lock(szone, addr)) {
2846 malloc_printf("freshly allocated is already in use: %p\n", addr);
2847 large_debug_print(szone);
2848 szone_sleep();
2849 }
2850 #endif
2851 if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) {
2852 // density of hash table too high; grow table
2853 // we do that under lock to avoid a race
2854 large_entries_grow_no_lock(szone, &range_to_deallocate);
2855 }
2856 large_entry.address_and_num_pages = (uintptr_t)addr | num_pages;
2857 #if DEBUG_MALLOC
2858 if (large_entry_for_pointer_no_lock(szone, addr)) {
2859 malloc_printf("entry about to be added already in use: %p\n", addr);
2860 large_debug_print(szone);
2861 szone_sleep();
2862 }
2863 #endif
2864 large_entry_insert_no_lock(szone, large_entry);
2865 #if DEBUG_MALLOC
2866 if (!large_entry_for_pointer_no_lock(szone, (void *)addr)) {
2867 malloc_printf("can't find entry just added\n");
2868 large_debug_print(szone);
2869 szone_sleep();
2870 }
2871 #endif
2872 szone->num_large_objects_in_use ++;
2873 szone->num_bytes_in_large_objects += size;
2874 }
2875 SZONE_UNLOCK(szone);
2876 if (range_to_deallocate.size) {
2877 deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0); // we deallocate outside the lock
2878 }
2879 return (void *)addr;
2880 }
2881
2882 static INLINE void
2883 free_large_or_huge(szone_t *szone, void *ptr)
2884 {
2885 // We have established ptr is page-aligned and not tiny nor small
2886 large_entry_t *entry;
2887 vm_range_t vm_range_to_deallocate;
2888 huge_entry_t *huge;
2889
2890 SZONE_LOCK(szone);
2891 entry = large_entry_for_pointer_no_lock(szone, ptr);
2892 if (entry) {
2893 vm_range_to_deallocate = large_free_no_lock(szone, entry);
2894 #if DEBUG_MALLOC
2895 if (large_entry_for_pointer_no_lock(szone, ptr)) {
2896 malloc_printf("*** just after freeing %p still in use num_large_entries=%d\n", ptr, szone->num_large_entries);
2897 large_debug_print(szone);
2898 szone_sleep();
2899 }
2900 #endif
2901 } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) {
2902 vm_range_to_deallocate = *huge;
2903 *huge = szone->huge_entries[--szone->num_huge_entries]; // last entry fills that spot
2904 szone->num_bytes_in_huge_objects -= (size_t)vm_range_to_deallocate.size;
2905 } else {
2906 #if DEBUG_MALLOC
2907 large_debug_print(szone);
2908 #endif
2909 szone_error(szone, "pointer being freed was not allocated", ptr);
2910 return;
2911 }
2912 SZONE_UNLOCK(szone); // we release the lock asap
2913 CHECK(szone, __PRETTY_FUNCTION__);
2914 // we deallocate_pages, including guard pages
2915 if (vm_range_to_deallocate.address) {
2916 #if DEBUG_MALLOC
2917 if (large_entry_for_pointer_no_lock(szone, (void *)vm_range_to_deallocate.address)) {
2918 malloc_printf("*** invariant broken: %p still in use num_large_entries=%d\n", vm_range_to_deallocate.address, szone->num_large_entries);
2919 large_debug_print(szone);
2920 szone_sleep();
2921 }
2922 #endif
2923 deallocate_pages(szone, (void *)vm_range_to_deallocate.address, (size_t)vm_range_to_deallocate.size, 0);
2924 }
2925 }
2926
2927 static INLINE int
2928 try_realloc_large_or_huge_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
2929 {
2930 vm_address_t addr = (vm_address_t)ptr + old_size;
2931 large_entry_t *large_entry, saved_entry;
2932 huge_entry_t *huge_entry, huge;
2933 kern_return_t err;
2934
2935 #if DEBUG_MALLOC
2936 if (old_size != ((old_size >> vm_page_shift) << vm_page_shift)) {
2937 malloc_printf("*** old_size is %d\n", old_size);
2938 }
2939 #endif
2940 SZONE_LOCK(szone);
2941 large_entry = large_entry_for_pointer_no_lock(szone, (void *)addr);
2942 SZONE_UNLOCK(szone);
2943 if (large_entry) {
2944 return 0; // large pointer already exists in table - extension is not going to work
2945 }
2946 new_size = round_page(new_size);
2947 /*
2948 * Ask for allocation at a specific address, and mark as realloc
2949 * to request coalescing with previous realloc'ed extensions.
2950 */
2951 err = vm_allocate(mach_task_self(), &addr, new_size - old_size, VM_MAKE_TAG(VM_MEMORY_REALLOC));
2952 if (err != KERN_SUCCESS) {
2953 return 0;
2954 }
2955 SZONE_LOCK(szone);
2956 /*
2957 * If the new size is still under the large/huge threshold, we can just
2958 * extend the existing large block.
2959 *
2960 * Note: this logic is predicated on the understanding that an allocated
2961 * block can never really shrink, so that the new size will always be
2962 * larger than the old size.
2963 *
2964 * Note: the use of 1 << vm_page_shift here has to do with the subdivision
2965 * of the bits in the large_entry_t, and not the size of a page (directly).
2966 */
2967 if ((new_size >> vm_page_shift) < (1 << vm_page_shift)) {
2968 /* extend existing large entry */
2969 large_entry = large_entry_for_pointer_no_lock(szone, ptr);
2970 if (!large_entry) {
2971 szone_error(szone, "large entry reallocated is not properly in table", ptr);
2972 /* XXX will cause fault on next reference to entry */
2973 }
2974 large_entry->address_and_num_pages = (uintptr_t)ptr | (new_size >> vm_page_shift);
2975 szone->num_bytes_in_large_objects += new_size - old_size;
2976 } else if ((old_size >> vm_page_shift) >= (1 << vm_page_shift)) {
2977 /* extend existing huge entry */
2978 huge_entry = huge_entry_for_pointer_no_lock(szone, ptr);
2979 if (!huge_entry) {
2980 szone_error(szone, "huge entry reallocated is not properly in table", ptr);
2981 /* XXX will cause fault on next reference to huge_entry */
2982 }
2983 huge_entry->size = new_size;
2984 szone->num_bytes_in_huge_objects += new_size - old_size;
2985 } else {
2986 /* need to convert large entry to huge entry */
2987
2988 /* release large entry, note we still have the VM allocation */
2989 large_entry = large_entry_for_pointer_no_lock(szone, ptr);
2990 saved_entry = *large_entry; // in case we need to put it back
2991 large_free_no_lock(szone, large_entry);
2992 szone->num_bytes_in_large_objects -= old_size;
2993
2994 /* and get a huge entry */
2995 huge.address = (vm_address_t)ptr;
2996 huge.size = new_size; /* fix up size */
2997 SZONE_UNLOCK(szone);
2998 if (huge_entry_append(szone, huge)) {
2999 szone->num_bytes_in_huge_objects += new_size;
3000 return 1; // success!
3001 }
3002 SZONE_LOCK(szone);
3003 // we leak memory (the extra space appended) but data structures are correct
3004 large_entry_insert_no_lock(szone, saved_entry); // this will reinsert the large entry
3005 }
3006 SZONE_UNLOCK(szone); // we release the lock asap
3007 return 1;
3008 }
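/*
 * Boundary illustration for the large/huge split used above: a large entry
 * packs num_pages into the low vm_page_shift bits of address_and_num_pages,
 * so it can describe at most (1 << vm_page_shift) - 1 pages.  With 4KB pages
 * that is 4095 pages (just under 16MB); growing an allocation past that point
 * is what forces the large entry to be converted into a huge entry.
 */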
3009
3010 /********************* Zone call backs ************************/
3011
3012 static void
3013 szone_free(szone_t *szone, void *ptr)
3014 {
3015 tiny_region_t *tiny_region;
3016 small_region_t *small_region;
3017
3018 #if DEBUG_MALLOC
3019 if (LOG(szone, ptr))
3020 malloc_printf("in szone_free with %p\n", ptr);
3021 #endif
3022 if (!ptr)
3023 return;
3024 /*
3025 * Try to free to a tiny region.
3026 */
3027 if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) {
3028 szone_error(szone, "Non-aligned pointer being freed", ptr);
3029 return;
3030 }
3031 if ((tiny_region = tiny_region_for_ptr_no_lock(szone, ptr)) != NULL) {
3032 free_tiny(szone, ptr, tiny_region);
3033 return;
3034 }
3035
3036 /*
3037 * Try to free to a small region.
3038 */
3039 if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) {
3040 szone_error(szone, "Non-aligned pointer being freed (2)", ptr);
3041 return;
3042 }
3043 if ((small_region = small_region_for_ptr_no_lock(szone, ptr)) != NULL) {
3044 free_small(szone, ptr, small_region);
3045 return;
3046 }
3047
3048 /* check that it's a legal large/huge allocation */
3049 if ((uintptr_t)ptr & (vm_page_size - 1)) {
3050 szone_error(szone, "non-page-aligned, non-allocated pointer being freed", ptr);
3051 return;
3052 }
3053 free_large_or_huge(szone, ptr);
3054 }
3055
3056 static INLINE void *
3057 szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested)
3058 {
3059 void *ptr;
3060 msize_t msize;
3061 unsigned num_pages;
3062
3063 if (size <= 31*TINY_QUANTUM) {
3064 // think tiny
3065 msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
3066 if (!msize)
3067 msize = 1;
3068 ptr = tiny_malloc_should_clear(szone, msize, cleared_requested);
3069 } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
3070 // think small
3071 msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
3072 if (! msize) msize = 1;
3073 ptr = small_malloc_should_clear(szone, msize, cleared_requested);
3074 } else {
3075 // large or huge
3076 num_pages = round_page(size) >> vm_page_shift;
3077 ptr = large_and_huge_malloc(szone, num_pages);
3078 }
3079 #if DEBUG_MALLOC
3080 if (LOG(szone, ptr))
3081 malloc_printf("szone_malloc returned %p\n", ptr);
3082 #endif
3083 /*
3084 * If requested, scribble on allocated memory.
3085 */
3086 if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && ptr && !cleared_requested && size)
3087 memset(ptr, 0xaa, size);
3088
3089 return ptr;
3090 }
3091
3092 static void *
3093 szone_malloc(szone_t *szone, size_t size) {
3094 return szone_malloc_should_clear(szone, size, 0);
3095 }
3096
3097 static void *
3098 szone_calloc(szone_t *szone, size_t num_items, size_t size)
3099 {
	size_t total_bytes = num_items * size;

	/* guard against overflow of num_items * size */
	if (num_items && (total_bytes / num_items) != size)
		return NULL;
	return szone_malloc_should_clear(szone, total_bytes, 1);
3101 }
3102
3103 static void *
3104 szone_valloc(szone_t *szone, size_t size)
3105 {
3106 void *ptr;
3107 unsigned num_pages;
3108
3109 num_pages = round_page(size) >> vm_page_shift;
3110 ptr = large_and_huge_malloc(szone, num_pages);
3111 #if DEBUG_MALLOC
3112 if (LOG(szone, ptr))
3113 malloc_printf("szone_valloc returned %p\n", ptr);
3114 #endif
3115 return ptr;
3116 }
3117
3118 static size_t
3119 szone_size(szone_t *szone, const void *ptr)
3120 {
3121 size_t size = 0;
3122 boolean_t is_free;
3123 msize_t msize, msize_and_free;
3124 large_entry_t *entry;
3125 huge_entry_t *huge;
3126
3127 if (!ptr)
3128 return 0;
3129 #if DEBUG_MALLOC
3130 if (LOG(szone, ptr)) {
3131 malloc_printf("in szone_size for %p (szone=%p)\n", ptr, szone);
3132 }
3133 #endif
3134
3135 /*
3136 * Look for it in a tiny region.
3137 */
3138 if ((uintptr_t)ptr & (TINY_QUANTUM - 1))
3139 return 0;
3140 if (tiny_region_for_ptr_no_lock(szone, ptr)) {
3141 msize = get_tiny_meta_header(ptr, &is_free);
3142 return (is_free) ? 0 : TINY_BYTES_FOR_MSIZE(msize);
3143 }
3144
3145 /*
3146 * Look for it in a small region.
3147 */
3148 if ((uintptr_t)ptr & (SMALL_QUANTUM - 1))
3149 return 0;
3150 if (small_region_for_ptr_no_lock(szone, ptr)) {
3151 msize_and_free = *SMALL_METADATA_FOR_PTR(ptr);
3152 return (msize_and_free & SMALL_IS_FREE) ? 0 : SMALL_BYTES_FOR_MSIZE(msize_and_free);
3153 }
3154
3155 /*
3156 * If not page-aligned, it cannot have come from a large or huge allocation.
3157 */
3158 if ((uintptr_t)ptr & (vm_page_size - 1))
3159 return(0);
3160
3161 /*
3162 * Look for it in a large or huge entry.
3163 */
3164 SZONE_LOCK(szone);
3165 entry = large_entry_for_pointer_no_lock(szone, ptr);
3166 if (entry) {
3167 size = LARGE_ENTRY_SIZE(*entry);
3168 } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) {
3169 size = huge->size;
3170 }
3171 SZONE_UNLOCK(szone);
3172 #if DEBUG_MALLOC
3173 if (LOG(szone, ptr)) {
3174 malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)size);
3175 }
3176 #endif
3177 return size;
3178 }
3179
3180 static void *
3181 szone_realloc(szone_t *szone, void *ptr, size_t new_size)
3182 {
3183 size_t old_size;
3184 void *new_ptr;
3185
3186 #if DEBUG_MALLOC
3187 if (LOG(szone, ptr)) {
3188 malloc_printf("in szone_realloc for %p, %d\n", ptr, (unsigned)new_size);
3189 }
3190 #endif
3191 if (!ptr) {
3192 ptr = szone_malloc(szone, new_size);
3193 return ptr;
3194 }
3195 old_size = szone_size(szone, ptr);
3196 if (!old_size) {
3197 szone_error(szone, "pointer being reallocated was not allocated", ptr);
3198 return NULL;
3199 }
3200 /* we never shrink an allocation */
3201 if (old_size >= new_size)
3202 return ptr;
3203
3204 /*
3205 * If the old and new sizes both suit the tiny allocator, try to reallocate in-place.
3206 */
3207 if ((new_size + TINY_QUANTUM - 1) <= 31 * TINY_QUANTUM) {
3208 if (try_realloc_tiny_in_place(szone, ptr, old_size, new_size)) {
3209 return ptr;
3210 }
3211
3212 /*
3213 * If the old and new sizes both suit the small allocator, and we're not protecting the
3214 * small allocations, try to reallocate in-place.
3215 */
3216 } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) &&
3217 ((new_size + SMALL_QUANTUM - 1) < LARGE_THRESHOLD) &&
3218 (old_size > 31 * TINY_QUANTUM)) {
3219 if (try_realloc_small_in_place(szone, ptr, old_size, new_size)) {
3220 return ptr;
3221 }
3222
3223 /*
	 * If it's a large or huge allocation, try to reallocate it in place there.
3225 */
3226 } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (old_size > LARGE_THRESHOLD)) {
3227 if (try_realloc_large_or_huge_in_place(szone, ptr, old_size, new_size)) {
3228 return ptr;
3229 }
3230 }
3231
3232 /*
3233 * Can't reallocate in place for whatever reason; allocate a new buffer and copy.
3234 */
3235 new_ptr = szone_malloc(szone, new_size);
3236 if (new_ptr == NULL)
3237 return NULL;
3238
3239 /*
3240 * If the allocation's large enough, try to copy using VM. If that fails, or
3241 * if it's too small, just copy by hand.
3242 */
3243 if ((old_size < VM_COPY_THRESHOLD) ||
3244 vm_copy(mach_task_self(), (vm_address_t)ptr, old_size, (vm_address_t)new_ptr))
3245 memcpy(new_ptr, ptr, old_size);
3246 szone_free(szone, ptr);
3247
3248 #if DEBUG_MALLOC
3249 if (LOG(szone, ptr)) {
3250 malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size);
3251 }
3252 #endif
3253 return new_ptr;
3254 }
3255
// given a size, fills "results" with pointers each capable of holding that size
// returns the number of pointers actually allocated
// may return 0 - this function makes a best-effort attempt, nothing more
// (see the usage sketch after szone_batch_free below)
3259 static unsigned
3260 szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count)
3261 {
3262 msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
3263 size_t chunk_size = TINY_BYTES_FOR_MSIZE(msize);
3264 free_list_t **free_list = szone->tiny_free_list + msize - 1;
3265 free_list_t *ptr = *free_list;
3266 unsigned found = 0;
3267
3268 if (size > 31*TINY_QUANTUM)
3269 return 0; // only bother implementing this for tiny
3270 if (!msize)
3271 msize = 1;
3272 CHECK(szone, __PRETTY_FUNCTION__);
3273 SZONE_LOCK(szone); // might as well lock right here to avoid concurrency issues
3274 while (found < count) {
3275 if (!ptr)
3276 break;
3277 *results++ = ptr;
3278 found++;
3279 set_tiny_meta_header_in_use(ptr, msize);
3280 ptr = ptr->next;
3281 }
3282 if (ptr) {
3283 ptr->previous = NULL;
3284 free_list_set_checksum(szone, ptr);
3285 }
3286 *free_list = ptr;
3287
	// Note that we could allocate from the free lists for larger msize,
	// but that may fragment unnecessarily - so we might as well let the client do that.
	// We could also allocate from szone->tiny_bytes_free_at_end,
	// but that would consume the untouched area faster, increasing the working set.
	// So we just return what we have, and no more.
3293 szone->num_tiny_objects += found;
3294 szone->num_bytes_in_tiny_objects += chunk_size * found;
3295 SZONE_UNLOCK(szone);
3296 return found;
3297 }
3298
3299 static void
3300 szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count)
3301 {
3302 unsigned cc = 0;
3303 void *ptr;
3304 tiny_region_t *tiny_region;
3305 boolean_t is_free;
3306 msize_t msize;
3307
3308 // frees all the pointers in to_be_freed
3309 // note that to_be_freed may be overwritten during the process
3310 if (!count)
3311 return;
3312 CHECK(szone, __PRETTY_FUNCTION__);
3313 SZONE_LOCK(szone);
3314 while (cc < count) {
3315 ptr = to_be_freed[cc];
3316 /* XXX this really slows us down */
3317 tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
3318 if (tiny_region) {
3319 // this is a tiny pointer
3320 msize = get_tiny_meta_header(ptr, &is_free);
3321 if (is_free)
3322 break; // a double free; let the standard free deal with it
3323 tiny_free_no_lock(szone, tiny_region, ptr, msize);
3324 to_be_freed[cc] = NULL;
3325 }
3326 cc++;
3327 }
3328 SZONE_UNLOCK(szone);
3329 CHECK(szone, __PRETTY_FUNCTION__);
3330 while (count--) {
3331 ptr = to_be_freed[count];
3332 if (ptr)
3333 szone_free(szone, ptr);
3334 }
3335 }
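
/*
 * Illustrative sketch (not taken from the original sources) of how the two
 * batch entry points are meant to be used together: request a burst of
 * equally-sized tiny blocks, use them, then hand them all back.  The helper
 * name, the 24-byte size and the count of 32 are hypothetical.
 */
#if 0
static void
example_batch_usage(szone_t *szone)
{
	void *ptrs[32];
	unsigned obtained;

	/* best effort: may return fewer than 32 pointers, possibly zero */
	obtained = szone_batch_malloc(szone, 24, ptrs, 32);
	/* ... use ptrs[0 .. obtained-1] ... */
	szone_batch_free(szone, ptrs, obtained);
}
#endif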
3336
3337 static void
3338 szone_destroy(szone_t *szone)
3339 {
3340 unsigned index;
3341 small_region_t pended_region = 0;
3342 large_entry_t *large;
3343 vm_range_t range_to_deallocate;
3344 huge_entry_t *huge;
3345 tiny_region_t tiny_region;
3346 small_region_t small_region;
3347
3348 /* destroy large entries */
3349 index = szone->num_large_entries;
3350 while (index--) {
3351 large = szone->large_entries + index;
3352 if (!LARGE_ENTRY_IS_EMPTY(*large)) {
3353 // we deallocate_pages, including guard pages
3354 deallocate_pages(szone, (void *)LARGE_ENTRY_ADDRESS(*large), LARGE_ENTRY_SIZE(*large), szone->debug_flags);
3355 }
3356 }
3357 if (szone->num_large_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD) {
3358 // we do not free in the small chunk case
3359 large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate);
3360 if (range_to_deallocate.size)
3361 deallocate_pages(szone, (void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0);
3362 }
3363
3364 /* destroy huge entries */
3365 index = szone->num_huge_entries;
3366 while (index--) {
3367 huge = szone->huge_entries + index;
3368 deallocate_pages(szone, (void *)huge->address, huge->size, szone->debug_flags);
3369 }
3370
3371 /* destroy tiny regions */
3372 index = szone->num_tiny_regions;
3373 while (index--) {
3374 tiny_region = szone->tiny_regions[index];
3375 deallocate_pages(szone, TINY_REGION_ADDRESS(tiny_region), TINY_REGION_SIZE, 0);
3376 }
3377 /* destroy small regions; region 0 must go last as it contains the szone */
3378 index = szone->num_small_regions;
3379 while (index--) {
3380 small_region = szone->small_regions[index];
3381 /*
3382 * If we've allocated more than the basic set of small regions, avoid destroying the
3383 * region that contains the array.
3384 */
3385 if ((index > 0) &&
3386 (SMALL_REGION_FOR_PTR(szone->small_regions) == SMALL_REGION_ADDRESS(small_region))) {
3387 pended_region = small_region;
3388 } else {
3389 deallocate_pages(szone, (void *)SMALL_REGION_ADDRESS(small_region), SMALL_REGION_SIZE, 0);
3390 }
3391 }
3392 if (pended_region)
3393 deallocate_pages(NULL, (void *)SMALL_REGION_ADDRESS(pended_region), SMALL_REGION_SIZE, 0);
3394 }
3395
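/*
 * szone_good_size() reports the number of bytes that would actually be
 * allocated for a request of the given size: tiny requests round up to a
 * multiple of TINY_QUANTUM, small requests to a multiple of SMALL_QUANTUM,
 * and large/huge requests to whole VM pages.
 */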
3396 static size_t
3397 szone_good_size(szone_t *szone, size_t size)
3398 {
3399 msize_t msize;
3400 unsigned num_pages;
3401
3402 if (size <= 31 * TINY_QUANTUM) {
3403 // think tiny
3404 msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
3405 if (! msize) msize = 1;
		return TINY_BYTES_FOR_MSIZE(msize);
3407 }
3408 if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
3409 // think small
3410 msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
3411 if (! msize) msize = 1;
3412 return SMALL_BYTES_FOR_MSIZE(msize);
3413 } else {
3414 num_pages = round_page(size) >> vm_page_shift;
3415 if (!num_pages)
3416 num_pages = 1; // minimal allocation size for this
3417 return num_pages << vm_page_shift;
3418 }
3419 }
3420
3421 unsigned szone_check_counter = 0;
3422 unsigned szone_check_start = 0;
3423 unsigned szone_check_modulo = 1;
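/*
 * These knobs control how often szone_check() does a full consistency pass:
 * checking starts once szone_check_counter reaches szone_check_start, and
 * from then on runs only on every szone_check_modulo-th call.
 */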
3424
3425 static boolean_t
3426 szone_check_all(szone_t *szone, const char *function)
3427 {
3428 int index;
3429 tiny_region_t *tiny;
3430 small_region_t *small;
3431
3432 SZONE_LOCK(szone);
3433 CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
3434
	/* check tiny regions - could check region count */
3436 for (index = szone->num_tiny_regions - 1, tiny = szone->tiny_regions;
3437 index >= 0;
3438 index--, tiny++) {
3439 if (!tiny_check_region(szone, tiny)) {
3440 SZONE_UNLOCK(szone);
3441 szone->debug_flags &= ~ CHECK_REGIONS;
3442 malloc_printf("*** tiny region %d incorrect szone_check_all(%s) counter=%d\n",
3443 szone->num_tiny_regions - index, function, szone_check_counter);
3444 szone_error(szone, "check: tiny region incorrect", NULL);
3445 return 0;
3446 }
3447 }
3448
3449 for (index = NUM_TINY_SLOTS - 1; index >= 0; index--) {
3450 if (!tiny_free_list_check(szone, index)) {
3451 SZONE_UNLOCK(szone);
3452 szone->debug_flags &= ~ CHECK_REGIONS;
3453 malloc_printf("*** tiny free list incorrect (slot=%d) szone_check_all(%s) counter=%d\n",
3454 index, function, szone_check_counter);
3455 szone_error(szone, "check: tiny free list incorrect", NULL);
3456 return 0;
3457 }
3458 }
3459
3460 /* check small regions - could check region count */
3461 for (index = szone->num_small_regions - 1, small = szone->small_regions;
3462 index >= 0;
3463 index--, small++) {
3464 if (!szone_check_small_region(szone, small)) {
3465 SZONE_UNLOCK(szone);
3466 szone->debug_flags &= ~ CHECK_REGIONS;
3467 malloc_printf("*** small region %d incorrect szone_check_all(%s) counter=%d\n",
			szone->num_small_regions - index, function, szone_check_counter);
3469 szone_error(szone, "check: small region incorrect", NULL);
3470 return 0;
3471 }
3472 }
3473 for (index = NUM_SMALL_SLOTS - 1; index >= 0; index--) {
3474 if (!small_free_list_check(szone, index)) {
3475 SZONE_UNLOCK(szone);
3476 szone->debug_flags &= ~ CHECK_REGIONS;
3477 malloc_printf("*** small free list incorrect (grain=%d) szone_check_all(%s) counter=%d\n", index, function, szone_check_counter);
3478 szone_error(szone, "check: small free list incorrect", NULL);
3479 return 0;
3480 }
3481 }
3482 SZONE_UNLOCK(szone);
3483 // szone_print(szone, 1);
3484 return 1;
3485 }
3486
3487 static boolean_t
3488 szone_check(szone_t *szone)
3489 {
3490
3491 if ((++szone_check_counter % 10000) == 0)
3492 malloc_printf("at szone_check counter=%d\n", szone_check_counter);
3493 if (szone_check_counter < szone_check_start)
3494 return 1;
3495 if (szone_check_counter % szone_check_modulo)
3496 return 1;
3497 return szone_check_all(szone, "");
3498 }
3499
3500 static kern_return_t
3501 szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder)
3502 {
3503 szone_t *szone;
3504 kern_return_t err;
3505
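	/* walk the remote szone's tiny, small, large and huge metadata in turn,
	   reporting in-use ranges through the recorder */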
3506 if (!reader) reader = _szone_default_reader;
3507 err = reader(task, zone_address, sizeof(szone_t), (void **)&szone);
3508 if (err) return err;
3509 err = tiny_in_use_enumerator(task, context, type_mask,
3510 (vm_address_t)szone->tiny_regions, szone->num_tiny_regions, szone->tiny_bytes_free_at_end , reader, recorder);
3511 if (err) return err;
3512 err = small_in_use_enumerator(task, context, type_mask,
3513 (vm_address_t)szone->small_regions, szone->num_small_regions, szone->small_bytes_free_at_end , reader, recorder);
3514 if (err) return err;
3515 err = large_in_use_enumerator(task, context, type_mask,
3516 (vm_address_t)szone->large_entries, szone->num_large_entries, reader,
3517 recorder);
3518 if (err) return err;
3519 err = huge_in_use_enumerator(task, context, type_mask,
3520 (vm_address_t)szone->huge_entries, szone->num_huge_entries, reader,
3521 recorder);
3522 return err;
3523 }
3524
// The following entry point is deprecated: use scalable_zone_statistics instead
3526 void
3527 scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count)
3528 {
3529 szone_t *szone = (void *)zone;
3530 unsigned info[13];
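	/*
	 * Layout of the info[] array filled in below:
	 *   [0] total blocks in use        [1] total bytes in use
	 *   [2] touched bytes              [3] bytes allocated from the VM
	 *   [4]/[5]   tiny count/bytes     [6]/[7]   small count/bytes
	 *   [8]/[9]   large count/bytes    [10]/[11] huge count/bytes
	 *   [12] debug flags
	 */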
3531
	// We do not take the lock, so as to make debugging easier
3533 info[4] = szone->num_tiny_objects;
3534 info[5] = szone->num_bytes_in_tiny_objects;
3535 info[6] = szone->num_small_objects;
3536 info[7] = szone->num_bytes_in_small_objects;
3537 info[8] = szone->num_large_objects_in_use;
3538 info[9] = szone->num_bytes_in_large_objects;
3539 info[10] = szone->num_huge_entries;
3540 info[11] = szone->num_bytes_in_huge_objects;
3541 info[12] = szone->debug_flags;
3542 info[0] = info[4] + info[6] + info[8] + info[10];
3543 info[1] = info[5] + info[7] + info[9] + info[11];
3544 info[3] = szone->num_tiny_regions * TINY_REGION_SIZE + szone->num_small_regions * SMALL_REGION_SIZE + info[9] + info[11];
3545 info[2] = info[3] - szone->tiny_bytes_free_at_end - szone->small_bytes_free_at_end;
3546 memcpy(info_to_fill, info, sizeof(unsigned)*count);
3547 }
3548
3549 static void
3550 szone_print(szone_t *szone, boolean_t verbose)
3551 {
3552 unsigned info[13];
3553 unsigned index = 0;
3554 tiny_region_t *region;
3555
3556 SZONE_LOCK(szone);
3557 scalable_zone_info((void *)szone, info, 13);
3558 malloc_printf("Scalable zone %p: inUse=%d(%y) touched=%y allocated=%y flags=%d\n",
3559 szone, info[0], info[1], info[2], info[3], info[12]);
3560 malloc_printf("\ttiny=%d(%y) small=%d(%y) large=%d(%y) huge=%d(%y)\n",
3561 info[4], info[5], info[6], info[7], info[8], info[9], info[10], info[11]);
3562 // tiny
3563 malloc_printf("%d tiny regions: \n", szone->num_tiny_regions);
3564 while (index < szone->num_tiny_regions) {
3565 region = szone->tiny_regions + index;
3566 print_tiny_region(verbose, *region, (index == szone->num_tiny_regions - 1) ? szone->tiny_bytes_free_at_end : 0);
3567 index++;
3568 }
3569 if (verbose) print_tiny_free_list(szone);
3570 // small
3571 malloc_printf("%d small regions: \n", szone->num_small_regions);
3572 index = 0;
3573 while (index < szone->num_small_regions) {
		print_small_region(szone, verbose, szone->small_regions + index,
		    (index == szone->num_small_regions - 1) ? szone->small_bytes_free_at_end : 0);
3576 index++;
3577 }
3578 if (verbose)
3579 print_small_free_list(szone);
3580 SZONE_UNLOCK(szone);
3581 }
3582
3583 static void
3584 szone_log(malloc_zone_t *zone, void *log_address)
3585 {
3586 szone_t *szone = (szone_t *)zone;
3587
3588 szone->log_address = log_address;
3589 }
3590
3591 static void
3592 szone_force_lock(szone_t *szone)
3593 {
3594 SZONE_LOCK(szone);
3595 }
3596
3597 static void
3598 szone_force_unlock(szone_t *szone)
3599 {
3600 SZONE_UNLOCK(szone);
3601 }
3602
3603 boolean_t
3604 scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone)
3605 {
3606 szone_t *szone = (szone_t *)zone;
3607
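	/* subzone numbering: 0 = tiny, 1 = small, 2 = large, 3 = huge */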
3608 switch (subzone) {
3609 case 0:
3610 stats->blocks_in_use = szone->num_tiny_objects;
3611 stats->size_in_use = szone->num_bytes_in_tiny_objects;
3612 stats->size_allocated = szone->num_tiny_regions * TINY_REGION_SIZE;
3613 stats->max_size_in_use = stats->size_allocated - szone->tiny_bytes_free_at_end;
3614 return 1;
3615 case 1:
3616 stats->blocks_in_use = szone->num_small_objects;
3617 stats->size_in_use = szone->num_bytes_in_small_objects;
3618 stats->size_allocated = szone->num_small_regions * SMALL_REGION_SIZE;
3619 stats->max_size_in_use = stats->size_allocated - szone->small_bytes_free_at_end;
3620 return 1;
3621 case 2:
3622 stats->blocks_in_use = szone->num_large_objects_in_use;
3623 stats->size_in_use = szone->num_bytes_in_large_objects;
3624 stats->max_size_in_use = stats->size_allocated = stats->size_in_use;
3625 return 1;
3626 case 3:
3627 stats->blocks_in_use = szone->num_huge_entries;
3628 stats->size_in_use = szone->num_bytes_in_huge_objects;
3629 stats->max_size_in_use = stats->size_allocated = stats->size_in_use;
3630 return 1;
3631 }
3632 return 0;
3633 }
3634
3635 static void
3636 szone_statistics(szone_t *szone, malloc_statistics_t *stats)
3637 {
3638 size_t big_and_huge;
3639
3640 stats->blocks_in_use =
3641 szone->num_tiny_objects +
3642 szone->num_small_objects +
3643 szone->num_large_objects_in_use +
3644 szone->num_huge_entries;
3645 big_and_huge = szone->num_bytes_in_large_objects + szone->num_bytes_in_huge_objects;
3646 stats->size_in_use = szone->num_bytes_in_tiny_objects + szone->num_bytes_in_small_objects + big_and_huge;
3647 stats->max_size_in_use = stats->size_allocated =
3648 szone->num_tiny_regions * TINY_REGION_SIZE +
3649 szone->num_small_regions * SMALL_REGION_SIZE +
3650 big_and_huge ;
3651
3652 // Now we account for the untouched areas
3653 stats->max_size_in_use -= szone->tiny_bytes_free_at_end;
3654 stats->max_size_in_use -= szone->small_bytes_free_at_end;
3655 }
3656
3657 static const struct malloc_introspection_t szone_introspect = {
3658 (void *)szone_ptr_in_use_enumerator,
3659 (void *)szone_good_size,
3660 (void *)szone_check,
3661 (void *)szone_print,
3662 szone_log,
3663 (void *)szone_force_lock,
3664 (void *)szone_force_unlock,
3665 (void *)szone_statistics
3666 }; // marked as const to spare the DATA section
3667
3668 malloc_zone_t *
3669 create_scalable_zone(size_t initial_size, unsigned debug_flags)
3670 {
3671 szone_t *szone;
	size_t msize;
	msize_t free_msize;
3675
3676 /*
3677 * Sanity-check our build-time assumptions about the size of a page.
3678 * Since we have sized various things assuming the default page size,
3679 * attempting to determine it dynamically is not useful.
3680 */
3681 if ((vm_page_size != _vm_page_size) || (vm_page_shift != _vm_page_shift)) {
3682 malloc_printf("*** FATAL ERROR - machine page size does not match our assumptions.\n");
3683 exit(-1);
3684 }
3685
3686 /* get memory for the zone */
3687 szone = allocate_pages(NULL, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC));
3688 if (!szone)
3689 return NULL;
3690 /* set up the szone structure */
3691 szone->tiny_regions = szone->initial_tiny_regions;
3692 szone->small_regions = szone->initial_small_regions;
	szone->num_small_objects++;
3694 szone->basic_zone.version = 3;
3695 szone->basic_zone.size = (void *)szone_size;
3696 szone->basic_zone.malloc = (void *)szone_malloc;
3697 szone->basic_zone.calloc = (void *)szone_calloc;
3698 szone->basic_zone.valloc = (void *)szone_valloc;
3699 szone->basic_zone.free = (void *)szone_free;
3700 szone->basic_zone.realloc = (void *)szone_realloc;
3701 szone->basic_zone.destroy = (void *)szone_destroy;
3702 szone->basic_zone.batch_malloc = (void *)szone_batch_malloc;
3703 szone->basic_zone.batch_free = (void *)szone_batch_free;
3704 szone->basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;
3705 szone->debug_flags = debug_flags;
3706
	/* as the szone is allocated out of the first small region, reflect that allocation */
3708 szone->small_regions[0] = szone;
3709 szone->num_small_regions = 1;
3710 msize = SMALL_MSIZE_FOR_BYTES(sizeof(szone_t) + SMALL_QUANTUM - 1);
3711 free_msize = NUM_SMALL_BLOCKS - msize;
3712 *SMALL_METADATA_FOR_PTR(szone) = msize;
3713 *(SMALL_METADATA_FOR_PTR(szone) + msize) = free_msize;
3714 szone->small_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(free_msize);
3715
3716 LOCK_INIT(szone->lock);
3717 #if 0
3718 #warning CHECK_REGIONS enabled
3719 debug_flags |= CHECK_REGIONS;
3720 #endif
3721 #if 0
3722 #warning LOG enabled
3723 szone->log_address = ~0;
3724 #endif
3725 CHECK(szone, __PRETTY_FUNCTION__);
3726 return (malloc_zone_t *)szone;
3727 }
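
/*
 * Illustrative sketch (not taken from the original sources): creating a
 * private scalable zone and registering it with the malloc layer.  The
 * helper name is hypothetical; malloc_zone_register() is the same routine
 * used by malloc_jumpstart() below, and the guard-page flag comes from
 * scalable_malloc.h.
 */
#if 0
static malloc_zone_t *
example_create_private_zone(void)
{
	malloc_zone_t *zone;

	/* initial_size is unused by this implementation; debug_flags may be 0 */
	zone = create_scalable_zone(0, SCALABLE_MALLOC_ADD_GUARD_PAGES);
	if (zone)
		malloc_zone_register(zone);
	return zone;
}
#endif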
3728
3729 /********* Support code for emacs unexec ************/
3730
3731 /* History of freezedry version numbers:
3732 *
3733 * 1) Old malloc (before the scalable malloc implementation in this file
3734 * existed).
3735 * 2) Original freezedrying code for scalable malloc. This code was apparently
3736 * based on the old freezedrying code and was fundamentally flawed in its
3737 * assumption that tracking allocated memory regions was adequate to fake
3738 * operations on freezedried memory. This doesn't work, since scalable
3739 * malloc does not store flags in front of large page-aligned allocations.
3740 * 3) Original szone-based freezedrying code.
3741 * 4) Fresher malloc with tiny zone
3742 * 5) 32/64bit compatible malloc
3743 *
3744 * No version backward compatibility is provided, but the version number does
3745 * make it possible for malloc_jumpstart() to return an error if the application
3746 * was freezedried with an older version of malloc.
3747 */
3748 #define MALLOC_FREEZEDRY_VERSION 5
3749
3750 typedef struct {
3751 unsigned version;
3752 unsigned nszones;
3753 szone_t *szones;
3754 } malloc_frozen;
3755
3756 static void *
3757 frozen_malloc(szone_t *zone, size_t new_size)
3758 {
3759 return malloc(new_size);
3760 }
3761
3762 static void *
3763 frozen_calloc(szone_t *zone, size_t num_items, size_t size)
3764 {
3765 return calloc(num_items, size);
3766 }
3767
3768 static void *
3769 frozen_valloc(szone_t *zone, size_t new_size)
3770 {
3771 return valloc(new_size);
3772 }
3773
3774 static void *
3775 frozen_realloc(szone_t *zone, void *ptr, size_t new_size)
3776 {
3777 size_t old_size = szone_size(zone, ptr);
3778 void *new_ptr;
3779
3780 if (new_size <= old_size) {
3781 return ptr;
3782 }
	new_ptr = malloc(new_size);
	if (new_ptr == NULL)
		return NULL;
3784 if (old_size > 0) {
3785 memcpy(new_ptr, ptr, old_size);
3786 }
3787 return new_ptr;
3788 }
3789
3790 static void
3791 frozen_free(szone_t *zone, void *ptr)
3792 {
3793 }
3794
3795 static void
3796 frozen_destroy(szone_t *zone)
3797 {
3798 }
3799
3800 /********* Pseudo-private API for emacs unexec ************/
3801
3802 /*
3803 * malloc_freezedry() records all of the szones in use, so that they can be
3804 * partially reconstituted by malloc_jumpstart(). Due to the differences
3805 * between reconstituted memory regions and those created by the szone code,
3806 * care is taken not to reallocate from the freezedried memory, except in the
3807 * case of a non-growing realloc().
3808 *
3809 * Due to the flexibility provided by the zone registration mechanism, it is
3810 * impossible to implement generic freezedrying for any zone type. This code
3811 * only handles applications that use the szone allocator, so malloc_freezedry()
3812 * returns 0 (error) if any non-szone zones are encountered.
3813 */
3814
3815 uintptr_t
3816 malloc_freezedry(void)
3817 {
3818 extern unsigned malloc_num_zones;
3819 extern malloc_zone_t **malloc_zones;
3820 malloc_frozen *data;
3821 unsigned i;
3822
3823 /* Allocate space in which to store the freezedry state. */
	data = (malloc_frozen *) malloc(sizeof(malloc_frozen));
	if (data == NULL)
		return 0;
3825
3826 /* Set freezedry version number so that malloc_jumpstart() can check for compatibility. */
3827 data->version = MALLOC_FREEZEDRY_VERSION;
3828
3829 /* Allocate the array of szone pointers. */
3830 data->nszones = malloc_num_zones;
	data->szones = (szone_t *) calloc(malloc_num_zones, sizeof(szone_t));
	if (data->szones == NULL) {
		free(data);
		return 0;
	}
3832
3833 /*
3834 * Fill in the array of szone structures. They are copied rather than
3835 * referenced, since the originals are likely to be clobbered during malloc
3836 * initialization.
3837 */
3838 for (i = 0; i < malloc_num_zones; i++) {
3839 if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) {
3840 /* Unknown zone type. */
3841 free(data->szones);
3842 free(data);
3843 return 0;
3844 }
3845 memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t));
3846 }
3847
3848 return((uintptr_t)data);
3849 }
3850
3851 int
3852 malloc_jumpstart(uintptr_t cookie)
3853 {
3854 malloc_frozen *data = (malloc_frozen *)cookie;
3855 unsigned i;
3856
3857 if (data->version != MALLOC_FREEZEDRY_VERSION) {
3858 /* Unsupported freezedry version. */
3859 return 1;
3860 }
3861
3862 for (i = 0; i < data->nszones; i++) {
3863 /* Set function pointers. Even the functions that stay the same must be
3864 * set, since there are no guarantees that they will be mapped to the
3865 * same addresses. */
3866 data->szones[i].basic_zone.size = (void *) szone_size;
3867 data->szones[i].basic_zone.malloc = (void *) frozen_malloc;
3868 data->szones[i].basic_zone.calloc = (void *) frozen_calloc;
3869 data->szones[i].basic_zone.valloc = (void *) frozen_valloc;
3870 data->szones[i].basic_zone.free = (void *) frozen_free;
3871 data->szones[i].basic_zone.realloc = (void *) frozen_realloc;
3872 data->szones[i].basic_zone.destroy = (void *) frozen_destroy;
3873 data->szones[i].basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;
3874
3875 /* Register the freezedried zone. */
3876 malloc_zone_register(&data->szones[i].basic_zone);
3877 }
3878
3879 return 0;
3880 }
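
/*
 * Illustrative sketch (not taken from the original sources) of the calling
 * sequence described in the comments above: the dumping process freezedries
 * its zones before unexec, and the re-executed image jumpstarts them before
 * it allocates anything.  The helper names and the way the cookie is
 * preserved across unexec are hypothetical.
 */
#if 0
static uintptr_t saved_malloc_cookie;	/* assumed to survive the unexec dump */

static int
example_before_unexec(void)
{
	saved_malloc_cookie = malloc_freezedry();
	/* 0 means a non-szone zone was found and freezedrying is impossible */
	return saved_malloc_cookie ? 0 : -1;
}

static int
example_after_unexec(void)
{
	/* non-zero means the freezedry version is unsupported */
	return malloc_jumpstart(saved_malloc_cookie);
}
#endif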
3881