/*
 * Copyright (c) 1999, 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/* Author: Bertrand Serlet, August 1999 */

#include "scalable_malloc.h"
#include "malloc_printf.h"
#include "_simple.h"

#include <pthread_internals.h>

#include <unistd.h>
#include <libc.h>
#include <mach/vm_statistics.h>
#include <mach/mach_init.h>
#include <sys/types.h>
#include <sys/mman.h>

/********************* DEFINITIONS ************************/

#define DEBUG_MALLOC 0 // set to one to debug malloc itself

#define DEBUG_CLIENT 0 // set to one to help debug a nasty memory smasher

#if DEBUG_MALLOC
#warning DEBUG_MALLOC ENABLED
# define INLINE
# define ALWAYSINLINE
# define CHECK_LOCKED(szone, fun) \
do { \
    if (__is_threaded && TRY_LOCK(szone->lock)) { \
        malloc_printf("*** lock was not set %p in %s\n", szone->lock, fun); \
    } \
} while (0)
#else
# define INLINE __inline__
# define ALWAYSINLINE __attribute__((always_inline))
# define CHECK_LOCKED(szone, fun) {}
#endif

/*
 * Access to global variables is slow, so optimise our handling of vm_page_size
 * and vm_page_shift.
 */
#define _vm_page_size vm_page_size /* to get to the originals */
#define _vm_page_shift vm_page_shift
#define vm_page_size 4096 /* our normal working sizes */
#define vm_page_shift 12

typedef unsigned short msize_t; // a size in multiples of TINY_QUANTUM or SMALL_QUANTUM

typedef union {
    void *p;
    uintptr_t u;
} ptr_union;

typedef struct {
    ptr_union previous;
    ptr_union next;
} free_list_t;

typedef struct {
    uintptr_t address_and_num_pages;
    // this type represents both an address and a number of pages
    // the low bits are the number of pages; the high bits are the address
    // note that the exact number of bits used for the page count depends on the page size
    // also, this cannot represent allocations larger than 1 << (vm_page_shift * 2) bytes
} compact_range_t;

typedef unsigned char grain_t;

#define CHECK_REGIONS (1 << 31)

#define MAX_RECORDER_BUFFER 256

/********************* DEFINITIONS for tiny ************************/

/*
 * Memory in the Tiny range is allocated from regions (heaps) pointed to by the szone's tiny_regions
 * pointer.
 *
 * Each region is laid out as a heap, followed by a header block, all within
 * a 1MB (2^20) block.  This means there are 64520 16-byte blocks and the header is
 * 16138 bytes, making the total 1048458 bytes, leaving 118 bytes unused.
 * The header block is arranged:
 *
 * 0xfc080
 *      header bits
 * 0xfe001
 *      0xffffffff pad word
 * 0xfe005
 *      in-use bits
 * 0xfff86
 *      pad word (not written)
 * 0xfff8a-0xfffff
 *      unused
 *
 * Each bitfield comprises NUM_TINY_BLOCKS bits, and refers to the corresponding TINY_QUANTUM block
 * within the heap.
 *
 * The bitfields are used to encode the state of memory within the heap.  The header bit indicates
 * that the corresponding quantum is the first quantum in a block (either in use or free).  The
 * in-use bit is set for the header if the block has been handed out (allocated).  If the header
 * bit is not set, the in-use bit is invalid.
 *
 * The szone maintains an array of 32 freelists, each of which is used to hold
 * free objects of the corresponding quantum size.
 *
 * A free block is laid out depending on its size, in order to fit all free
 * blocks in 16 bytes, on both 32 and 64 bit platforms.  One quantum blocks do
 * not store their size in the block, instead relying on the header information
 * to determine their size.  Blocks of two or more quanta have room to store
 * their size in the block, and store it both after the 'next' pointer, and in
 * the last 2 bytes of the block.
 *
 * 1-quantum block
 * Offset (32-bit mode)   (64-bit mode)
 * 0x0        0x0      : previous
 * 0x4        0x08     : next
 * end        end
 *
 * >1-quantum block
 * Offset (32-bit mode)   (64-bit mode)
 * 0x0        0x0      : previous
 * 0x4        0x08     : next
 * 0x8        0x10     : size (in quantum counts)
 * end - 2    end - 2  : size (in quantum counts)
 * end        end
 *
 * All fields are pointer-sized, except for the size which is an unsigned short.
 *
 */

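/*
 * The arithmetic behind the numbers quoted above, as an illustrative sketch.
 * This block is NOT part of the original allocator (it is compiled out); the
 * literal constants mirror NUM_TINY_BLOCKS, TINY_QUANTUM and TINY_HEADER_SIZE
 * defined below.
 */
#if 0
static void
tiny_layout_arithmetic_example(void)
{
    unsigned long heap_bytes   = 64520UL * 16;        /* NUM_TINY_BLOCKS * TINY_QUANTUM = 1032320 = 0xfc080 */
    unsigned long bitfield     = 64520UL / 8;         /* one bit per block, per bitfield = 8065 bytes       */
    unsigned long header_bytes = (64520UL >> 2) + 8;  /* TINY_HEADER_SIZE: two bitfields + two pad words = 16138 */

    /* header bits start at 0xfc080; pad word at 0xfc080 + 8065 = 0xfe001;
       in-use bits at 0xfe005; trailing pad at 0xfe005 + 8065 = 0xfff86 */
    malloc_printf("total %lu of %lu bytes used, %lu unused\n",
                  heap_bytes + header_bytes,                 /* 1048458 */
                  1UL << 20,                                 /* 1048576 */
                  (1UL << 20) - heap_bytes - header_bytes);  /* 118     */
    (void)bitfield;
}
#endif
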
#define SHIFT_TINY_QUANTUM 4 // Required for AltiVec
#define TINY_QUANTUM (1 << SHIFT_TINY_QUANTUM)

#define FOLLOWING_TINY_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_TINY_QUANTUM))

#define NUM_TINY_SLOTS 32 // number of slots for free-lists

#define NUM_TINY_BLOCKS 64520
#define SHIFT_TINY_CEIL_BLOCKS 16 // ceil(log2(NUM_TINY_BLOCKS))
#define NUM_TINY_CEIL_BLOCKS (1 << SHIFT_TINY_CEIL_BLOCKS)
#define TINY_BLOCKS_ALIGN (SHIFT_TINY_CEIL_BLOCKS + SHIFT_TINY_QUANTUM)

/*
 * Enough room for the data, followed by the bit arrays (2-bits per block) plus 2 words of padding
 * as our bitmap operators overflow, plus rounding to the nearest page.
 */
#define TINY_HEADER_SIZE ((NUM_TINY_BLOCKS >> 2) + 8)
#define TINY_REGION_SIZE ((NUM_TINY_BLOCKS * TINY_QUANTUM + TINY_HEADER_SIZE + vm_page_size - 1) & ~ (vm_page_size - 1))

/*
 * Beginning and end pointers for a region's heap.
 */
#define TINY_REGION_ADDRESS(region) ((void *)(region))
#define TINY_REGION_END(region) (TINY_REGION_ADDRESS(region) + (NUM_TINY_BLOCKS * TINY_QUANTUM))

/*
 * Locate the heap base for a pointer known to be within a tiny region.
 */
#define TINY_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << TINY_BLOCKS_ALIGN) - 1)))

/*
 * Convert between byte and msize units.
 */
#define TINY_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_TINY_QUANTUM)
#define TINY_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_TINY_QUANTUM)

#ifdef __LP64__
# define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[8])
#else
# define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[4])
#endif
#define TINY_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]

/*
 * Locate the block header for a pointer known to be within a tiny region.
 */
#define TINY_HEADER_START (NUM_TINY_BLOCKS * TINY_QUANTUM)
#define TINY_BLOCK_HEADER_FOR_PTR(_p) ((void *)((uintptr_t)TINY_REGION_FOR_PTR(_p) + TINY_HEADER_START))

/*
 * Locate the inuse map for a given block header pointer.
 */
#define TINY_INUSE_FOR_HEADER(_h) ((void *)((uintptr_t)(_h) + (NUM_TINY_BLOCKS >> 3) + 4))

/*
 * Compute the bitmap index for a pointer known to be within a tiny region.
 */
#define TINY_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_CEIL_BLOCKS - 1))

#define TINY_CACHE 1 // This governs a last-free cache of 1 that bypasses the free-list

#if ! TINY_CACHE
#warning TINY_CACHE turned off
#endif

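/*
 * Illustrative sketch (not part of the original allocator, compiled out): how
 * the macros above decompose a client pointer in the tiny range.  The function
 * name and the example request size are hypothetical; the macros are the ones
 * defined above.
 */
#if 0
static void
tiny_macro_example(const void *ptr)
{
    void    *region = TINY_REGION_FOR_PTR(ptr);     /* 2^TINY_BLOCKS_ALIGN-aligned region base   */
    msize_t  index  = TINY_INDEX_FOR_PTR(ptr);      /* which TINY_QUANTUM within that region     */
    msize_t  msize  = TINY_MSIZE_FOR_BYTES(48 + TINY_QUANTUM - 1); /* a 48-byte request rounds up to 3 quanta */

    /* The two metadata bitfields for this region live just past the heap: */
    unsigned char *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    unsigned char *in_use       = TINY_INUSE_FOR_HEADER(block_header);

    malloc_printf("region=%p index=%d msize=%d header=%p in_use=%p\n",
                  region, index, msize, block_header, in_use);
}
#endif
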
/********************* DEFINITIONS for small ************************/

/*
 * Memory in the Small range is allocated from regions (heaps) pointed to by the szone's small_regions
 * pointer.
 *
 * Each region is laid out as a heap, followed by the metadata array, all within an 8MB (2^23) block.
 * The array is arranged as an array of shorts, one for each SMALL_QUANTUM in the heap.
 * This means there are 16320 512-byte blocks and the array is 16320*2 bytes, which totals 8388480,
 * leaving 128 bytes unused.
 *
 * The MSB of each short is set for the first quantum in a free block.  The low 15 bits encode the
 * block size (in SMALL_QUANTUM units), or are zero if the quantum is not the first in a block.
 *
 * The szone maintains an array of 32 freelists, each of which is used to hold free objects
 * of the corresponding quantum size.
 *
 * A free block is laid out as:
 *
 * Offset (32-bit mode)   (64-bit mode)
 * 0x0        0x0      : previous
 * 0x4        0x08     : next
 * 0x8        0x10     : size (in quantum counts)
 * end - 2    end - 2  : size (in quantum counts)
 * end        end
 *
 * All fields are pointer-sized, except for the size which is an unsigned short.
 *
 */

#define SMALL_IS_FREE (1 << 15)

#define SHIFT_SMALL_QUANTUM (SHIFT_TINY_QUANTUM + 5) // 9
#define SMALL_QUANTUM (1 << SHIFT_SMALL_QUANTUM) // 512 bytes

#define FOLLOWING_SMALL_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_SMALL_QUANTUM))

#define NUM_SMALL_SLOTS 32 // number of slots for free-lists

/*
 * We can only represent up to 1<<15 for msize; but we choose to stay even below that to avoid the
 * convention msize=0 => msize = (1<<15)
 */
#define NUM_SMALL_BLOCKS 16320
#define SHIFT_SMALL_CEIL_BLOCKS 14 // ceil(log2(NUM_SMALL_BLOCKS))
#define NUM_SMALL_CEIL_BLOCKS (1 << SHIFT_SMALL_CEIL_BLOCKS)
#define SMALL_BLOCKS_ALIGN (SHIFT_SMALL_CEIL_BLOCKS + SHIFT_SMALL_QUANTUM) // 23
#define SMALL_ARRAY_SIZE (NUM_SMALL_BLOCKS * 2)
#define SMALL_REGION_SIZE ((NUM_SMALL_BLOCKS * SMALL_QUANTUM + SMALL_ARRAY_SIZE + vm_page_size - 1) & ~ (vm_page_size - 1)) // data + meta data

#define SMALL_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]

/*
 * Convert between byte and msize units.
 */
#define SMALL_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_SMALL_QUANTUM)
#define SMALL_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_SMALL_QUANTUM)


#define SMALL_REGION_ADDRESS(region) ((unsigned char *)region)
#define SMALL_REGION_END(region) (SMALL_REGION_ADDRESS(region) + (NUM_SMALL_BLOCKS * SMALL_QUANTUM))

/*
 * Locate the heap base for a pointer known to be within a small region.
 */
#define SMALL_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << SMALL_BLOCKS_ALIGN) - 1)))

/*
 * Locate the metadata base for a pointer known to be within a small region.
 */
#define SMALL_HEADER_START (NUM_SMALL_BLOCKS * SMALL_QUANTUM)
#define SMALL_META_HEADER_FOR_PTR(_p) ((msize_t *)((uintptr_t)SMALL_REGION_FOR_PTR(_p) + SMALL_HEADER_START))

/*
 * Compute the metadata index for a pointer known to be within a small region.
 */
#define SMALL_META_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_SMALL_QUANTUM) & (NUM_SMALL_CEIL_BLOCKS - 1))

/*
 * Find the metadata word for a pointer known to be within a small region.
 */
#define SMALL_METADATA_FOR_PTR(_p) (SMALL_META_HEADER_FOR_PTR(_p) + SMALL_META_INDEX_FOR_PTR(_p))

/*
 * Determine whether a pointer known to be within a small region points to memory which is free.
 */
#define SMALL_PTR_IS_FREE(_p) (*SMALL_METADATA_FOR_PTR(_p) & SMALL_IS_FREE)

/*
 * Extract the msize value for a pointer known to be within a small region.
 */
#define SMALL_PTR_SIZE(_p) (*SMALL_METADATA_FOR_PTR(_p) & ~SMALL_IS_FREE)

#define PROTECT_SMALL 0 // Should be 0: 1 is too slow for normal use

#define SMALL_CACHE 1
#if !SMALL_CACHE
#warning SMALL_CACHE turned off
#endif

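/*
 * Illustrative sketch (not part of the original allocator, compiled out):
 * reading a small block's metadata word with the macros above.  The function
 * name is hypothetical.
 */
#if 0
static void
small_metadata_example(const void *ptr)
{
    msize_t *meta = SMALL_METADATA_FOR_PTR(ptr);  /* one short per SMALL_QUANTUM, just past the heap */
    msize_t  word = *meta;

    if (word & SMALL_IS_FREE) {
        /* MSB set: first quantum of a free block; low 15 bits give its length in quanta */
        malloc_printf("%p is free, %d quanta (%d bytes)\n",
                      ptr, word & ~SMALL_IS_FREE, SMALL_BYTES_FOR_MSIZE(word & ~SMALL_IS_FREE));
    } else if (word) {
        /* MSB clear, non-zero: first quantum of an allocated block */
        malloc_printf("%p is in use, %d quanta\n", ptr, word);
    } else {
        /* zero: this quantum is in the middle of some block */
        malloc_printf("%p is not the start of a block\n", ptr);
    }
}
#endif
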
/********************* DEFINITIONS for large and huge ***********************/

#define LARGE_THRESHOLD (15 * 1024) // at or above this use "large"

#if (LARGE_THRESHOLD > NUM_SMALL_SLOTS * SMALL_QUANTUM)
#error LARGE_THRESHOLD should always be less than NUM_SMALL_SLOTS * SMALL_QUANTUM
#endif

#define VM_COPY_THRESHOLD (40 * 1024)
    // When all memory is touched after a copy, vm_copy() is always a lose
    // But if the memory is only read, vm_copy() wins over memmove() at 3 or 4 pages (on a G3/300MHz)
    // This must be larger than LARGE_THRESHOLD

/*
 * Given a large_entry, return the address of the allocated block.
 */
#define LARGE_ENTRY_ADDRESS(entry) \
    (void *)(((entry).address_and_num_pages >> vm_page_shift) << vm_page_shift)

/*
 * Given a large entry, return the number of pages or bytes in the allocated block.
 */
#define LARGE_ENTRY_NUM_PAGES(entry) \
    ((entry).address_and_num_pages & (vm_page_size - 1))
#define LARGE_ENTRY_SIZE(entry) \
    (LARGE_ENTRY_NUM_PAGES(entry) << vm_page_shift)

/*
 * Compare a pointer with a large entry.
 */
#define LARGE_ENTRY_MATCHES(entry,ptr) \
    ((((entry).address_and_num_pages - (uintptr_t)(ptr)) >> vm_page_shift) == 0)

#define LARGE_ENTRY_IS_EMPTY(entry) (((entry).address_and_num_pages) == 0)

typedef compact_range_t large_entry_t;
typedef vm_range_t huge_entry_t;

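/*
 * Illustrative example (not part of the original allocator, compiled out):
 * how address_and_num_pages packs both halves of a large entry, using the
 * working vm_page_shift of 12 and a hypothetical 3-page block at 0x2300000.
 */
#if 0
static void
large_entry_packing_example(void)
{
    large_entry_t entry;

    entry.address_and_num_pages = 0x2300000 | 3;      /* page-aligned address | page count */

    malloc_printf("address=%p pages=%d size=%d bytes\n",
                  LARGE_ENTRY_ADDRESS(entry),          /* 0x2300000: low 12 bits cleared */
                  (int)LARGE_ENTRY_NUM_PAGES(entry),   /* 3                              */
                  (int)LARGE_ENTRY_SIZE(entry));       /* 0x3000 = 12KB                  */

    /* LARGE_ENTRY_MATCHES() compares against the block's start address: for the
       page-aligned pointer that was handed out, the subtraction leaves only the
       small page count, which the >> vm_page_shift discards. */
}
#endif
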
/*******************************************************************************
 * Definitions for region hash
 ******************************************************************************/

typedef void * region_t;

#define INITIAL_NUM_REGIONS 63 // Must be odd to hash well

/********************* zone itself ************************/

typedef struct {
    malloc_zone_t basic_zone;
    pthread_lock_t lock;
    unsigned debug_flags;
    void *log_address;

    /* Regions for tiny objects */
    size_t num_tiny_regions;
    size_t num_tiny_regions_allocated;
    region_t *tiny_regions; // hashed by location
    region_t *last_tiny_region;
    void *last_tiny_free; // the low SHIFT_TINY_QUANTUM bits indicate the msize
    unsigned tiny_bitmap; // cache of the 32 free lists
    free_list_t *tiny_free_list[NUM_TINY_SLOTS]; // 31 free lists for 1*TINY_QUANTUM to 31*TINY_QUANTUM plus 1 for blocks of 32*TINY_QUANTUM and larger
    size_t tiny_bytes_free_at_end; // the free space at the end of the last region is treated as a big block in use that is not accounted for
    unsigned num_tiny_objects;
    size_t num_bytes_in_tiny_objects;

    /* Regions for small objects */
    size_t num_small_regions;
    size_t num_small_regions_allocated;
    region_t *small_regions; // hashed by location
    region_t *last_small_region;
    void *last_small_free; // the low SHIFT_SMALL_QUANTUM bits indicate the msize
    unsigned small_bitmap; // cache of the free list
    free_list_t *small_free_list[NUM_SMALL_SLOTS];
    size_t small_bytes_free_at_end; // the free space at the end of the last region is treated as a big block in use that is not accounted for
    unsigned num_small_objects;
    size_t num_bytes_in_small_objects;

    /* large objects: vm_page_shift <= log2(size) < 2 *vm_page_shift */
    unsigned num_large_objects_in_use;
    unsigned num_large_entries;
    large_entry_t *large_entries; // hashed by location; null entries don't count
    size_t num_bytes_in_large_objects;

    /* huge objects: log2(size) >= 2 *vm_page_shift */
    unsigned num_huge_entries;
    huge_entry_t *huge_entries;
    size_t num_bytes_in_huge_objects;

    /* Initial region list */
    region_t initial_tiny_regions[INITIAL_NUM_REGIONS];
    region_t initial_small_regions[INITIAL_NUM_REGIONS];
} szone_t;

#define SZONE_PAGED_SIZE ((sizeof(szone_t) + vm_page_size - 1) & ~ (vm_page_size - 1))

#if DEBUG_MALLOC || DEBUG_CLIENT
static void szone_sleep(void);
#endif
__private_extern__ void malloc_error_break(void);

// msg prints after fmt, ...
static void szone_error(szone_t *szone, const char *msg, const void *ptr, const char *fmt, ...) __printflike(4, 5);

static void protect(void *address, size_t size, unsigned protection, unsigned debug_flags);
static void *allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label);
static void deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags);
static kern_return_t _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr);

static INLINE void free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg) ALWAYSINLINE;
static INLINE void free_list_set_checksum(szone_t *szone, free_list_t *ptr) ALWAYSINLINE;
static INLINE uintptr_t free_list_checksum_ptr(void *p) ALWAYSINLINE;
static INLINE void * free_list_unchecksum_ptr(ptr_union ptr) ALWAYSINLINE;
static unsigned free_list_count(const free_list_t *ptr);

static INLINE msize_t get_tiny_meta_header(const void *ptr, boolean_t *is_free) ALWAYSINLINE;
static INLINE void set_tiny_meta_header_in_use(const void *ptr, msize_t msize) ALWAYSINLINE;
static INLINE void set_tiny_meta_header_middle(const void *ptr) ALWAYSINLINE;
static INLINE void set_tiny_meta_header_free(const void *ptr, msize_t msize) ALWAYSINLINE;
static INLINE boolean_t tiny_meta_header_is_free(const void *ptr) ALWAYSINLINE;
static INLINE void *tiny_previous_preceding_free(void *ptr, msize_t *prev_msize) ALWAYSINLINE;
static void tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize);
static void tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize);
static INLINE region_t *tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr) ALWAYSINLINE;
static INLINE void tiny_free_no_lock(szone_t *szone, region_t *region, void *ptr, msize_t msize) ALWAYSINLINE;
static void *tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
static INLINE boolean_t try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) ALWAYSINLINE;
static boolean_t tiny_check_region(szone_t *szone, region_t region);
static kern_return_t tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone, memory_reader_t reader, vm_range_recorder_t recorder);
static void *tiny_malloc_from_free_list(szone_t *szone, msize_t msize);
static INLINE void *tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) ALWAYSINLINE;
static INLINE void free_tiny(szone_t *szone, void *ptr, region_t *tiny_region) ALWAYSINLINE;
static void print_tiny_free_list(szone_t *szone);
static void print_tiny_region(boolean_t verbose, region_t region, size_t bytes_at_end);
static boolean_t tiny_free_list_check(szone_t *szone, grain_t slot);

static INLINE void small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize) ALWAYSINLINE;
static INLINE void small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize) ALWAYSINLINE;
static INLINE void small_meta_header_set_middle(msize_t *meta_headers, msize_t index) ALWAYSINLINE;
static void small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize);
static void small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize);
static INLINE region_t *small_region_for_ptr_no_lock(szone_t *szone, const void *ptr) ALWAYSINLINE;
static INLINE void small_free_no_lock(szone_t *szone, region_t *region, void *ptr, msize_t msize) ALWAYSINLINE;
static void *small_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
static INLINE boolean_t try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) ALWAYSINLINE;
static boolean_t szone_check_small_region(szone_t *szone, region_t region);
static kern_return_t small_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone, memory_reader_t reader, vm_range_recorder_t recorder);
static void *small_malloc_from_free_list(szone_t *szone, msize_t msize);
static INLINE void *small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) ALWAYSINLINE;
static INLINE void *small_malloc_cleared_no_lock(szone_t *szone, msize_t msize) ALWAYSINLINE;
static INLINE void free_small(szone_t *szone, void *ptr, region_t *small_region) ALWAYSINLINE;
static void print_small_free_list(szone_t *szone);
static void print_small_region(szone_t *szone, boolean_t verbose, region_t region, size_t bytes_at_end);
static boolean_t small_free_list_check(szone_t *szone, grain_t grain);

static region_t * hash_lookup_region_no_lock(region_t *regions, size_t num_entries, region_t r);
static void hash_region_insert_no_lock(region_t *regions, size_t num_entries, region_t r);
static region_t * hash_regions_alloc_no_lock(szone_t *szone, size_t num_entries);
static region_t * hash_regions_grow_no_lock(szone_t *szone, region_t *regions, size_t old_size, size_t *new_size);

#if DEBUG_MALLOC
static void large_debug_print(szone_t *szone);
#endif
static large_entry_t *large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
static void large_entry_insert_no_lock(szone_t *szone, large_entry_t range);
static INLINE void large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) ALWAYSINLINE;
static INLINE large_entry_t *large_entries_alloc_no_lock(szone_t *szone, unsigned num) ALWAYSINLINE;
static void large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate);
static large_entry_t * large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate);
static vm_range_t large_free_no_lock(szone_t *szone, large_entry_t *entry);
static kern_return_t large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder);
static huge_entry_t *huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
static boolean_t huge_entry_append(szone_t *szone, huge_entry_t huge);
static kern_return_t huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder);
static void *large_and_huge_malloc(szone_t *szone, size_t num_pages);
static INLINE void free_large_or_huge(szone_t *szone, void *ptr) ALWAYSINLINE;
static INLINE int try_realloc_large_or_huge_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) ALWAYSINLINE;

static void szone_free(szone_t *szone, void *ptr);
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested) ALWAYSINLINE;
static void *szone_malloc(szone_t *szone, size_t size);
static void *szone_calloc(szone_t *szone, size_t num_items, size_t size);
static void *szone_valloc(szone_t *szone, size_t size);
static size_t szone_size(szone_t *szone, const void *ptr);
static void *szone_realloc(szone_t *szone, void *ptr, size_t new_size);
static unsigned szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count);
static void szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count);
static void szone_destroy(szone_t *szone);
static size_t szone_good_size(szone_t *szone, size_t size);

static boolean_t szone_check_all(szone_t *szone, const char *function);
static boolean_t szone_check(szone_t *szone);
static kern_return_t szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder);
static void szone_print(szone_t *szone, boolean_t verbose);
static void szone_log(malloc_zone_t *zone, void *log_address);
static void szone_force_lock(szone_t *szone);
static void szone_force_unlock(szone_t *szone);

static void szone_statistics(szone_t *szone, malloc_statistics_t *stats);

static void *frozen_malloc(szone_t *zone, size_t new_size);
static void *frozen_calloc(szone_t *zone, size_t num_items, size_t size);
static void *frozen_valloc(szone_t *zone, size_t new_size);
static void *frozen_realloc(szone_t *zone, void *ptr, size_t new_size);
static void frozen_free(szone_t *zone, void *ptr);
static void frozen_destroy(szone_t *zone);

#if DEBUG_MALLOC
# define LOG(szone,ptr) \
    (szone->log_address && (((uintptr_t)szone->log_address == -1) || (szone->log_address == (void *)(ptr))))
#else
# define LOG(szone,ptr) 0
#endif

#define SZONE_LOCK(szone) \
    do { \
        LOCK(szone->lock); \
    } while (0)

#define SZONE_UNLOCK(szone) \
    do { \
        UNLOCK(szone->lock); \
    } while (0)

#define LOCK_AND_NOTE_LOCKED(szone,locked) \
    do { \
        CHECK(szone, __PRETTY_FUNCTION__); \
        locked = 1; SZONE_LOCK(szone); \
    } while (0)

#if DEBUG_MALLOC || DEBUG_CLIENT
# define CHECK(szone,fun) \
    if ((szone)->debug_flags & CHECK_REGIONS) szone_check_all(szone, fun)
#else
# define CHECK(szone,fun) do {} while (0)
#endif

/********************* VERY LOW LEVEL UTILITIES ************************/

#if DEBUG_MALLOC || DEBUG_CLIENT
static void
szone_sleep(void)
{

    if (getenv("MallocErrorSleep")) {
        _malloc_printf(ASL_LEVEL_NOTICE, "*** sleeping to help debug\n");
        sleep(3600); // to help debug
    }
}
#endif

// msg prints after fmt, ...
static __attribute__((noinline)) void
szone_error(szone_t *szone, const char *msg, const void *ptr, const char *fmt, ...)
{
    va_list ap;
    _SIMPLE_STRING b = _simple_salloc();

    if (szone) SZONE_UNLOCK(szone);
    if (b) {
        if (fmt) {
            va_start(ap, fmt);
            _simple_vsprintf(b, fmt, ap);
            va_end(ap);
        }
        if (ptr) {
            _simple_sprintf(b, "*** error for object %p: %s\n", ptr, msg);
        } else {
            _simple_sprintf(b, "*** error: %s\n", msg);
        }
        malloc_printf("%s*** set a breakpoint in malloc_error_break to debug\n", _simple_string(b));
        _simple_sfree(b);
    } else {
        /*
         * Should only get here if vm_allocate() can't get a single page of
         * memory, implying _simple_asl_log() would also fail.  So we just
         * print to the file descriptor.
         */
        if (fmt) {
            va_start(ap, fmt);
            _malloc_vprintf(MALLOC_PRINTF_NOLOG, fmt, ap);
            va_end(ap);
        }
        if (ptr) {
            _malloc_printf(MALLOC_PRINTF_NOLOG, "*** error for object %p: %s\n", ptr, msg);
        } else {
            _malloc_printf(MALLOC_PRINTF_NOLOG, "*** error: %s\n", msg);
        }
        _malloc_printf(MALLOC_PRINTF_NOLOG, "*** set a breakpoint in malloc_error_break to debug\n");
    }
    malloc_error_break();
#if DEBUG_MALLOC
    szone_print(szone, 1);
    szone_sleep();
#endif
#if DEBUG_CLIENT
    szone_sleep();
#endif
    if (szone->debug_flags & SCALABLE_MALLOC_ABORT_ON_ERROR) abort();
}

static void
protect(void *address, size_t size, unsigned protection, unsigned debug_flags)
{
    kern_return_t err;

    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) {
        err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address - vm_page_size, vm_page_size, 0, protection);
        if (err) {
            malloc_printf("*** can't protect(%p) region for prelude guard page at %p\n",
                          protection, address - (1 << vm_page_shift));
        }
    }
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) {
        err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address + size, vm_page_size, 0, protection);
        if (err) {
            malloc_printf("*** can't protect(%p) region for postlude guard page at %p\n",
                          protection, address + size);
        }
    }
}

static void *
allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label)
{
    // align specifies a desired alignment (as a log) or 0 if no alignment requested
    void *vm_addr;
    uintptr_t addr, aligned_address;
    boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    size_t allocation_size = round_page(size);
    size_t delta;

    if (align) add_guard_pages = 0; // too cumbersome to deal with that
    if (!allocation_size) allocation_size = 1 << vm_page_shift;
    if (add_guard_pages) allocation_size += 2 * (1 << vm_page_shift);
    if (align) allocation_size += (size_t)1 << align;
    vm_addr = mmap(0, allocation_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, VM_MAKE_TAG(vm_page_label), 0);
    if ((uintptr_t)vm_addr == -1) {
        szone_error(szone, "can't allocate region", NULL, "*** mmap(size=%lld) failed (error code=%d)\n", (long long)allocation_size, errno);
        return NULL;
    }
    addr = (uintptr_t)vm_addr;
    if (align) {
        aligned_address = (addr + ((uintptr_t)1 << align) - 1) & ~ (((uintptr_t)1 << align) - 1);
        if (aligned_address != addr) {
            delta = aligned_address - addr;
            if (munmap((void *)addr, delta) == -1)
                malloc_printf("*** freeing unaligned header failed with %d\n", errno);
            addr = aligned_address;
            allocation_size -= delta;
        }
        if (allocation_size > size) {
            if (munmap((void *)(addr + size), allocation_size - size) == -1)
                malloc_printf("*** freeing unaligned footer failed with %d\n", errno);
        }
    }
    if (add_guard_pages) {
        addr += (uintptr_t)1 << vm_page_shift;
        protect((void *)addr, size, 0, debug_flags);
    }
    return (void *)addr;
}

static void
deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags)
{
    int err;
    boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;

    if (add_guard_pages) {
        addr -= 1 << vm_page_shift;
        size += 2 * (1 << vm_page_shift);
    }
    err = munmap(addr, size);
    if ((err == -1) && szone)
        szone_error(szone, "Can't deallocate_pages region", addr, NULL);
}

static kern_return_t
_szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr)
{
    *ptr = (void *)address;
    return 0;
}

/********************* FREE LIST UTILITIES ************************/

// A free list entry is comprised of a pair of pointers, previous and next.
// Because the free list entries are previously freed objects, there is a
// non-zero chance that a misbehaved program will write to an object after it
// has called free() on the pointer.  This write would then potentially corrupt
// the previous and next pointers, leading to a crash.  In order to detect this
// case, we take advantage of the fact that pointers are known to be at least
// 16 byte aligned, and thus have at least 4 trailing zero bits.  When an entry
// is added to the free list, the previous and next pointers are shifted right
// by 2 bits, and then have the high and low 2 bits set, to act as guard bits.
// Before accessing a free list object, we verify that these bits are still
// set, and log an error if they are not.  (An illustrative round-trip sketch
// follows free_list_set_checksum() below.)

static INLINE void
free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg)
{
    uintptr_t ptrs = ptr->previous.u & ptr->next.u;

#ifdef __LP64__
    ptrs = (ptrs << 2) | (ptrs >> (64-2));
#else
    ptrs = (ptrs << 2) | (ptrs >> (32-2));
#endif

    if ((ptrs & 15) != 15)
        szone_error(szone, "incorrect checksum for freed object "
            "- object was probably modified after being freed.", ptr, NULL);
}

static INLINE uintptr_t
free_list_checksum_ptr(void *p)
{
    ptr_union ptr;
    ptr.p = p;

#ifdef __LP64__
    return (ptr.u >> 2) | 0xC000000000000003ULL;
#else
    return (ptr.u >> 2) | 0xC0000003U;
#endif
}

static INLINE void *
free_list_unchecksum_ptr(ptr_union ptr)
{
    uintptr_t u = (ptr.u >> 2) << 4;
    return (void *)u;
}

static INLINE void
free_list_set_checksum(szone_t *szone, free_list_t *ptr)
{
    ptr->previous.u = free_list_checksum_ptr(ptr->previous.p);
    ptr->next.u = free_list_checksum_ptr(ptr->next.p);
}

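/*
 * Illustrative sketch (not part of the original allocator, compiled out): the
 * checksum round trip for one 16-byte-aligned pointer, and how a stray write
 * trips free_list_checksum().  The function name, the szone argument and the
 * example address are hypothetical.
 */
#if 0
static void
free_list_checksum_example(szone_t *szone)
{
    free_list_t entry;
    void *p = (void *)0x12340;              /* 16-byte aligned, as all free-list entries are */
    void *q;

    entry.previous.p = p;
    entry.next.p = p;
    free_list_set_checksum(szone, &entry);  /* each field becomes (u >> 2) | high-2 | low-2 guard bits */

    /* free_list_unchecksum_ptr() undoes the shift: (u >> 2) << 4 drops the low
       guard bits and restores the original 16-byte-aligned value. */
    q = free_list_unchecksum_ptr(entry.next);        /* q == (void *)0x12340 again */

    /* A wild store that clears any of the four guard bits makes the
       (ptrs & 15) != 15 test in free_list_checksum() fail, reporting the
       object as modified after being freed. */
    entry.next.u &= ~(uintptr_t)1;
    free_list_checksum(szone, &entry, __PRETTY_FUNCTION__);  /* would call szone_error() */
    (void)q;
}
#endif
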
static unsigned
free_list_count(const free_list_t *ptr)
{
    unsigned count = 0;

    while (ptr) {
        count++;
        ptr = free_list_unchecksum_ptr(ptr->next);
    }
    return count;
}

/* XXX inconsistent use of BITMAP32 and BITARRAY operations could be cleaned up */

#define BITMAP32_SET(bitmap,bit) (bitmap |= 1 << (bit))
#define BITMAP32_CLR(bitmap,bit) (bitmap &= ~ (1 << (bit)))
#define BITMAP32_BIT(bitmap,bit) ((bitmap >> (bit)) & 1)

/* returns bit # of least-significant one bit, starting at 0 (undefined if !bitmap) */
#define BITMAP32_CTZ(bitmap) (__builtin_ctz(bitmap))

/********************* TINY FREE LIST UTILITIES ************************/

// We encode the meta-headers as follows:
// Each quantum has an associated set of 2 bits:
// block_header when 1 says this block is the beginning of a block
// in_use when 1 says this block is in use
// so a block in use of size 3 is 1-1 0-X 0-X
// for a free block TINY_FREE_SIZE(ptr) carries the size and the bits are 1-0 X-X X-X
// for a block middle the bits are 0-0

// Attention double evaluation for these
#define BITARRAY_SET(bits,index) (bits[index>>3] |= (1 << (index & 7)))
#define BITARRAY_CLR(bits,index) (bits[index>>3] &= ~(1 << (index & 7)))
#define BITARRAY_BIT(bits,index) (((bits[index>>3]) >> (index & 7)) & 1)

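/*
 * Illustrative example (not part of the original allocator): the two-bit
 * encoding described above, for a region whose first three quanta form one
 * in-use 3-quantum block followed by a 2-quantum free block.
 *
 *   quantum index:   0   1   2   3   4
 *   block_header :   1   0   0   1   0      <- 1 marks the first quantum of each block
 *   in_use       :   1   x   x   0   x      <- only meaningful where block_header is 1
 *
 * BITARRAY_BIT(block_header, 0) == 1 and BITARRAY_BIT(in_use, 0) == 1, so
 * quantum 0 starts an allocated block; BITARRAY_BIT(block_header, 3) == 1 with
 * the in-use bit clear, so quantum 3 starts a free block whose length is read
 * back with TINY_FREE_SIZE()/get_tiny_free_size().
 */
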
// Following is for start<8 and end<=start+32
static void ALWAYSINLINE
bitarray_mclr(void *bits, unsigned start, unsigned end) {
    unsigned word = OSReadLittleInt32(bits, 0);
    unsigned mask = (0xFFFFFFFFU >> (31 - start)) >> 1;

    if (end > 31) {
        unsigned char *bytes = (unsigned char *)bits;
        bytes[4] &= ~((1 << (end - 32)) - 1);
    } else {
        mask |= (0xFFFFFFFF << end);
    }
    OSWriteLittleInt32(bits, 0, word & mask);
}

/*
 * Obtain the size of a free tiny block (in msize_t units).
 */
static msize_t
get_tiny_free_size(const void *ptr)
{
    void *next_block = (void *)((uintptr_t)ptr + TINY_QUANTUM);
    void *region_end = TINY_REGION_END(TINY_REGION_FOR_PTR(ptr));

    // check whether the next block is outside the tiny region or a block header
    // if so, then the size of this block is one, and there is no stored size.
    if (next_block < region_end)
    {
        unsigned char *next_header = TINY_BLOCK_HEADER_FOR_PTR(next_block);
        msize_t next_index = TINY_INDEX_FOR_PTR(next_block);

        if (!BITARRAY_BIT(next_header, next_index))
            return TINY_FREE_SIZE(ptr);
    }
    return 1;
}

/*
 * Get the size of the previous free block, which is stored in the last two
 * bytes of the block.  If the previous block is not free, then the result is
 * undefined.
 */
static msize_t
get_tiny_previous_free_msize(const void *ptr)
{
    // check whether the previous block is in the tiny region and a block header
    // if so, then the size of the previous block is one, and there is no stored
    // size.
    if (ptr != TINY_REGION_FOR_PTR(ptr))
    {
        void *prev_block = (void *)((uintptr_t)ptr - TINY_QUANTUM);
        unsigned char *prev_header = TINY_BLOCK_HEADER_FOR_PTR(prev_block);
        msize_t prev_index = TINY_INDEX_FOR_PTR(prev_block);
        if (BITARRAY_BIT(prev_header, prev_index))
            return 1;
        return TINY_PREVIOUS_MSIZE(ptr);
    }
    // don't read possibly unmapped memory before the beginning of the region
    return 0;
}

static INLINE msize_t
get_tiny_meta_header(const void *ptr, boolean_t *is_free)
{
    // returns msize and is_free
    // may return 0 for the msize component (meaning 65536)
    unsigned char *block_header;
    unsigned char *in_use;
    msize_t index;
    unsigned byte_index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    index = TINY_INDEX_FOR_PTR(ptr);
    byte_index = index >> 3;

    block_header += byte_index;
    index &= 7;
    *is_free = 0;
    if (!BITMAP32_BIT(*block_header, index))
        return 0;
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    if (!BITMAP32_BIT(*in_use, index)) {
        *is_free = 1;
        return get_tiny_free_size(ptr);
    }
    uint32_t *addr = (uint32_t *)((uintptr_t)block_header & ~3);
    uint32_t word0 = OSReadLittleInt32(addr, 0) >> index;
    uint32_t word1 = OSReadLittleInt32(addr, 4) << (8 - index);
    uint32_t bits = (((uintptr_t)block_header & 3) * 8); // precision loss on LP64 OK here
    uint32_t word = (word0 >> bits) | (word1 << (24 - bits));
    uint32_t result = ffs(word >> 1);
    return result;
}

static INLINE void
set_tiny_meta_header_in_use(const void *ptr, msize_t msize)
{
    unsigned char *block_header;
    unsigned char *in_use;
    msize_t index;
    unsigned byte_index;
    msize_t clr_msize;
    unsigned end_bit;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    index = TINY_INDEX_FOR_PTR(ptr);
    byte_index = index >> 3;

#if DEBUG_MALLOC
    if (msize >= 32)
        malloc_printf("set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize);
    if ((unsigned)index + (unsigned)msize > 0x10000)
        malloc_printf("set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize);
#endif
    block_header += byte_index;
    index &= 7;
    BITMAP32_SET(*block_header, index);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    BITMAP32_SET(*in_use, index);
    index++;
    clr_msize = msize-1;
    if (clr_msize) {
        byte_index = index >> 3;
        block_header += byte_index; in_use += byte_index;
        index &= 7;
        end_bit = index + clr_msize;
        bitarray_mclr(block_header, index, end_bit);
        bitarray_mclr(in_use, index, end_bit);
    }
    BITARRAY_SET(block_header, index+clr_msize); // we set the block_header bit for the following block to reaffirm next block is a block
#if DEBUG_MALLOC
    {
        boolean_t ff;
        msize_t mf;

        mf = get_tiny_meta_header(ptr, &ff);
        if (msize != mf) {
            malloc_printf("setting header for tiny in_use %p : %d\n", ptr, msize);
            malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
        }
    }
#endif
}

static INLINE void
set_tiny_meta_header_middle(const void *ptr)
{
    // indicates this block is in the middle of an in use block
    unsigned char *block_header;
    unsigned char *in_use;
    msize_t index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

    BITARRAY_CLR(block_header, index);
    BITARRAY_CLR(in_use, index);
}

static INLINE void
set_tiny_meta_header_free(const void *ptr, msize_t msize)
{
    // !msize is acceptable and means 65536
    unsigned char *block_header;
    unsigned char *in_use;
    msize_t index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

#if DEBUG_MALLOC
    if ((unsigned)index + (unsigned)msize > 0x10000) {
        malloc_printf("setting header for tiny free %p msize too large: %d\n", ptr, msize);
    }
#endif
    BITARRAY_SET(block_header, index);
    BITARRAY_CLR(in_use, index);
    // mark the end of this block if msize is > 1.  For msize == 0, the whole
    // region is free, so there is no following block. For msize == 1, there is
    // no space to write the size on 64 bit systems.  The size for 1 quantum
    // blocks is computed from the metadata bitmaps.
    if (msize > 1) {
        void *follower = FOLLOWING_TINY_PTR(ptr, msize);
        TINY_PREVIOUS_MSIZE(follower) = msize;
        TINY_FREE_SIZE(ptr) = msize;
    }
    if (msize == 0) {
        TINY_FREE_SIZE(ptr) = msize;
    }
#if DEBUG_MALLOC
    boolean_t ff;
    msize_t mf = get_tiny_meta_header(ptr, &ff);
    if ((msize != mf) || !ff) {
        malloc_printf("setting header for tiny free %p : %u\n", ptr, msize);
        malloc_printf("reading header for tiny %p : %u %u\n", ptr, mf, ff);
    }
#endif
}

static INLINE boolean_t
tiny_meta_header_is_free(const void *ptr)
{
    unsigned char *block_header;
    unsigned char *in_use;
    msize_t index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);
    if (!BITARRAY_BIT(block_header, index))
        return 0;
    return !BITARRAY_BIT(in_use, index);
}

static INLINE void *
tiny_previous_preceding_free(void *ptr, msize_t *prev_msize)
{
    // returns the previous block, assuming and verifying it's free
    unsigned char *block_header;
    unsigned char *in_use;
    msize_t index;
    msize_t previous_msize;
    msize_t previous_index;
    void *previous_ptr;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

    if (!index)
        return NULL;
    if ((previous_msize = get_tiny_previous_free_msize(ptr)) > index)
        return NULL;

    previous_index = index - previous_msize;
    previous_ptr = (void *)(TINY_REGION_FOR_PTR(ptr) + TINY_BYTES_FOR_MSIZE(previous_index));
    if (!BITARRAY_BIT(block_header, previous_index))
        return NULL;
    if (BITARRAY_BIT(in_use, previous_index))
        return NULL;
    if (get_tiny_free_size(previous_ptr) != previous_msize)
        return NULL;

    // conservative check did match true check
    *prev_msize = previous_msize;
    return previous_ptr;
}

/*
 * Adds an item to the proper free list, and also marks the meta-header of the
 * block properly.
 * Assumes szone has been locked
 */
static void
tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize)
{
    grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *free_head = szone->tiny_free_list[slot];

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
    }
    if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
        szone_error(szone, "tiny_free_list_add_ptr: Unaligned ptr", ptr, NULL);
    }
#endif
    set_tiny_meta_header_free(ptr, msize);
    if (free_head) {
        free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
#if DEBUG_MALLOC
        if (free_list_unchecksum_ptr(free_head->previous)) {
            szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr,
                "ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, free_head, free_head->previous.p);
        }
        if (! tiny_meta_header_is_free(free_head)) {
            szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr,
                "ptr=%p slot=%d free_head=%p\n", ptr, slot, free_head);
        }
#endif
        free_head->previous.u = free_list_checksum_ptr(free_ptr);
    } else {
        BITMAP32_SET(szone->tiny_bitmap, slot);
    }
    free_ptr->previous.p = NULL;
    free_ptr->next.p = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->tiny_free_list[slot] = free_ptr;
}

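/*
 * Illustrative note (not part of the original allocator): the slot computation
 * above maps msize 1..31 to slots 0..30, while msize 0 (meaning 65536 quanta)
 * and any msize >= NUM_TINY_SLOTS all share the last slot, 31.  For example, a
 * 3-quantum (48-byte) free block goes on szone->tiny_free_list[2].
 */
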
224c7076 A |
1090 | /* |
1091 | * Removes the item pointed to by ptr in the proper free list. | |
1092 | * Assumes szone has been locked | |
1093 | */ | |
9385eb3d | 1094 | static INLINE void |
59e0d9fe A |
1095 | tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize) |
1096 | { | |
9385eb3d | 1097 | grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1; |
224c7076 A |
1098 | free_list_t *free_ptr = ptr, *next, *previous; |
1099 | free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__); | |
1100 | ||
1101 | next = free_list_unchecksum_ptr(free_ptr->next); | |
1102 | previous = free_list_unchecksum_ptr(free_ptr->previous); | |
59e0d9fe | 1103 | |
e9ce8d39 | 1104 | #if DEBUG_MALLOC |
9385eb3d | 1105 | if (LOG(szone,ptr)) { |
224c7076 | 1106 | malloc_printf("In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize); |
9385eb3d | 1107 | } |
e9ce8d39 | 1108 | #endif |
224c7076 A |
1109 | if (!previous) { |
1110 | // The block to remove is the head of the free list | |
e9ce8d39 | 1111 | #if DEBUG_MALLOC |
224c7076 A |
1112 | if (szone->tiny_free_list[slot] != ptr) { |
1113 | szone_error(szone, "tiny_free_list_remove_ptr: Internal invariant broken (szone->tiny_free_list[slot])", ptr, | |
1114 | "ptr=%p slot=%d msize=%d szone->tiny_free_list[slot]=%p\n", | |
1115 | ptr, slot, msize, szone->tiny_free_list[slot]); | |
1116 | return; | |
1117 | } | |
e9ce8d39 | 1118 | #endif |
224c7076 A |
1119 | szone->tiny_free_list[slot] = next; |
1120 | if (!next) BITMAP32_CLR(szone->tiny_bitmap, slot); | |
e9ce8d39 | 1121 | } else { |
224c7076 A |
1122 | // We know free_ptr is already checksummed, so we don't need to do it |
1123 | // again. | |
1124 | previous->next = free_ptr->next; | |
e9ce8d39 A |
1125 | } |
1126 | if (next) { | |
224c7076 A |
1127 | // We know free_ptr is already checksummed, so we don't need to do it |
1128 | // again. | |
1129 | next->previous = free_ptr->previous; | |
e9ce8d39 | 1130 | } |
e9ce8d39 A |
1131 | } |
1132 | ||
59e0d9fe | 1133 | /* |
224c7076 A |
1134 | * tiny_region_for_ptr_no_lock - Returns the tiny region containing the pointer, |
1135 | * or NULL if not found. | |
59e0d9fe | 1136 | */ |
224c7076 | 1137 | static INLINE region_t * |
59e0d9fe A |
1138 | tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr) |
1139 | { | |
224c7076 A |
1140 | return hash_lookup_region_no_lock(szone->tiny_regions, |
1141 | szone->num_tiny_regions_allocated, | |
1142 | TINY_REGION_FOR_PTR(ptr)); | |
9385eb3d A |
1143 | } |

static INLINE void
tiny_free_no_lock(szone_t *szone, region_t *region, void *ptr, msize_t msize)
{
    size_t original_size = TINY_BYTES_FOR_MSIZE(msize);
    void *next_block = ((char *)ptr + original_size);
    msize_t previous_msize;
    void *previous;
    msize_t next_msize;
    free_list_t *big_free_block;
    free_list_t *after_next_block;
    free_list_t *before_next_block;

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (! msize) {
        szone_error(szone, "trying to free tiny block that is too small", ptr,
                    "in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
#endif
    // We try to coalesce this block with the preceding one
    previous = tiny_previous_preceding_free(ptr, &previous_msize);
    if (previous) {
#if DEBUG_MALLOC
        if (LOG(szone, ptr) || LOG(szone,previous)) {
            malloc_printf("in tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
        }
#endif
        // clear the meta_header since this is no longer the start of a block
        set_tiny_meta_header_middle(ptr);
        tiny_free_list_remove_ptr(szone, previous, previous_msize);
        ptr = previous;
        msize += previous_msize;
    }
    // We try to coalesce with the next block
    if ((next_block < TINY_REGION_END(*region)) && tiny_meta_header_is_free(next_block)) {
        next_msize = get_tiny_free_size(next_block);
#if DEBUG_MALLOC
        if (LOG(szone, ptr) || LOG(szone, next_block)) {
            malloc_printf("in tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n",
                          ptr, msize, next_block, next_msize);
        }
#endif
        // If we are coalescing with the next block, and the next block is in
        // the last slot of the free list, then we optimize this case here to
        // avoid removing next_block from the slot 31 and then adding ptr back
        // to slot 31.
        if (next_msize >= NUM_TINY_SLOTS) {
            msize += next_msize;
            big_free_block = (free_list_t *)next_block;
            free_list_checksum(szone, big_free_block, __PRETTY_FUNCTION__);
            after_next_block = free_list_unchecksum_ptr(big_free_block->next);
            before_next_block = free_list_unchecksum_ptr(big_free_block->previous);
            if (!before_next_block) {
                szone->tiny_free_list[NUM_TINY_SLOTS-1] = ptr;
            } else {
                before_next_block->next.u = free_list_checksum_ptr(ptr);
            }
            if (after_next_block) {
                after_next_block->previous.u = free_list_checksum_ptr(ptr);
            }
            // we don't need to checksum these since they are already checksummed
            ((free_list_t *)ptr)->previous = big_free_block->previous;
            ((free_list_t *)ptr)->next = big_free_block->next;

            // clear the meta_header to enable coalescing backwards
            set_tiny_meta_header_middle(big_free_block);
            set_tiny_meta_header_free(ptr, msize);
            goto tiny_free_ending;
        }
        tiny_free_list_remove_ptr(szone, next_block, next_msize);
        set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
        msize += next_msize;
    }
#if !TINY_CACHE
    // The tiny cache already scribbles free blocks as they go through the
    // cache, so we do not need to do it here.
    if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize) {
        memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize));
    }
#endif
    tiny_free_list_add_ptr(szone, ptr, msize);
tiny_free_ending:
    // When in proper debug mode we write on the memory to help debug memory smashers
    szone->num_tiny_objects--;
    szone->num_bytes_in_tiny_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
}
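
/*
 * Note on the coalescing above: a freed tiny block is merged with a free
 * predecessor and a free successor before it is put back on a free list.
 * For example (quanta counts, purely illustrative): freeing a 2-quantum
 * block between a free 3-quantum predecessor and a free 4-quantum successor
 * produces a single 9-quantum free block; the neighbours' headers are reset
 * to "middle" so only the combined block remains visible to later frees.
 */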

// Allocates from the last region or a freshly allocated region
static void *
tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize)
{
    void *last_block, *ptr, *aligned_address;
    unsigned char *last_header;
    msize_t last_msize, last_index;

    // Before anything we transform any remaining tiny_bytes_free_at_end into a
    // regular free block.  We take special care here to update the bitfield
    // information, since we are bypassing the normal free codepath.  If there
    // is more than one quanta worth of memory in tiny_bytes_free_at_end, then
    // there will be two block headers:
    // 1) header for the free space at end, msize = 1
    // 2) header inserted by set_tiny_meta_header_in_use after block
    // We must clear the second one so that when the free block's size is
    // queried, we do not think the block is only 1 quantum in size because
    // of the second set header bit.
    if (szone->tiny_bytes_free_at_end) {
        last_block = TINY_REGION_END(szone->last_tiny_region) - szone->tiny_bytes_free_at_end;
        last_msize = TINY_MSIZE_FOR_BYTES(szone->tiny_bytes_free_at_end);
        last_header = TINY_BLOCK_HEADER_FOR_PTR(last_block);
        last_index = TINY_INDEX_FOR_PTR(last_block);

        if (last_index != (NUM_TINY_BLOCKS - 1))
            BITARRAY_CLR(last_header, last_index + 1);

        tiny_free_list_add_ptr(szone, last_block, last_msize);
        szone->tiny_bytes_free_at_end = 0;
    }
    // time to create a new region
    aligned_address = allocate_pages(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, 0, VM_MEMORY_MALLOC_TINY);
    if (!aligned_address) // out of memory!
        return NULL;
    // We set the padding after block_header to be all 1
    ((uint32_t *)(aligned_address + TINY_HEADER_START + (NUM_TINY_BLOCKS >> 3)))[0] = ~0;

    // Check to see if the hash ring of tiny regions needs to grow.  Try to
    // avoid the hash ring becoming too dense.
    if (szone->num_tiny_regions_allocated < (2 * szone->num_tiny_regions)) {
        region_t *new_regions;
        size_t new_size;
        new_regions = hash_regions_grow_no_lock(szone, szone->tiny_regions,
                                                szone->num_tiny_regions_allocated,
                                                &new_size);
        // Do not deallocate the current tiny_regions allocation since someone may
        // be iterating it.  Instead, just leak it.
        szone->tiny_regions = new_regions;
        szone->num_tiny_regions_allocated = new_size;
    }
    // Insert the new region into the hash ring, and update malloc statistics
    hash_region_insert_no_lock(szone->tiny_regions,
                               szone->num_tiny_regions_allocated,
                               aligned_address);
    szone->last_tiny_region = aligned_address;

    szone->num_tiny_regions++;
    ptr = aligned_address;
    set_tiny_meta_header_in_use(ptr, msize);
    szone->num_tiny_objects++;
    szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(msize);

    // We put a header on the last block so that it appears in use (for coalescing, etc...)
    set_tiny_meta_header_in_use(ptr + TINY_BYTES_FOR_MSIZE(msize), 1);
    szone->tiny_bytes_free_at_end = TINY_BYTES_FOR_MSIZE(NUM_TINY_BLOCKS - msize);
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
#endif
    return ptr;
}
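
/*
 * Note: a fresh region only has its first allocation carved out; the rest is
 * tracked in szone->tiny_bytes_free_at_end rather than on a free list.  A
 * request for msize quanta therefore leaves
 * TINY_BYTES_FOR_MSIZE(NUM_TINY_BLOCKS - msize) bytes at the end, guarded by
 * the 1-quantum "in use" header set above so coalescing never walks past it.
 */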

static INLINE boolean_t
try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
{
    // returns 1 on success
    msize_t index;
    msize_t old_msize;
    unsigned next_index;
    void *next_block;
    boolean_t is_free;
    msize_t next_msize, coalesced_msize, leftover_msize;
    void *leftover;

    index = TINY_INDEX_FOR_PTR(ptr);
    old_msize = TINY_MSIZE_FOR_BYTES(old_size);
    next_index = index + old_msize;

    if (next_index >= NUM_TINY_BLOCKS) {
        return 0;
    }
    next_block = (char *)ptr + old_size;
    SZONE_LOCK(szone);
    is_free = tiny_meta_header_is_free(next_block);
    if (!is_free) {
        SZONE_UNLOCK(szone);
        return 0; // next_block is in use;
    }
    next_msize = get_tiny_free_size(next_block);
    if (old_size + TINY_MSIZE_FOR_BYTES(next_msize) < new_size) {
        SZONE_UNLOCK(szone);
        return 0; // even with next block, not enough
    }
    tiny_free_list_remove_ptr(szone, next_block, next_msize);
    set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
    coalesced_msize = TINY_MSIZE_FOR_BYTES(new_size - old_size + TINY_QUANTUM - 1);
    leftover_msize = next_msize - coalesced_msize;
    if (leftover_msize) {
        leftover = next_block + TINY_BYTES_FOR_MSIZE(coalesced_msize);
        tiny_free_list_add_ptr(szone, leftover, leftover_msize);
    }
    set_tiny_meta_header_in_use(ptr, old_msize + coalesced_msize);
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in try_realloc_tiny_in_place(), ptr=%p, msize=%d\n", ptr, old_msize + coalesced_msize);
    }
#endif
    szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(coalesced_msize);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    return 1;
}
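
/*
 * Worked example for the arithmetic above (illustrative only, assuming the
 * usual 16-byte tiny quantum): growing a 48-byte block (old_msize = 3) to 64
 * bytes when a free 5-quantum block follows gives
 *     coalesced_msize = TINY_MSIZE_FOR_BYTES(64 - 48 + TINY_QUANTUM - 1) = 1,
 * so 4 quanta go back on the free list and the block's header becomes
 * old_msize + coalesced_msize = 4 quanta (64 bytes).
 */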

static boolean_t
tiny_check_region(szone_t *szone, region_t region)
{
    uintptr_t start, ptr, region_end;
    boolean_t prev_free = 0;
    boolean_t is_free;
    msize_t msize;
    free_list_t *free_head;
    void *follower, *previous, *next;

    /* establish region limits */
    start = (uintptr_t)TINY_REGION_ADDRESS(region);
    ptr = start;
    region_end = (uintptr_t)TINY_REGION_END(region);

    /*
     * The last region may have a trailing chunk which has not been converted into inuse/freelist
     * blocks yet.
     */
    if (region == szone->last_tiny_region)
        region_end -= szone->tiny_bytes_free_at_end;

    /*
     * Scan blocks within the region.
     */
    while (ptr < region_end) {
        /*
         * If the first block is free, and its size is 65536 (msize = 0) then the entire region is
         * free.
         */
        msize = get_tiny_meta_header((void *)ptr, &is_free);
        if (is_free && !msize && (ptr == start)) {
            return 1;
        }

        /*
         * If the block's size is 65536 (msize = 0) then since we're not the first entry the size is
         * corrupt.
         */
        if (!msize) {
            malloc_printf("*** invariant broken for tiny block %p this msize=%d - size is too small\n",
                          ptr, msize);
            return 0;
        }

        if (!is_free) {
            /*
             * In use blocks cannot be more than 31 quanta large.
             */
            prev_free = 0;
            if (msize > 31 * TINY_QUANTUM) {
                malloc_printf("*** invariant broken for %p this tiny msize=%d[%p] - size is too large\n",
                              ptr, msize, msize);
                return 0;
            }
            /* move to next block */
            ptr += TINY_BYTES_FOR_MSIZE(msize);
        } else {
            /*
             * Free blocks must have been coalesced, we cannot have a free block following another
             * free block.
             */
            if (prev_free) {
                malloc_printf("*** invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n",
                              ptr, msize);
                return 0;
            }
            prev_free = 1;
            /*
             * Check the integrity of this block's entry in its freelist.
             */
            free_head = (free_list_t *)ptr;
            free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
            previous = free_list_unchecksum_ptr(free_head->previous);
            next = free_list_unchecksum_ptr(free_head->next);
            if (previous && !tiny_meta_header_is_free(previous)) {
                malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n",
                              ptr, previous);
                return 0;
            }
            if (next && !tiny_meta_header_is_free(next)) {
                malloc_printf("*** invariant broken for %p (next in free list %p is not a free pointer)\n",
                              ptr, next);
                return 0;
            }
            /*
             * Check the free block's trailing size value.
             */
            follower = FOLLOWING_TINY_PTR(ptr, msize);
            if (((uintptr_t)follower != region_end) && (get_tiny_previous_free_msize(follower) != msize)) {
                malloc_printf("*** invariant broken for tiny free %p followed by %p in region [%p-%p] "
                              "(end marker incorrect) should be %d; in fact %d\n",
                              ptr, follower, TINY_REGION_ADDRESS(region), region_end, msize, get_tiny_previous_free_msize(follower));
                return 0;
            }
            /* move to next block */
            ptr = (uintptr_t)follower;
        }
    }
    /*
     * Ensure that we scanned the entire region
     */
    if (ptr != region_end) {
        malloc_printf("*** invariant broken for region end %p - %p\n", ptr, region_end);
        return 0;
    }
    /*
     * Check the trailing block's integrity.
     */
    if (region == szone->last_tiny_region) {
        if (szone->tiny_bytes_free_at_end) {
            msize = get_tiny_meta_header((void *)ptr, &is_free);
            if (is_free || (msize != 1)) {
                malloc_printf("*** invariant broken for blocker block %p - %d %d\n", ptr, msize, is_free);
            }
        }
    }
    return 1;
}

static kern_return_t
tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone, memory_reader_t reader, vm_range_recorder_t recorder)
{
    size_t num_regions = szone->num_tiny_regions_allocated;
    void *last_tiny_free = szone->last_tiny_free;
    size_t index;
    region_t *regions;
    vm_range_t buffer[MAX_RECORDER_BUFFER];
    unsigned count = 0;
    kern_return_t err;
    region_t region;
    vm_range_t range;
    vm_range_t admin_range;
    vm_range_t ptr_range;
    unsigned char *mapped_region;
    unsigned char *block_header;
    unsigned char *in_use;
    unsigned block_index;
    unsigned block_limit;
    boolean_t is_free;
    msize_t msize;
    void *mapped_ptr;
    unsigned bit;
    vm_address_t last_tiny_free_ptr = 0;
    msize_t last_tiny_free_msize = 0;

    if (last_tiny_free) {
        last_tiny_free_ptr = (uintptr_t) last_tiny_free & ~(TINY_QUANTUM - 1);
        last_tiny_free_msize = (uintptr_t) last_tiny_free & (TINY_QUANTUM - 1);
    }

    err = reader(task, (vm_address_t)szone->tiny_regions, sizeof(region_t) * num_regions, (void **)&regions);
    if (err) return err;
    for (index = 0; index < num_regions; ++index) {
        region = regions[index];
        if (region) {
            range.address = (vm_address_t)TINY_REGION_ADDRESS(region);
            range.size = (vm_size_t)TINY_REGION_SIZE;
            if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
                admin_range.address = range.address + TINY_HEADER_START;
                admin_range.size = TINY_HEADER_SIZE;
                recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
            }
            if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
                ptr_range.address = range.address;
                ptr_range.size = NUM_TINY_BLOCKS * TINY_QUANTUM;
                recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
            }
            if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
                err = reader(task, range.address, range.size, (void **)&mapped_region);
                if (err)
                    return err;

                block_header = (unsigned char *)(mapped_region + TINY_HEADER_START);
                in_use = TINY_INUSE_FOR_HEADER(block_header);
                block_index = 0;
                block_limit = NUM_TINY_BLOCKS;
                if (region == szone->last_tiny_region)
                    block_limit -= TINY_MSIZE_FOR_BYTES(szone->tiny_bytes_free_at_end);

                while (block_index < block_limit) {
                    vm_size_t block_offset = TINY_BYTES_FOR_MSIZE(block_index);
                    is_free = !BITARRAY_BIT(in_use, block_index);
                    if (is_free) {
                        mapped_ptr = mapped_region + block_offset;

                        // mapped_region, the address at which 'range' in 'task' has been
                        // mapped into our process, is not necessarily aligned to
                        // TINY_BLOCKS_ALIGN.
                        //
                        // Since the code in get_tiny_free_size() assumes the pointer came
                        // from a properly aligned tiny region, and mapped_region is not
                        // necessarily aligned, then do the size calculation directly.
                        // If the next bit is set in the header bitmap, then the size is one
                        // quantum.  Otherwise, read the size field.
                        if (!BITARRAY_BIT(block_header, block_index+1))
                            msize = TINY_FREE_SIZE(mapped_ptr);
                        else
                            msize = 1;

                        if (!msize)
                            break;
                    } else if (range.address + block_offset != last_tiny_free_ptr) {
                        msize = 1;
                        bit = block_index + 1;
                        while (! BITARRAY_BIT(block_header, bit)) {
                            bit++;
                            msize ++;
                        }
                        buffer[count].address = range.address + block_offset;
                        buffer[count].size = TINY_BYTES_FOR_MSIZE(msize);
                        count++;
                        if (count >= MAX_RECORDER_BUFFER) {
                            recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
                            count = 0;
                        }
                    } else {
                        // Block is not free but it matches last_tiny_free_ptr so even
                        // though it is not marked free in the bitmap, we treat it as if
                        // it is and move on
                        msize = last_tiny_free_msize;
                    }
                    block_index += msize;
                }
            }
        }
    }
    if (count) {
        recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
    }
    return 0;
}

static void *
tiny_malloc_from_free_list(szone_t *szone, msize_t msize)
{
    // Assumes we've locked the region
    free_list_t *ptr;
    msize_t this_msize;
    grain_t slot = msize - 1;
    free_list_t **free_list = szone->tiny_free_list;
    free_list_t **the_slot = free_list + slot;
    free_list_t *next;
    free_list_t **limit;
    unsigned bitmap;
    msize_t leftover_msize;
    free_list_t *leftover_ptr;

    // Assumes locked
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);

    // Look for an exact match by checking the freelist for this msize.
    //
    ptr = *the_slot;
    if (ptr) {
        next = free_list_unchecksum_ptr(ptr->next);
        if (next) {
            next->previous = ptr->previous;
        } else {
            BITMAP32_CLR(szone->tiny_bitmap, slot);
        }
        *the_slot = next;
        this_msize = msize;
#if DEBUG_MALLOC
        if (LOG(szone, ptr)) {
            malloc_printf("in tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize);
        }
#endif
        goto return_tiny_alloc;
    }

    // Mask off the bits representing slots holding free blocks smaller than the
    // size we need.  If there are no larger free blocks, try allocating from
    // the free space at the end of the tiny region.
    bitmap = szone->tiny_bitmap & ~ ((1 << slot) - 1);
    if (!bitmap)
        goto try_tiny_malloc_from_end;

    slot = BITMAP32_CTZ(bitmap);
    limit = free_list + NUM_TINY_SLOTS - 1;
    free_list += slot;

    // Iterate over freelists looking for free blocks, starting at first list
    // which is not empty, and contains blocks which are large enough to satisfy
    // our request.
    while (free_list < limit) {
        ptr = *free_list;
        if (ptr) {
            next = free_list_unchecksum_ptr(ptr->next);
            *free_list = next;
            this_msize = get_tiny_free_size(ptr);
            if (next) {
                next->previous = ptr->previous;
            } else {
                BITMAP32_CLR(szone->tiny_bitmap, this_msize - 1);
            }
            goto add_leftover_and_proceed;
        }
        free_list++;
    }

    // We are now looking at the last slot, which contains blocks equal to, or
    // due to coalescing of free blocks, larger than 31 * tiny quantum size.
    // If the last freelist is not empty, and the head contains a block that is
    // larger than our request, then the remainder is put back on the free list.
    ptr = *limit;
    if (ptr) {
        free_list_checksum(szone, ptr, __PRETTY_FUNCTION__);
        this_msize = get_tiny_free_size(ptr);
        next = free_list_unchecksum_ptr(ptr->next);
        if (this_msize - msize >= NUM_TINY_SLOTS) {
            // the leftover will go back to the free list, so we optimize by
            // modifying the free list rather than a pop and push of the head
            leftover_msize = this_msize - msize;
            leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
            *limit = leftover_ptr;
            if (next) {
                next->previous.u = free_list_checksum_ptr(leftover_ptr);
            }
            leftover_ptr->previous = ptr->previous;
            leftover_ptr->next = ptr->next;
            set_tiny_meta_header_free(leftover_ptr, leftover_msize);
#if DEBUG_MALLOC
            if (LOG(szone,ptr)) {
                malloc_printf("in tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
            }
#endif
            this_msize = msize;
            goto return_tiny_alloc;
        }
        if (next) {
            next->previous = ptr->previous;
        }
        *limit = next;
        goto add_leftover_and_proceed;
    }

try_tiny_malloc_from_end:
    // Let's see if we can use szone->tiny_bytes_free_at_end
    if (szone->tiny_bytes_free_at_end >= TINY_BYTES_FOR_MSIZE(msize)) {
        ptr = (free_list_t *)(TINY_REGION_END(szone->last_tiny_region) - szone->tiny_bytes_free_at_end);
        szone->tiny_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(msize);
        if (szone->tiny_bytes_free_at_end) {
            // let's add an in use block after ptr to serve as boundary
            set_tiny_meta_header_in_use((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize), 1);
        }
        this_msize = msize;
#if DEBUG_MALLOC
        if (LOG(szone, ptr)) {
            malloc_printf("in tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize);
        }
#endif
        goto return_tiny_alloc;
    }
    return NULL;

add_leftover_and_proceed:
    if (!this_msize || (this_msize > msize)) {
        leftover_msize = this_msize - msize;
        leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
#if DEBUG_MALLOC
        if (LOG(szone,ptr)) {
            malloc_printf("in tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
        }
#endif
        tiny_free_list_add_ptr(szone, leftover_ptr, leftover_msize);
        this_msize = msize;
    }

return_tiny_alloc:
    szone->num_tiny_objects++;
    szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(this_msize);
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
    }
#endif
    set_tiny_meta_header_in_use(ptr, this_msize);
    return ptr;
}
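
/*
 * Note: the lookup above tries three sources in order: the exact-fit slot,
 * the first larger non-empty slot found through the bitmap (splitting off
 * any leftover back onto a free list), and finally the untouched space at
 * the end of the last tiny region.  Only when all three fail does the caller
 * fall back to mapping a brand new region.
 */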

static INLINE void *
tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested)
{
    boolean_t locked = 0;
    void *ptr;

#if DEBUG_MALLOC
    if (!msize) {
        szone_error(szone, "invariant broken (!msize) in allocation (region)", NULL, NULL);
        return(NULL);
    }
#endif
#if TINY_CACHE
    ptr = szone->last_tiny_free;
    if ((((uintptr_t)ptr) & (TINY_QUANTUM - 1)) == msize) {
        // we have a candidate - let's lock to make sure
        LOCK_AND_NOTE_LOCKED(szone, locked);
        if (ptr == szone->last_tiny_free) {
            szone->last_tiny_free = NULL;
            SZONE_UNLOCK(szone);
            CHECK(szone, __PRETTY_FUNCTION__);
            ptr = (void *)((uintptr_t)ptr & ~ (TINY_QUANTUM - 1));
            if (cleared_requested) {
                memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
            }
#if DEBUG_MALLOC
            if (LOG(szone,ptr)) {
                malloc_printf("in tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize);
            }
#endif
            return ptr;
        }
    }
#endif
    // Except in rare occasions where we need to add a new region, we are going
    // to end up locking, so we might as well lock right away to avoid doing
    // unnecessary optimistic probes
    if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked);
    ptr = tiny_malloc_from_free_list(szone, msize);
    if (ptr) {
        SZONE_UNLOCK(szone);
        CHECK(szone, __PRETTY_FUNCTION__);
        if (cleared_requested) {
            memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
        }
        return ptr;
    }
    ptr = tiny_malloc_from_region_no_lock(szone, msize);
    // we don't clear because this freshly allocated space is pristine
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    return ptr;
}

static INLINE void
free_tiny(szone_t *szone, void *ptr, region_t *tiny_region)
{
    msize_t msize;
    boolean_t is_free;
#if TINY_CACHE
    void *ptr2;
#endif

    // ptr is known to be in tiny_region
    SZONE_LOCK(szone);
#if TINY_CACHE
    ptr2 = szone->last_tiny_free;
    /* check that we don't already have this pointer in the cache */
    if (ptr == (void *)((uintptr_t)ptr2 & ~ (TINY_QUANTUM - 1))) {
        szone_error(szone, "double free", ptr, NULL);
        return;
    }
#endif /* TINY_CACHE */
    msize = get_tiny_meta_header(ptr, &is_free);
    if (is_free) {
        szone_error(szone, "double free", ptr, NULL);
        return;
    }
#if DEBUG_MALLOC
    if (!msize) {
        malloc_printf("*** szone_free() block in use is too large: %p\n", ptr);
        return;
    }
#endif
#if TINY_CACHE
    if (msize < TINY_QUANTUM) { // to see if the bits fit in the last 4 bits
        if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize)
            memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize));
        szone->last_tiny_free = (void *)(((uintptr_t)ptr) | msize);
        if (!ptr2) {
            SZONE_UNLOCK(szone);
            CHECK(szone, __PRETTY_FUNCTION__);
            return;
        }
        msize = (uintptr_t)ptr2 & (TINY_QUANTUM - 1);
        ptr = (void *)(((uintptr_t)ptr2) & ~(TINY_QUANTUM - 1));
        tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
        if (!tiny_region) {
            szone_error(szone, "double free (tiny cache)", ptr, NULL);
        }
    }
#endif
    tiny_free_no_lock(szone, tiny_region, ptr, msize);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
}
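
/*
 * Note on the one-element cache used above: tiny blocks are TINY_QUANTUM
 * aligned and a cached block's msize fits in the low bits, so pointer and
 * size are packed into a single word:
 *
 *     szone->last_tiny_free = (void *)(((uintptr_t)ptr) | msize);
 *     // decode: ptr   = word & ~(TINY_QUANTUM - 1)
 *     //         msize = word &  (TINY_QUANTUM - 1)
 *
 * tiny_malloc_should_clear() and tiny_in_use_enumerator() both rely on this
 * same decoding.
 */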

static void
print_tiny_free_list(szone_t *szone)
{
    grain_t slot = 0;
    free_list_t *ptr;
    _SIMPLE_STRING b = _simple_salloc();

    if (b) {
        _simple_sappend(b, "tiny free sizes: ");
        while (slot < NUM_TINY_SLOTS) {
            ptr = szone->tiny_free_list[slot];
            if (ptr) {
                _simple_sprintf(b, "%s%y[%d]; ", (slot == NUM_TINY_SLOTS-1) ? ">=" : "", (slot+1)*TINY_QUANTUM, free_list_count(ptr));
            }
            slot++;
        }
        _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
        _simple_sfree(b);
    }
}

static void
print_tiny_region(boolean_t verbose, region_t region, size_t bytes_at_end)
{
    unsigned counts[1024];
    unsigned in_use = 0;
    uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(region);
    uintptr_t current = start;
    uintptr_t limit = (uintptr_t)TINY_REGION_END(region) - bytes_at_end;
    boolean_t is_free;
    msize_t msize;
    unsigned ci;
    _SIMPLE_STRING b;

    memset(counts, 0, 1024 * sizeof(unsigned));
    while (current < limit) {
        msize = get_tiny_meta_header((void *)current, &is_free);
        if (is_free & !msize && (current == start)) {
            // first block is all free
            break;
        }
        if (!msize) {
            malloc_printf("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize);
            break;
        }
        if (!is_free) {
            // block in use
            if (msize > 32)
                malloc_printf("*** error at %p msize for in_use is %d\n", (void *)current, msize);
            if (msize < 1024)
                counts[msize]++;
            in_use++;
        }
        current += TINY_BYTES_FOR_MSIZE(msize);
    }
    if ((b = _simple_salloc()) != NULL) {
        _simple_sprintf(b, "Tiny region [%p-%p, %y]\t", (void *)start, TINY_REGION_END(region), (int)TINY_REGION_SIZE);
        _simple_sprintf(b, "In_use=%d ", in_use);
        if (bytes_at_end) _simple_sprintf(b, "untouched=%ly", bytes_at_end);
        if (verbose && in_use) {
            _simple_sappend(b, "\n\tSizes in use: ");
            for (ci = 0; ci < 1024; ci++)
                if (counts[ci])
                    _simple_sprintf(b, "%d[%d]", TINY_BYTES_FOR_MSIZE(ci), counts[ci]);
        }
        _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
        _simple_sfree(b);
    }
}

static boolean_t
tiny_free_list_check(szone_t *szone, grain_t slot)
{
    unsigned count = 0;
    free_list_t *ptr = szone->tiny_free_list[slot];
    free_list_t *previous = NULL;
    boolean_t is_free;

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    while (ptr) {
        free_list_checksum(szone, ptr, __PRETTY_FUNCTION__);
        is_free = tiny_meta_header_is_free(ptr);
        if (! is_free) {
            malloc_printf("*** in-use ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
            return 0;
        }
        if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
            malloc_printf("*** unaligned ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
            return 0;
        }
        if (!tiny_region_for_ptr_no_lock(szone, ptr)) {
            malloc_printf("*** ptr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr);
            return 0;
        }
        if (free_list_unchecksum_ptr(ptr->previous) != previous) {
            malloc_printf("*** previous incorrectly set slot=%d count=%d ptr=%p\n", slot, count, ptr);
            return 0;
        }
        previous = ptr;
        ptr = free_list_unchecksum_ptr(ptr->next);
        count++;
    }
    return 1;
}

/********************* SMALL FREE LIST UTILITIES ************************/

/*
 * Mark a block as free.  Only the first quantum of a block is marked thusly,
 * the remainder are marked "middle".
 */
static INLINE void
small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize)
{
    meta_headers[index] = msize | SMALL_IS_FREE;
}

/*
 * Mark a block as in use.  Only the first quantum of a block is marked thusly,
 * the remainder are marked "middle".
 */
static INLINE void
small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize)
{
    meta_headers[index] = msize;
}

/*
 * Mark a quantum as being the second or later in a block.
 */
static INLINE void
small_meta_header_set_middle(msize_t *meta_headers, msize_t index)
{
    meta_headers[index] = 0;
}
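
/*
 * Illustrative example of the encoding above (hypothetical indices): after
 *
 *     small_meta_header_set_is_free(meta_headers, 4, 10);
 *
 * meta_headers[4] == (10 | SMALL_IS_FREE) and the callers mark quanta 5..13
 * as "middle" (0).  Because SMALL_IS_FREE lives in the metadata word itself,
 * readers mask it off with "& ~ SMALL_IS_FREE" before using the size.
 */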

// Adds an item to the proper free list
// Also marks the header of the block properly
// Assumes szone has been locked
static void
small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize)
{
    grain_t slot = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *free_head = szone->small_free_list[slot];
    void *follower;

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
    }
    if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) {
        szone_error(szone, "small_free_list_add_ptr: Unaligned ptr", ptr, NULL);
    }
#endif
    if (free_head) {
        free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
#if DEBUG_MALLOC
        if (free_list_unchecksum_ptr(free_head->previous)) {
            szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr,
                        "ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, free_head, free_head->previous.p);
        }
        if (!SMALL_PTR_IS_FREE(free_head)) {
            szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr,
                        "ptr=%p slot=%d free_head=%p\n", ptr, slot, free_head);
        }
#endif
        free_head->previous.u = free_list_checksum_ptr(free_ptr);
    } else {
        BITMAP32_SET(szone->small_bitmap, slot);
    }
    free_ptr->previous.p = NULL;
    free_ptr->next.p = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->small_free_list[slot] = free_ptr;
    follower = ptr + SMALL_BYTES_FOR_MSIZE(msize);
    SMALL_PREVIOUS_MSIZE(follower) = msize;
}
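
/*
 * Note: besides its metadata word, every small free block records its msize
 * again in the first msize_t of the block that follows it (the
 * SMALL_PREVIOUS_MSIZE(follower) store above).  That trailing copy is what
 * lets small_free_no_lock() coalesce backwards without walking the region,
 * and it is the "end marker" that szone_check_small_region() verifies.
 */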

// Removes item in the proper free list
// msize could be read, but all callers have it so we pass it in
// Assumes szone has been locked
static void
small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize)
{
    grain_t slot = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
    free_list_t *free_ptr = ptr, *next, *previous;
    free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);

    next = free_list_unchecksum_ptr(free_ptr->next);
    previous = free_list_unchecksum_ptr(free_ptr->previous);

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
    }
#endif
    if (!previous) {
        // The block to remove is the head of the free list
#if DEBUG_MALLOC
        if (szone->small_free_list[slot] != ptr) {
            szone_error(szone, "small_free_list_remove_ptr: Internal invariant broken (szone->small_free_list[grain])", ptr,
                        "ptr=%p slot=%d msize=%d szone->small_free_list[slot]=%p\n",
                        ptr, slot, msize, szone->small_free_list[slot]);
            return;
        }
#endif
        szone->small_free_list[slot] = next;
        if (!next) BITMAP32_CLR(szone->small_bitmap, slot);
    } else {
        // We know free_ptr is already checksummed, so we don't need to do it again.
        previous->next = free_ptr->next;
    }
    if (next) {
        // We know free_ptr is already checksummed, so we don't need to do it again.
        next->previous = free_ptr->previous;
    }
}

static INLINE region_t *
small_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
{
    return hash_lookup_region_no_lock(szone->small_regions,
                                      szone->num_small_regions_allocated,
                                      SMALL_REGION_FOR_PTR(ptr));
}

static INLINE void
small_free_no_lock(szone_t *szone, region_t *region, void *ptr, msize_t msize)
{
    msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
    unsigned index = SMALL_META_INDEX_FOR_PTR(ptr);
    size_t original_size = SMALL_BYTES_FOR_MSIZE(msize);
    unsigned char *next_block = ((unsigned char *)ptr + original_size);
    msize_t next_index = index + msize;
    msize_t previous_msize, next_msize;
    void *previous;

    // Assumes locked
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (! msize) {
        szone_error(szone, "trying to free small block that is too small", ptr,
                    "in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
#endif
    // We try to coalesce this block with the preceding one
    if (index && (SMALL_PREVIOUS_MSIZE(ptr) <= index)) {
        previous_msize = SMALL_PREVIOUS_MSIZE(ptr);
        if (meta_headers[index - previous_msize] == (previous_msize | SMALL_IS_FREE)) {
            previous = ptr - SMALL_BYTES_FOR_MSIZE(previous_msize);
            // previous is really to be coalesced
#if DEBUG_MALLOC
            if (LOG(szone, ptr) || LOG(szone,previous)) {
                malloc_printf("in small_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
            }
#endif
            small_free_list_remove_ptr(szone, previous, previous_msize);
            small_meta_header_set_middle(meta_headers, index);
            ptr = previous;
            msize += previous_msize;
            index -= previous_msize;
        }
    }
    // We try to coalesce with the next block
    if ((next_block < SMALL_REGION_END(*region)) && (meta_headers[next_index] & SMALL_IS_FREE)) {
        // next block is free, we coalesce
        next_msize = meta_headers[next_index] & ~ SMALL_IS_FREE;
#if DEBUG_MALLOC
        if (LOG(szone,ptr)) malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", ptr, msize, next_block, next_msize);
#endif
        small_free_list_remove_ptr(szone, next_block, next_msize);
        small_meta_header_set_middle(meta_headers, next_index);
        msize += next_msize;
    }
    if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) {
        if (!msize) {
            szone_error(szone, "incorrect size information - block header was damaged", ptr, NULL);
        } else {
            memset(ptr, 0x55, SMALL_BYTES_FOR_MSIZE(msize));
        }
    }
    small_free_list_add_ptr(szone, ptr, msize);
    small_meta_header_set_is_free(meta_headers, index, msize);
    szone->num_small_objects--;
    szone->num_bytes_in_small_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
}

static void *
small_malloc_from_region_no_lock(szone_t *szone, msize_t msize)
{
    void *last_block;
    void *ptr;
    void *new_address;
    msize_t *meta_headers;
    msize_t index;
    msize_t msize_left;

    // Allocates from the last region or a freshly allocated region
    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    // Before anything we transform the small_bytes_free_at_end - if any - to a regular free block
    if (szone->small_bytes_free_at_end) {
        last_block = (void *)(SMALL_REGION_END(szone->last_small_region) - szone->small_bytes_free_at_end);
        small_free_list_add_ptr(szone, last_block, SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end));
        *SMALL_METADATA_FOR_PTR(last_block) = SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end) | SMALL_IS_FREE;
        szone->small_bytes_free_at_end = 0;
    }
    // time to create a new region
    new_address = allocate_pages(szone, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN,
                                 0, VM_MEMORY_MALLOC_SMALL);
    if (!new_address)
        return NULL;

    ptr = new_address;
    meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
    index = 0;

    // Check to see if the hash ring of small regions needs to grow.  Try to
    // avoid the hash ring becoming too dense.
    if (szone->num_small_regions_allocated < (2 * szone->num_small_regions)) {
        region_t *new_regions;
        size_t new_size;
        new_regions = hash_regions_grow_no_lock(szone, szone->small_regions,
                                                szone->num_small_regions_allocated,
                                                &new_size);
        // Do not deallocate the current small_regions allocation since someone
        // may be iterating it.  Instead, just leak it.
        szone->small_regions = new_regions;
        szone->num_small_regions_allocated = new_size;
    }
    // Insert the new region into the hash ring, and update malloc statistics
    hash_region_insert_no_lock(szone->small_regions,
                               szone->num_small_regions_allocated,
                               new_address);
    szone->last_small_region = new_address;

    // we bump the number of regions AFTER we have changed the regions pointer
    // to enable finding a small region without taking the lock
    //
    // FIXME: naive assumption assumes memory ordering coherence between this
    // and other CPUs.  This also applies to the near-identical code in
    // tiny_malloc_from_region_no_lock.
    szone->num_small_regions++;
    small_meta_header_set_in_use(meta_headers, index, msize);
    msize_left = NUM_SMALL_BLOCKS - index;
    szone->num_small_objects++;
    szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(msize);
    // add a big free block
    index += msize; msize_left -= msize;
    meta_headers[index] = msize_left;
    szone->small_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(msize_left);
    return ptr;
}

static INLINE boolean_t
try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
{
    // returns 1 on success
    msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
    unsigned index = SMALL_META_INDEX_FOR_PTR(ptr);
    msize_t old_msize = SMALL_MSIZE_FOR_BYTES(old_size);
    msize_t new_msize = SMALL_MSIZE_FOR_BYTES(new_size + SMALL_QUANTUM - 1);
    void *next_block = (char *)ptr + old_size;
    unsigned next_index = index + old_msize;
    msize_t next_msize_and_free;
    msize_t next_msize;
    msize_t leftover_msize;
    void *leftover;
    unsigned leftover_index;

    if (next_index >= NUM_SMALL_BLOCKS) {
        return 0;
    }
#if DEBUG_MALLOC
    if ((uintptr_t)next_block & (SMALL_QUANTUM - 1)) {
        szone_error(szone, "internal invariant broken in realloc(next_block)", next_block, NULL);
    }
    if (meta_headers[index] != old_msize)
        malloc_printf("*** try_realloc_small_in_place incorrect old %d %d\n",
                      meta_headers[index], old_msize);
#endif
    SZONE_LOCK(szone);
    /*
     * Look for a free block immediately afterwards.  If it's large enough, we can consume (part of)
     * it.
     */
    next_msize_and_free = meta_headers[next_index];
    next_msize = next_msize_and_free & ~ SMALL_IS_FREE;
    if (!(next_msize_and_free & SMALL_IS_FREE) || (old_msize + next_msize < new_msize)) {
        SZONE_UNLOCK(szone);
        return 0;
    }
    /*
     * The following block is big enough; pull it from its freelist and chop off enough to satisfy
     * our needs.
     */
    small_free_list_remove_ptr(szone, next_block, next_msize);
    small_meta_header_set_middle(meta_headers, next_index);
    leftover_msize = old_msize + next_msize - new_msize;
    if (leftover_msize) {
        /* there's some left, so put the remainder back */
        leftover = (unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(new_msize);
        small_free_list_add_ptr(szone, leftover, leftover_msize);
        leftover_index = index + new_msize;
        small_meta_header_set_is_free(meta_headers, leftover_index, leftover_msize);
    }
#if DEBUG_MALLOC
    if (SMALL_BYTES_FOR_MSIZE(new_msize) >= LARGE_THRESHOLD) {
        malloc_printf("*** realloc in place for %p exceeded msize=%d\n", new_msize);
    }
#endif
    small_meta_header_set_in_use(meta_headers, index, new_msize);
#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in szone_realloc(), ptr=%p, msize=%d\n", ptr, *SMALL_METADATA_FOR_PTR(ptr));
    }
#endif
    szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(new_msize - old_msize);
    SZONE_UNLOCK(szone);
    CHECK(szone, __PRETTY_FUNCTION__);
    return 1;
}

static boolean_t
szone_check_small_region(szone_t *szone, region_t region)
{
    unsigned char *ptr = SMALL_REGION_ADDRESS(region);
    msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
    unsigned char *region_end = SMALL_REGION_END(region);
    msize_t prev_free = 0;
    unsigned index;
    msize_t msize_and_free;
    msize_t msize;
    free_list_t *free_head;
    void *previous, *next;
    msize_t *follower;

    CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
    if (region == szone->last_small_region) region_end -= szone->small_bytes_free_at_end;
    while (ptr < region_end) {
        index = SMALL_META_INDEX_FOR_PTR(ptr);
        msize_and_free = meta_headers[index];
        if (!(msize_and_free & SMALL_IS_FREE)) {
            // block is in use
            msize = msize_and_free;
            if (!msize) {
                malloc_printf("*** invariant broken: null msize ptr=%p num_small_regions=%d end=%p\n",
                              ptr, szone->num_small_regions, region_end);
                return 0;
            }
            if (msize > (LARGE_THRESHOLD / SMALL_QUANTUM)) {
                malloc_printf("*** invariant broken for %p this small msize=%d - size is too large\n",
                              ptr, msize_and_free);
                return 0;
            }
            ptr += SMALL_BYTES_FOR_MSIZE(msize);
            prev_free = 0;
        } else {
            // free pointer
            msize = msize_and_free & ~ SMALL_IS_FREE;
            free_head = (free_list_t *)ptr;
            follower = (msize_t *)FOLLOWING_SMALL_PTR(ptr, msize);
            if (!msize) {
                malloc_printf("*** invariant broken for free block %p this msize=%d\n", ptr, msize);
                return 0;
            }
            if (prev_free) {
                malloc_printf("*** invariant broken for %p (2 free in a row)\n", ptr);
                return 0;
            }
            free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
            previous = free_list_unchecksum_ptr(free_head->previous);
            next = free_list_unchecksum_ptr(free_head->next);
            if (previous && !SMALL_PTR_IS_FREE(previous)) {
                malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n",
                              ptr, free_head->previous);
                return 0;
            }
            if (next && !SMALL_PTR_IS_FREE(next)) {
                malloc_printf("*** invariant broken for %p (next is not a free pointer)\n", ptr);
                return 0;
            }
            if (SMALL_PREVIOUS_MSIZE(follower) != msize) {
                malloc_printf("*** invariant broken for small free %p followed by %p in region [%p-%p] "
                              "(end marker incorrect) should be %d; in fact %d\n",
                              ptr, follower, SMALL_REGION_ADDRESS(region), region_end, msize, SMALL_PREVIOUS_MSIZE(follower));
                return 0;
            }
            ptr = (unsigned char *)follower;
            prev_free = SMALL_IS_FREE;
        }
    }
    return 1;
}

static kern_return_t
small_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone, memory_reader_t reader, vm_range_recorder_t recorder)
{
    size_t num_regions = szone->num_small_regions_allocated;
    void *last_small_free = szone->last_small_free;
    size_t index;
    region_t *regions;
    vm_range_t buffer[MAX_RECORDER_BUFFER];
    unsigned count = 0;
    kern_return_t err;
    region_t region;
    vm_range_t range;
    vm_range_t admin_range;
    vm_range_t ptr_range;
    unsigned char *mapped_region;
    msize_t *block_header;
    unsigned block_index;
    unsigned block_limit;
    msize_t msize_and_free;
    msize_t msize;
    vm_address_t last_small_free_ptr = 0;
    msize_t last_small_free_msize = 0;

    if (last_small_free) {
        last_small_free_ptr = (uintptr_t)last_small_free & ~(SMALL_QUANTUM - 1);
        last_small_free_msize = (uintptr_t)last_small_free & (SMALL_QUANTUM - 1);
    }

    err = reader(task, (vm_address_t)szone->small_regions, sizeof(region_t) * num_regions, (void **)&regions);
    if (err) return err;
    for (index = 0; index < num_regions; ++index) {
        region = regions[index];
        if (region) {
            range.address = (vm_address_t)SMALL_REGION_ADDRESS(region);
            range.size = SMALL_REGION_SIZE;
            if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
                admin_range.address = range.address + SMALL_HEADER_START;
                admin_range.size = SMALL_ARRAY_SIZE;
                recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
            }
            if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
                ptr_range.address = range.address;
                ptr_range.size = NUM_SMALL_BLOCKS * SMALL_QUANTUM;
                recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
            }
            if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
                err = reader(task, range.address, range.size, (void **)&mapped_region);
                if (err) return err;
                block_header = (msize_t *)(mapped_region + SMALL_HEADER_START);
                block_index = 0;
                block_limit = NUM_SMALL_BLOCKS;
                if (region == szone->last_small_region)
                    block_limit -= SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end);
                while (block_index < block_limit) {
                    msize_and_free = block_header[block_index];
                    msize = msize_and_free & ~ SMALL_IS_FREE;
                    if (! (msize_and_free & SMALL_IS_FREE) &&
                        range.address + SMALL_BYTES_FOR_MSIZE(block_index) != last_small_free_ptr) {
                        // Block in use
                        buffer[count].address = range.address + SMALL_BYTES_FOR_MSIZE(block_index);
                        buffer[count].size = SMALL_BYTES_FOR_MSIZE(msize);
                        count++;
                        if (count >= MAX_RECORDER_BUFFER) {
                            recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
                            count = 0;
                        }
                    }
                    block_index += msize;
                }
            }
        }
    }
    if (count) {
        recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
    }
    return 0;
}
2422 | ||
224c7076 | 2423 | static void * |
59e0d9fe A |
2424 | small_malloc_from_free_list(szone_t *szone, msize_t msize) |
2425 | { | |
224c7076 A |
2426 | free_list_t *ptr; |
2427 | msize_t this_msize; | |
2428 | grain_t slot = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1; | |
2429 | free_list_t **free_list = szone->small_free_list; | |
2430 | free_list_t *next; | |
2431 | free_list_t **limit; | |
2432 | unsigned bitmap = szone->small_bitmap & ~ ((1 << slot) - 1); | |
2433 | msize_t leftover_msize; | |
2434 | free_list_t *leftover_ptr; | |
2435 | msize_t *meta_headers; | |
2436 | unsigned leftover_index; | |
59e0d9fe A |
2437 | |
2438 | // Assumes locked | |
2439 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
2440 | ||
224c7076 A |
2441 | // Mask off the bits representing slots holding free blocks smaller than the |
2442 | // size we need. If there are no larger free blocks, try allocating from | |
2443 | // the free space at the end of the small region. | |
2444 | if (!bitmap) | |
2445 | goto try_small_from_end; | |
2446 | ||
2447 | slot = BITMAP32_CTZ(bitmap); | |
2448 | limit = free_list + NUM_SMALL_SLOTS - 1; | |
2449 | free_list += slot; | |
2450 | ||
2451 | // Iterate over the free lists looking for free blocks, starting at the first | |
2452 | // list that is not empty and contains blocks large enough to satisfy our | |
2453 | // request. | |
e9ce8d39 | 2454 | while (free_list < limit) { |
224c7076 A |
2455 | ptr = *free_list; |
2456 | if (ptr) { | |
2457 | next = free_list_unchecksum_ptr(ptr->next); | |
2458 | *free_list = next; | |
2459 | this_msize = SMALL_PTR_SIZE(ptr); | |
2460 | if (next) { | |
2461 | next->previous = ptr->previous; | |
2462 | } else { | |
2463 | BITMAP32_CLR(szone->small_bitmap, this_msize - 1); | |
2464 | } | |
2465 | goto add_leftover_and_proceed; | |
2466 | } | |
2467 | free_list++; | |
2468 | } | |
2469 | ||
2470 | // We are now looking at the last slot, which contains blocks equal to (or, | |
2471 | // due to coalescing of free blocks, larger than) 31 times the small quantum. | |
2472 | // If the last freelist is not empty, and the head contains a block that is | |
2473 | // larger than our request, then the remainder is put back on the free list. | |
2474 | // | |
2475 | // FIXME: This code doesn't have the optimization from the 'tiny' codepath | |
2476 | // that handles the case where this_msize >= 2 * the number of slots. | |
2477 | // FIXME: this code also seems somewhat bogus. There's a check for | |
2478 | // this_msize >= msize, but by definition we can't ask for a small | |
2479 | // block larger than 31 small quanta, and every free block in this | |
2480 | // slot has to be at least that large. | |
2481 | ptr = *limit; | |
e9ce8d39 | 2482 | while (ptr) { |
224c7076 A |
2483 | free_list_checksum(szone, ptr, __PRETTY_FUNCTION__); |
2484 | next = free_list_unchecksum_ptr(ptr->next); | |
2485 | this_msize = SMALL_PTR_SIZE(ptr); | |
2486 | if (this_msize >= msize) { | |
2487 | small_free_list_remove_ptr(szone, ptr, this_msize); | |
2488 | goto add_leftover_and_proceed; | |
2489 | } | |
2490 | ptr = next; | |
9385eb3d | 2491 | } |
224c7076 | 2492 | |
9385eb3d A |
2493 | try_small_from_end: |
2494 | // Let's see if we can use szone->small_bytes_free_at_end | |
59e0d9fe | 2495 | if (szone->small_bytes_free_at_end >= SMALL_BYTES_FOR_MSIZE(msize)) { |
224c7076 A |
2496 | ptr = (free_list_t *)(SMALL_REGION_END(szone->last_small_region) - szone->small_bytes_free_at_end); |
2497 | szone->small_bytes_free_at_end -= SMALL_BYTES_FOR_MSIZE(msize); | |
2498 | if (szone->small_bytes_free_at_end) { | |
2499 | // let's mark this block as in use to serve as boundary | |
2500 | *SMALL_METADATA_FOR_PTR((unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(msize)) = SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end); | |
2501 | } | |
2502 | this_msize = msize; | |
2503 | goto return_small_alloc; | |
e9ce8d39 A |
2504 | } |
2505 | return NULL; | |
224c7076 | 2506 | |
e9ce8d39 | 2507 | add_leftover_and_proceed: |
9385eb3d | 2508 | if (this_msize > msize) { |
224c7076 A |
2509 | leftover_msize = this_msize - msize; |
2510 | leftover_ptr = (free_list_t *)((unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(msize)); | |
9385eb3d | 2511 | #if DEBUG_MALLOC |
224c7076 A |
2512 | if (LOG(szone,ptr)) { |
2513 | malloc_printf("in small_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize); | |
2514 | } | |
9385eb3d | 2515 | #endif |
224c7076 A |
2516 | small_free_list_add_ptr(szone, leftover_ptr, leftover_msize); |
2517 | meta_headers = SMALL_META_HEADER_FOR_PTR(leftover_ptr); | |
2518 | leftover_index = SMALL_META_INDEX_FOR_PTR(leftover_ptr); | |
2519 | small_meta_header_set_is_free(meta_headers, leftover_index, leftover_msize); | |
2520 | this_msize = msize; | |
e9ce8d39 | 2521 | } |
224c7076 | 2522 | |
9385eb3d | 2523 | return_small_alloc: |
e9ce8d39 | 2524 | szone->num_small_objects++; |
59e0d9fe | 2525 | szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(this_msize); |
e9ce8d39 | 2526 | #if DEBUG_MALLOC |
9385eb3d | 2527 | if (LOG(szone,ptr)) { |
59e0d9fe | 2528 | malloc_printf("in small_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize); |
9385eb3d | 2529 | } |
e9ce8d39 | 2530 | #endif |
59e0d9fe | 2531 | *SMALL_METADATA_FOR_PTR(ptr) = this_msize; |
e9ce8d39 A |
2532 | return ptr; |
2533 | } | |
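/*
 * Illustrative sketch, not part of the original source: how the small_bitmap
 * narrows the free-list search in small_malloc_from_free_list above.  Bit i of
 * the bitmap is set when small_free_list[i] is non-empty; masking off the bits
 * below the requested slot and taking the count of trailing zeros yields the
 * first list that could satisfy the request.  The helper name is hypothetical.
 */
static int example_first_usable_slot(unsigned bitmap, unsigned wanted_slot)
{
    unsigned candidates = bitmap & ~((1U << wanted_slot) - 1); /* drop slots that are too small */
    if (!candidates)
        return -1;                    /* nothing large enough; caller falls back to the region end */
    return __builtin_ctz(candidates); /* index of the first non-empty, large-enough list */
}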
2534 | ||
9385eb3d | 2535 | static INLINE void * |
59e0d9fe A |
2536 | small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) |
2537 | { | |
e9ce8d39 | 2538 | boolean_t locked = 0; |
59e0d9fe | 2539 | #if SMALL_CACHE |
e9ce8d39 | 2540 | void *ptr; |
59e0d9fe A |
2541 | #endif |
2542 | ||
9385eb3d A |
2543 | #if SMALL_CACHE |
2544 | ptr = (void *)szone->last_small_free; | |
59e0d9fe | 2545 | if ((((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) == msize) { |
9385eb3d A |
2546 | // we have a candidate - let's lock to make sure |
2547 | LOCK_AND_NOTE_LOCKED(szone, locked); | |
2548 | if (ptr == (void *)szone->last_small_free) { | |
2549 | szone->last_small_free = NULL; | |
9385eb3d A |
2550 | SZONE_UNLOCK(szone); |
2551 | CHECK(szone, __PRETTY_FUNCTION__); | |
59e0d9fe | 2552 | ptr = (void *)((uintptr_t)ptr & ~ (SMALL_QUANTUM - 1)); |
9385eb3d | 2553 | if (cleared_requested) { |
59e0d9fe | 2554 | memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); |
9385eb3d A |
2555 | } |
2556 | return ptr; | |
2557 | } | |
e9ce8d39 A |
2558 | } |
2559 | #endif | |
59e0d9fe A |
2560 | // Except on the rare occasions where we need to add a new region, we are going to end up locking, | |
2561 | // so we might as well lock right away to avoid doing unnecessary optimistic probes. | |
9385eb3d A |
2562 | if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked); |
2563 | ptr = small_malloc_from_free_list(szone, msize); | |
e9ce8d39 | 2564 | if (ptr) { |
9385eb3d A |
2565 | SZONE_UNLOCK(szone); |
2566 | CHECK(szone, __PRETTY_FUNCTION__); | |
2567 | if (cleared_requested) { | |
59e0d9fe | 2568 | memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); |
9385eb3d A |
2569 | } |
2570 | return ptr; | |
e9ce8d39 | 2571 | } |
9385eb3d A |
2572 | ptr = small_malloc_from_region_no_lock(szone, msize); |
2573 | // we don't clear because this freshly allocated space is pristine | |
2574 | SZONE_UNLOCK(szone); | |
2575 | CHECK(szone, __PRETTY_FUNCTION__); | |
e9ce8d39 A |
2576 | return ptr; |
2577 | } | |
2578 | ||
59e0d9fe | 2579 | // tries to allocate a small, cleared block |
9385eb3d | 2580 | static INLINE void * |
59e0d9fe A |
2581 | small_malloc_cleared_no_lock(szone_t *szone, msize_t msize) |
2582 | { | |
2583 | void *ptr; | |
2584 | ||
9385eb3d A |
2585 | // Assumes already locked |
2586 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
9385eb3d | 2587 | ptr = small_malloc_from_free_list(szone, msize); |
e9ce8d39 | 2588 | if (ptr) { |
59e0d9fe | 2589 | memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); |
9385eb3d | 2590 | return ptr; |
e9ce8d39 | 2591 | } else { |
9385eb3d A |
2592 | ptr = small_malloc_from_region_no_lock(szone, msize); |
2593 | // we don't clear because this freshly allocated space is pristine | |
e9ce8d39 A |
2594 | } |
2595 | return ptr; | |
2596 | } | |
2597 | ||
9385eb3d | 2598 | static INLINE void |
224c7076 | 2599 | free_small(szone_t *szone, void *ptr, region_t *small_region) |
59e0d9fe | 2600 | { |
224c7076 | 2601 | msize_t msize = SMALL_PTR_SIZE(ptr); |
59e0d9fe A |
2602 | #if SMALL_CACHE |
2603 | void *ptr2; | |
2604 | #endif | |
2605 | ||
2606 | // ptr is known to be in small_region | |
9385eb3d A |
2607 | SZONE_LOCK(szone); |
2608 | #if SMALL_CACHE | |
59e0d9fe | 2609 | ptr2 = szone->last_small_free; |
224c7076 A |
2610 | /* check that we don't already have this pointer in the cache */ |
2611 | if (ptr == (void *)((uintptr_t)ptr2 & ~ (SMALL_QUANTUM - 1))) { | |
2612 | szone_error(szone, "double free", ptr, NULL); | |
2613 | return; | |
2614 | } | |
2615 | #endif | |
2616 | if (SMALL_PTR_IS_FREE(ptr)) { | |
2617 | szone_error(szone, "double free", ptr, NULL); | |
2618 | return; | |
2619 | } | |
2620 | #if SMALL_CACHE | |
2621 | szone->last_small_free = (void *)(((uintptr_t)ptr) | msize); | |
9385eb3d | 2622 | if (!ptr2) { |
9385eb3d A |
2623 | SZONE_UNLOCK(szone); |
2624 | CHECK(szone, __PRETTY_FUNCTION__); | |
2625 | return; | |
2626 | } | |
224c7076 | 2627 | msize = (uintptr_t)ptr2 & (SMALL_QUANTUM - 1); |
59e0d9fe | 2628 | ptr = (void *)(((uintptr_t)ptr2) & ~ (SMALL_QUANTUM - 1)); |
9385eb3d A |
2629 | small_region = small_region_for_ptr_no_lock(szone, ptr); |
2630 | if (!small_region) { | |
224c7076 | 2631 | szone_error(szone, "double free (small cache)", ptr, NULL); |
59e0d9fe | 2632 | return; |
9385eb3d A |
2633 | } |
2634 | #endif | |
224c7076 | 2635 | small_free_no_lock(szone, small_region, ptr, msize); |
9385eb3d A |
2636 | SZONE_UNLOCK(szone); |
2637 | CHECK(szone, __PRETTY_FUNCTION__); | |
2638 | } | |
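/*
 * Illustrative sketch, not part of the original source: the one-entry
 * last_small_free cache used above packs the block's msize into the low bits
 * of its address, which are zero because every small block is aligned to
 * SMALL_QUANTUM.  The helper names are hypothetical.
 */
static void *example_pack_small_cache(void *ptr, msize_t msize)
{
    return (void *)((uintptr_t)ptr | msize);             /* msize fits below SMALL_QUANTUM */
}

static void *example_unpack_small_cache(void *cached, msize_t *msize)
{
    *msize = (uintptr_t)cached & (SMALL_QUANTUM - 1);     /* low bits carry the size in quanta */
    return (void *)((uintptr_t)cached & ~(uintptr_t)(SMALL_QUANTUM - 1));
}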
2639 | ||
2640 | static void | |
59e0d9fe A |
2641 | print_small_free_list(szone_t *szone) |
2642 | { | |
9385eb3d | 2643 | grain_t grain = 0; |
59e0d9fe | 2644 | free_list_t *ptr; |
224c7076 | 2645 | _SIMPLE_STRING b = _simple_salloc(); |
59e0d9fe | 2646 | |
224c7076 A |
2647 | if (b) { |
2648 | _simple_sappend(b, "small free sizes: "); | |
2649 | while (grain < NUM_SMALL_SLOTS) { | |
2650 | ptr = szone->small_free_list[grain]; | |
2651 | if (ptr) { | |
2652 | _simple_sprintf(b, "%s%y[%d]; ", (grain == NUM_SMALL_SLOTS-1) ? ">=" : "", (grain + 1) * SMALL_QUANTUM, free_list_count(ptr)); | |
2653 | } | |
2654 | grain++; | |
9385eb3d | 2655 | } |
224c7076 A |
2656 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b)); |
2657 | _simple_sfree(b); | |
9385eb3d | 2658 | } |
9385eb3d A |
2659 | } |
2660 | ||
2661 | static void | |
224c7076 | 2662 | print_small_region(szone_t *szone, boolean_t verbose, region_t region, size_t bytes_at_end) |
59e0d9fe A |
2663 | { |
2664 | unsigned counts[1024]; | |
2665 | unsigned in_use = 0; | |
224c7076 A |
2666 | void *start = SMALL_REGION_ADDRESS(region); |
2667 | void *limit = SMALL_REGION_END(region) - bytes_at_end; | |
59e0d9fe A |
2668 | msize_t msize_and_free; |
2669 | msize_t msize; | |
2670 | unsigned ci; | |
224c7076 | 2671 | _SIMPLE_STRING b; |
59e0d9fe | 2672 | |
9385eb3d A |
2673 | memset(counts, 0, 1024 * sizeof(unsigned)); |
2674 | while (start < limit) { | |
59e0d9fe A |
2675 | msize_and_free = *SMALL_METADATA_FOR_PTR(start); |
2676 | msize = msize_and_free & ~ SMALL_IS_FREE; | |
9385eb3d A |
2677 | if (!(msize_and_free & SMALL_IS_FREE)) { |
2678 | // block in use | |
59e0d9fe A |
2679 | if (msize < 1024) |
2680 | counts[msize]++; | |
9385eb3d A |
2681 | in_use++; |
2682 | } | |
59e0d9fe | 2683 | start += SMALL_BYTES_FOR_MSIZE(msize); |
9385eb3d | 2684 | } |
224c7076 A |
2685 | if ((b = _simple_salloc()) != NULL) { |
2686 | _simple_sprintf(b, "Small region [%p-%p, %y]\tIn_use=%d ", | |
2687 | SMALL_REGION_ADDRESS(region), SMALL_REGION_END(region), (int)SMALL_REGION_SIZE, in_use); | |
2688 | if (bytes_at_end) | |
2689 | _simple_sprintf(b, "Untouched=%ly", bytes_at_end); | |
2690 | if (verbose && in_use) { | |
2691 | _simple_sappend(b, "\n\tSizes in use: "); | |
2692 | for (ci = 0; ci < 1024; ci++) | |
2693 | if (counts[ci]) | |
2694 | _simple_sprintf(b, "%d[%d] ", SMALL_BYTES_FOR_MSIZE(ci), counts[ci]); | |
2695 | } | |
2696 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b)); | |
2697 | _simple_sfree(b); | |
9385eb3d | 2698 | } |
9385eb3d A |
2699 | } |
2700 | ||
2701 | static boolean_t | |
59e0d9fe A |
2702 | small_free_list_check(szone_t *szone, grain_t grain) |
2703 | { | |
9385eb3d A |
2704 | unsigned count = 0; |
2705 | free_list_t *ptr = szone->small_free_list[grain]; | |
2706 | free_list_t *previous = NULL; | |
59e0d9fe A |
2707 | msize_t msize_and_free; |
2708 | ||
2709 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
9385eb3d | 2710 | while (ptr) { |
224c7076 A |
2711 | msize_and_free = *SMALL_METADATA_FOR_PTR(ptr); |
2712 | count++; | |
2713 | if (!(msize_and_free & SMALL_IS_FREE)) { | |
2714 | malloc_printf("*** in-use ptr in free list grain=%d count=%d ptr=%p\n", grain, count, ptr); | |
2715 | return 0; | |
2716 | } | |
2717 | if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) { | |
2718 | malloc_printf("*** unaligned ptr in free list grain=%d count=%d ptr=%p\n", grain, count, ptr); | |
2719 | return 0; | |
2720 | } | |
2721 | if (!small_region_for_ptr_no_lock(szone, ptr)) { | |
2722 | malloc_printf("*** ptr not in szone grain=%d count=%d ptr=%p\n", grain, count, ptr); | |
2723 | return 0; | |
2724 | } | |
2725 | free_list_checksum(szone, ptr, __PRETTY_FUNCTION__); | |
2726 | if (free_list_unchecksum_ptr(ptr->previous) != previous) { | |
2727 | malloc_printf("*** previous incorrectly set grain=%d count=%d ptr=%p\n", grain, count, ptr); | |
2728 | return 0; | |
2729 | } | |
2730 | previous = ptr; | |
2731 | ptr = free_list_unchecksum_ptr(ptr->next); | |
9385eb3d A |
2732 | } |
2733 | return 1; | |
2734 | } | |
2735 | ||
224c7076 A |
2736 | /******************************************************************************* |
2737 | * Region hash implementation | |
2738 | * | |
2739 | * This is essentially a duplicate of the existing Large allocator hash, minus | |
2740 | * the ability to remove entries. The two should be combined eventually. | |
2741 | ******************************************************************************/ | |
2742 | #pragma mark region hash | |
2743 | ||
2744 | /* | |
2745 | * hash_lookup_region_no_lock - Scan a hash ring looking for an entry for a | |
2746 | * given region. | |
2747 | * | |
2748 | * FIXME: If consecutive queries of the same region are likely, a one-entry | |
2749 | * cache would likely be a significant performance win here. | |
2750 | */ | |
2751 | static region_t * | |
2752 | hash_lookup_region_no_lock(region_t *regions, size_t num_entries, region_t r) { | |
2753 | size_t index, hash_index; | |
2754 | region_t *entry; | |
2755 | ||
2756 | if (!num_entries) | |
2757 | return 0; | |
2758 | ||
2759 | index = hash_index = ((uintptr_t)r >> 20) % num_entries; | |
2760 | do { | |
2761 | entry = regions + index; | |
2762 | if (*entry == 0) | |
2763 | return 0; | |
2764 | if (*entry == r) | |
2765 | return entry; | |
2766 | if (++index == num_entries) | |
2767 | index = 0; | |
2768 | } while (index != hash_index); | |
2769 | return 0; | |
2770 | } | |
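/*
 * Illustrative sketch, not part of the original source: the lookup above is a
 * plain linear probe.  With num_entries == 7 and a region whose address hashes
 * to index 5, the probe order is 5, 6, 0, 1, ... until the region, an empty
 * slot, or the starting index is reached; an empty slot ends the search
 * because this ring never removes entries, so probe chains have no holes.
 * A caller (holding the szone lock) might use it like this; the wrapper name
 * is hypothetical.
 */
static boolean_t example_small_region_is_known(szone_t *szone, region_t r)
{
    return hash_lookup_region_no_lock(szone->small_regions,
                                      szone->num_small_regions_allocated, r) != NULL;
}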
2771 | ||
2772 | /* | |
2773 | * hash_region_insert_no_lock - Insert a region into the hash ring. | |
2774 | */ | |
2775 | static void | |
2776 | hash_region_insert_no_lock(region_t *regions, size_t num_entries, region_t r) { | |
2777 | size_t index, hash_index; | |
2778 | region_t *entry; | |
2779 | ||
2780 | index = hash_index = ((uintptr_t)r >> 20) % num_entries; | |
2781 | do { | |
2782 | entry = regions + index; | |
2783 | if (*entry == 0) { | |
2784 | *entry = r; | |
2785 | return; | |
2786 | } | |
2787 | if (++index == num_entries) | |
2788 | index = 0; | |
2789 | } while (index != hash_index); | |
2790 | } | |
2791 | ||
2792 | /* | |
2793 | * hash_regions_alloc_no_lock - Allocate space for a number of entries. This | |
2794 | * must be a VM allocation so as to avoid recursing between allocating a new small | |
2795 | * region and asking the small region to allocate space for the new list of | |
2796 | * regions. | |
2797 | */ | |
2798 | static region_t * | |
2799 | hash_regions_alloc_no_lock(szone_t *szone, size_t num_entries) | |
2800 | { | |
2801 | size_t size = num_entries * sizeof(region_t); | |
2802 | return allocate_pages(szone, round_page(size), 0, 0, VM_MEMORY_MALLOC); | |
2803 | } | |
2804 | ||
2805 | /* | |
2806 | * hash_regions_grow_no_lock - Grow the hash ring, and rehash the entries. | |
2807 | * Return the new regions array and new size to update the szone. Do not deallocate | |
2808 | * the old entries since someone may still be using them. | |
2809 | */ | |
2810 | static region_t * | |
2811 | hash_regions_grow_no_lock(szone_t *szone, region_t *regions, size_t old_size, | |
2812 | size_t *new_size) | |
2813 | { | |
2814 | // double in size and allocate memory for the regions | |
2815 | *new_size = old_size * 2 + 1; | |
2816 | region_t *new_regions = hash_regions_alloc_no_lock(szone, *new_size); | |
2817 | ||
2818 | // rehash the entries into the new list | |
2819 | size_t index; | |
2820 | for (index = 0; index < old_size; ++index) { | |
2821 | region_t r = regions[index]; | |
2822 | if (r != 0) | |
2823 | hash_region_insert_no_lock(new_regions, *new_size, r); | |
2824 | } | |
2825 | return new_regions; | |
2826 | } | |
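/*
 * Illustrative sketch, not part of the original source: growing the small
 * region ring.  For example, 63 slots become 63 * 2 + 1 == 127; the old array
 * is left to the VM rather than freed because another thread may still be
 * probing it.  The wrapper name is hypothetical.
 */
static void example_grow_small_region_hash(szone_t *szone)
{
    size_t new_size;
    region_t *new_regions = hash_regions_grow_no_lock(szone, szone->small_regions,
                                                      szone->num_small_regions_allocated,
                                                      &new_size);
    if (!new_regions)
        return;                                 /* allocation failed; keep the old ring */
    szone->small_regions = new_regions;         /* publish the rehashed ring */
    szone->num_small_regions_allocated = new_size;
}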
2827 | ||
2828 | /******************************************************************************* | |
2829 | * Large allocator implementation | |
2830 | ******************************************************************************/ | |
2831 | #pragma mark large allocator | |
e9ce8d39 A |
2832 | |
2833 | #if DEBUG_MALLOC | |
2834 | ||
9385eb3d | 2835 | static void |
59e0d9fe A |
2836 | large_debug_print(szone_t *szone) |
2837 | { | |
2838 | unsigned num_large_entries = szone->num_large_entries; | |
2839 | unsigned index = num_large_entries; | |
2840 | large_entry_t *range; | |
224c7076 | 2841 | _SIMPLE_STRING b = _simple_salloc(); |
59e0d9fe | 2842 | |
224c7076 A |
2843 | if (b) { |
2844 | for (index = 0, range = szone->large_entries; index < szone->num_large_entries; index++, range++) | |
2845 | if (!LARGE_ENTRY_IS_EMPTY(*range)) | |
2846 | _simple_sprintf(b, "%d: %p(%y); ", index, LARGE_ENTRY_ADDRESS(*range), LARGE_ENTRY_SIZE(*range)); | |
59e0d9fe | 2847 | |
224c7076 A |
2848 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b)); |
2849 | _simple_sfree(b); | |
2850 | } | |
e9ce8d39 A |
2851 | } |
2852 | #endif | |
2853 | ||
59e0d9fe A |
2854 | /* |
2855 | * Scan the hash ring looking for an entry for the given pointer. | |
2856 | */ | |
9385eb3d | 2857 | static large_entry_t * |
59e0d9fe A |
2858 | large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) |
2859 | { | |
2860 | // result only valid with lock held | |
2861 | unsigned num_large_entries = szone->num_large_entries; | |
2862 | unsigned hash_index; | |
2863 | unsigned index; | |
2864 | large_entry_t *range; | |
2865 | ||
2866 | if (!num_large_entries) | |
2867 | return NULL; | |
2868 | hash_index = ((uintptr_t)ptr >> vm_page_shift) % num_large_entries; | |
e9ce8d39 A |
2869 | index = hash_index; |
2870 | do { | |
59e0d9fe A |
2871 | range = szone->large_entries + index; |
2872 | if (LARGE_ENTRY_MATCHES(*range, ptr)) | |
2873 | return range; | |
2874 | if (LARGE_ENTRY_IS_EMPTY(*range)) | |
2875 | return NULL; // end of chain | |
2876 | index++; | |
2877 | if (index == num_large_entries) | |
2878 | index = 0; | |
e9ce8d39 A |
2879 | } while (index != hash_index); |
2880 | return NULL; | |
2881 | } | |
2882 | ||
9385eb3d | 2883 | static void |
59e0d9fe A |
2884 | large_entry_insert_no_lock(szone_t *szone, large_entry_t range) |
2885 | { | |
2886 | unsigned num_large_entries = szone->num_large_entries; | |
2887 | unsigned hash_index = (range.address_and_num_pages >> vm_page_shift) % num_large_entries; | |
2888 | unsigned index = hash_index; | |
2889 | large_entry_t *entry; | |
2890 | ||
e9ce8d39 | 2891 | do { |
59e0d9fe | 2892 | entry = szone->large_entries + index; |
9385eb3d A |
2893 | if (LARGE_ENTRY_IS_EMPTY(*entry)) { |
2894 | *entry = range; | |
2895 | return; // end of chain | |
2896 | } | |
59e0d9fe A |
2897 | index++; |
2898 | if (index == num_large_entries) | |
2899 | index = 0; | |
e9ce8d39 A |
2900 | } while (index != hash_index); |
2901 | } | |
2902 | ||
9385eb3d | 2903 | static INLINE void |
59e0d9fe A |
2904 | large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) |
2905 | { | |
2906 | unsigned num_large_entries = szone->num_large_entries; | |
2907 | unsigned hash_index = entry - szone->large_entries; | |
2908 | unsigned index = hash_index; | |
2909 | large_entry_t range; | |
2910 | ||
e9ce8d39 | 2911 | do { |
59e0d9fe A |
2912 | index++; |
2913 | if (index == num_large_entries) | |
2914 | index = 0; | |
9385eb3d | 2915 | range = szone->large_entries[index]; |
59e0d9fe A |
2916 | if (LARGE_ENTRY_IS_EMPTY(range)) |
2917 | return; | |
9385eb3d A |
2918 | szone->large_entries[index].address_and_num_pages = 0; |
2919 | large_entry_insert_no_lock(szone, range); // this will reinsert in the | |
2920 | // proper place | |
e9ce8d39 A |
2921 | } while (index != hash_index); |
2922 | } | |
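/*
 * Worked example, not from the original source: suppose entries A, B and C all
 * hash to index 4 and sit in slots 4, 5 and 6.  Merely zeroing slot 5 to delete
 * B would make a later lookup of C stop at the empty slot and miss it.
 * Re-inserting every entry that follows the freed slot, as done above, closes
 * the gap and keeps each probe chain unbroken.
 */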
2923 | ||
9385eb3d | 2924 | static INLINE large_entry_t * |
59e0d9fe A |
2925 | large_entries_alloc_no_lock(szone_t *szone, unsigned num) |
2926 | { | |
e9ce8d39 A |
2927 | size_t size = num * sizeof(large_entry_t); |
2928 | boolean_t is_vm_allocation = size >= LARGE_THRESHOLD; | |
59e0d9fe | 2929 | |
e9ce8d39 | 2930 | if (is_vm_allocation) { |
9385eb3d A |
2931 | // Note that we allocate memory (via a system call) under a spin lock. | |
2932 | // That is certainly evil; however, it's very rare in the lifetime of a process. | |
2933 | // The alternative would slow down the normal case. | |
224c7076 | 2934 | return allocate_pages(szone, round_page(size), 0, 0, VM_MEMORY_MALLOC_LARGE); |
e9ce8d39 | 2935 | } else { |
59e0d9fe | 2936 | return small_malloc_cleared_no_lock(szone, SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1)); |
e9ce8d39 A |
2937 | } |
2938 | } | |
2939 | ||
9385eb3d | 2940 | static void |
59e0d9fe A |
2941 | large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate) |
2942 | { | |
9385eb3d | 2943 | // returns range to deallocate |
59e0d9fe A |
2944 | size_t size = num * sizeof(large_entry_t); |
2945 | boolean_t is_vm_allocation = size >= LARGE_THRESHOLD; | |
224c7076 | 2946 | region_t *region; |
59e0d9fe A |
2947 | msize_t msize_and_free; |
2948 | ||
e9ce8d39 | 2949 | if (is_vm_allocation) { |
9385eb3d A |
2950 | range_to_deallocate->address = (vm_address_t)entries; |
2951 | range_to_deallocate->size = round_page(size); | |
e9ce8d39 | 2952 | } else { |
9385eb3d | 2953 | range_to_deallocate->size = 0; |
59e0d9fe A |
2954 | region = small_region_for_ptr_no_lock(szone, entries); |
2955 | msize_and_free = *SMALL_METADATA_FOR_PTR(entries); | |
9385eb3d | 2956 | if (msize_and_free & SMALL_IS_FREE) { |
224c7076 | 2957 | szone_error(szone, "object already freed being freed", entries, NULL); |
9385eb3d A |
2958 | return; |
2959 | } | |
2960 | small_free_no_lock(szone, region, entries, msize_and_free); | |
e9ce8d39 A |
2961 | } |
2962 | } | |
2963 | ||
224c7076 | 2964 | static large_entry_t * |
59e0d9fe A |
2965 | large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate) |
2966 | { | |
9385eb3d | 2967 | // sets range_to_deallocate |
e9ce8d39 A |
2968 | unsigned old_num_entries = szone->num_large_entries; |
2969 | large_entry_t *old_entries = szone->large_entries; | |
59e0d9fe | 2970 | unsigned new_num_entries = (old_num_entries) ? old_num_entries * 2 + 1 : 63; // always an odd number for good hashing |
e9ce8d39 A |
2971 | large_entry_t *new_entries = large_entries_alloc_no_lock(szone, new_num_entries); |
2972 | unsigned index = old_num_entries; | |
59e0d9fe A |
2973 | large_entry_t oldRange; |
2974 | ||
224c7076 A |
2975 | // if the allocation of new entries failed, bail |
2976 | if (new_entries == NULL) | |
2977 | return NULL; | |
2978 | ||
e9ce8d39 A |
2979 | szone->num_large_entries = new_num_entries; |
2980 | szone->large_entries = new_entries; | |
59e0d9fe A |
2981 | |
2982 | /* rehash entries into the new list */ | |
e9ce8d39 | 2983 | while (index--) { |
59e0d9fe | 2984 | oldRange = old_entries[index]; |
9385eb3d A |
2985 | if (!LARGE_ENTRY_IS_EMPTY(oldRange)) { |
2986 | large_entry_insert_no_lock(szone, oldRange); | |
2987 | } | |
2988 | } | |
2989 | if (old_entries) { | |
2990 | large_entries_free_no_lock(szone, old_entries, old_num_entries, range_to_deallocate); | |
2991 | } else { | |
2992 | range_to_deallocate->size = 0; | |
e9ce8d39 | 2993 | } |
224c7076 | 2994 | return new_entries; |
e9ce8d39 A |
2995 | } |
2996 | ||
59e0d9fe A |
2997 | // frees the specific entry in the size table |
2998 | // returns a range to truly deallocate | |
9385eb3d | 2999 | static vm_range_t |
59e0d9fe A |
3000 | large_free_no_lock(szone_t *szone, large_entry_t *entry) |
3001 | { | |
e9ce8d39 | 3002 | vm_range_t range; |
59e0d9fe A |
3003 | |
3004 | range.address = (vm_address_t)LARGE_ENTRY_ADDRESS(*entry); | |
3005 | range.size = (vm_size_t)LARGE_ENTRY_SIZE(*entry); | |
3006 | szone->num_large_objects_in_use--; | |
e9ce8d39 A |
3007 | szone->num_bytes_in_large_objects -= range.size; |
3008 | if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) { | |
224c7076 | 3009 | protect((void *)range.address, range.size, VM_PROT_READ | VM_PROT_WRITE, szone->debug_flags); |
59e0d9fe A |
3010 | range.address -= vm_page_size; |
3011 | range.size += 2 * vm_page_size; | |
3012 | } | |
e9ce8d39 A |
3013 | entry->address_and_num_pages = 0; |
3014 | large_entries_rehash_after_entry_no_lock(szone, entry); | |
3015 | #if DEBUG_MALLOC | |
3016 | if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) { | |
59e0d9fe A |
3017 | malloc_printf("*** freed entry %p still in use; num_large_entries=%d\n", |
3018 | range.address, szone->num_large_entries); | |
9385eb3d A |
3019 | large_debug_print(szone); |
3020 | szone_sleep(); | |
e9ce8d39 A |
3021 | } |
3022 | #endif | |
3b2a1fe8 A |
3023 | return range; |
3024 | } | |
3025 | ||
9385eb3d | 3026 | static kern_return_t |
59e0d9fe A |
3027 | large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) |
3028 | { | |
e9ce8d39 A |
3029 | unsigned index = 0; |
3030 | vm_range_t buffer[MAX_RECORDER_BUFFER]; | |
3031 | unsigned count = 0; | |
3032 | large_entry_t *entries; | |
3033 | kern_return_t err; | |
59e0d9fe A |
3034 | vm_range_t range; |
3035 | large_entry_t entry; | |
3036 | ||
3037 | err = reader(task, large_entries_address, sizeof(large_entry_t) * num_entries, (void **)&entries); | |
3038 | if (err) | |
3039 | return err; | |
e9ce8d39 | 3040 | index = num_entries; |
59e0d9fe A |
3041 | if ((type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) && |
3042 | (num_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD)) { | |
9385eb3d A |
3043 | range.address = large_entries_address; |
3044 | range.size = round_page(num_entries * sizeof(large_entry_t)); | |
3045 | recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &range, 1); | |
3046 | } | |
59e0d9fe A |
3047 | if (type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE)) |
3048 | while (index--) { | |
3049 | entry = entries[index]; | |
3050 | if (!LARGE_ENTRY_IS_EMPTY(entry)) { | |
3051 | range.address = (vm_address_t)LARGE_ENTRY_ADDRESS(entry); | |
3052 | range.size = (vm_size_t)LARGE_ENTRY_SIZE(entry); | |
3053 | buffer[count++] = range; | |
3054 | if (count >= MAX_RECORDER_BUFFER) { | |
3055 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count); | |
3056 | count = 0; | |
3057 | } | |
9385eb3d A |
3058 | } |
3059 | } | |
9385eb3d A |
3060 | if (count) { |
3061 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | |
3062 | | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count); | |
3063 | } | |
e9ce8d39 A |
3064 | return 0; |
3065 | } | |
3066 | ||
3067 | /********************* HUGE ENTRY UTILITIES ************************/ | |
3068 | ||
9385eb3d | 3069 | static huge_entry_t * |
59e0d9fe A |
3070 | huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) |
3071 | { | |
3072 | unsigned index; | |
3073 | huge_entry_t *huge; | |
3074 | ||
3075 | for (index = szone->num_huge_entries, huge = szone->huge_entries; | |
3076 | index > 0; | |
3077 | index--, huge++) { | |
3078 | ||
3079 | if ((void *)huge->address == ptr) | |
3080 | return huge; | |
e9ce8d39 A |
3081 | } |
3082 | return NULL; | |
3083 | } | |
3084 | ||
9385eb3d | 3085 | static boolean_t |
59e0d9fe A |
3086 | huge_entry_append(szone_t *szone, huge_entry_t huge) |
3087 | { | |
3088 | huge_entry_t *new_huge_entries = NULL, *old_huge_entries; | |
3089 | unsigned num_huge_entries; | |
3090 | ||
9385eb3d A |
3091 | // We do a little dance with locking because doing allocation (even in the |
3092 | // default szone) may cause something to get freed in this szone, leading to | |
3093 | // a deadlock. | |
3094 | // Returns 1 on success | |
e9ce8d39 | 3095 | SZONE_LOCK(szone); |
59e0d9fe | 3096 | for (;;) { |
9385eb3d A |
3097 | num_huge_entries = szone->num_huge_entries; |
3098 | SZONE_UNLOCK(szone); | |
59e0d9fe A |
3099 | /* check for counter wrap */ |
3100 | if ((num_huge_entries + 1) < num_huge_entries) | |
3101 | return 0; | |
3102 | /* stale allocation from last time around the loop? */ | |
3103 | if (new_huge_entries) | |
3104 | szone_free(szone, new_huge_entries); | |
9385eb3d | 3105 | new_huge_entries = szone_malloc(szone, (num_huge_entries + 1) * sizeof(huge_entry_t)); |
59e0d9fe A |
3106 | if (new_huge_entries == NULL) |
3107 | return 0; | |
9385eb3d A |
3108 | SZONE_LOCK(szone); |
3109 | if (num_huge_entries == szone->num_huge_entries) { | |
3110 | // No change - our malloc still applies | |
59e0d9fe | 3111 | old_huge_entries = szone->huge_entries; |
9385eb3d A |
3112 | if (num_huge_entries) { |
3113 | memcpy(new_huge_entries, old_huge_entries, num_huge_entries * sizeof(huge_entry_t)); | |
3114 | } | |
3115 | new_huge_entries[szone->num_huge_entries++] = huge; | |
3116 | szone->huge_entries = new_huge_entries; | |
3117 | SZONE_UNLOCK(szone); | |
3118 | szone_free(szone, old_huge_entries); | |
5b2abdfb | 3119 | return 1; |
9385eb3d A |
3120 | } |
3121 | // try again! | |
3122 | } | |
3123 | } | |
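/*
 * Worked example, not from the original source: thread T1 reads
 * num_huge_entries == 3, drops the lock, and mallocs room for 4 entries.
 * Meanwhile thread T2 appends one, so when T1 re-takes the lock it sees 4 != 3,
 * keeps the stale buffer to free on the next iteration, and retries with room
 * for 5.  Allocating outside the lock is what forces the retry loop, but it
 * avoids calling szone_malloc (which may itself free into this szone) while
 * the spin lock is held.
 */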
3124 | ||
3125 | static kern_return_t | |
59e0d9fe A |
3126 | huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) |
3127 | { | |
e9ce8d39 A |
3128 | huge_entry_t *entries; |
3129 | kern_return_t err; | |
59e0d9fe A |
3130 | |
3131 | err = reader(task, huge_entries_address, sizeof(huge_entry_t) * num_entries, (void **)&entries); | |
3132 | if (err) | |
3133 | return err; | |
3134 | if (num_entries) | |
3135 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, entries, num_entries); | |
3136 | ||
e9ce8d39 A |
3137 | return 0; |
3138 | } | |
3139 | ||
9385eb3d | 3140 | static void * |
224c7076 | 3141 | large_and_huge_malloc(szone_t *szone, size_t num_pages) |
59e0d9fe A |
3142 | { |
3143 | void *addr; | |
9385eb3d | 3144 | vm_range_t range_to_deallocate; |
59e0d9fe A |
3145 | huge_entry_t huge_entry; |
3146 | size_t size; | |
3147 | large_entry_t large_entry; | |
3148 | ||
3149 | if (!num_pages) | |
3150 | num_pages = 1; // minimal allocation size for this szone | |
3151 | size = (size_t)num_pages << vm_page_shift; | |
9385eb3d | 3152 | range_to_deallocate.size = 0; |
e9ce8d39 | 3153 | if (num_pages >= (1 << vm_page_shift)) { |
224c7076 | 3154 | addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MEMORY_MALLOC_HUGE); |
59e0d9fe A |
3155 | if (addr == NULL) |
3156 | return NULL; | |
3157 | huge_entry.size = size; | |
3158 | huge_entry.address = (vm_address_t)addr; | |
3159 | if (!huge_entry_append(szone, huge_entry)) | |
3160 | return NULL; // we are leaking the allocation here | |
9385eb3d | 3161 | SZONE_LOCK(szone); |
59e0d9fe | 3162 | szone->num_bytes_in_huge_objects += size; |
e9ce8d39 | 3163 | } else { |
59e0d9fe | 3164 | |
224c7076 | 3165 | addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MEMORY_MALLOC_LARGE); |
9385eb3d | 3166 | #if DEBUG_MALLOC |
59e0d9fe | 3167 | if (LOG(szone, addr)) |
224c7076 | 3168 | malloc_printf("in szone_malloc true large allocation at %p for %ly\n", (void *)addr, size); |
9385eb3d | 3169 | #endif |
5b2abdfb | 3170 | SZONE_LOCK(szone); |
59e0d9fe | 3171 | if (addr == NULL) { |
5b2abdfb A |
3172 | SZONE_UNLOCK(szone); |
3173 | return NULL; | |
e9ce8d39 | 3174 | } |
e9ce8d39 | 3175 | #if DEBUG_MALLOC |
59e0d9fe A |
3176 | if (large_entry_for_pointer_no_lock(szone, addr)) { |
3177 | malloc_printf("freshly allocated is already in use: %p\n", addr); | |
5b2abdfb | 3178 | large_debug_print(szone); |
9385eb3d | 3179 | szone_sleep(); |
5b2abdfb | 3180 | } |
e9ce8d39 | 3181 | #endif |
9385eb3d A |
3182 | if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) { |
3183 | // density of hash table too high; grow table | |
3184 | // we do that under lock to avoid a race | |
224c7076 A |
3185 | large_entry_t *entries = large_entries_grow_no_lock(szone, &range_to_deallocate); |
3186 | if (entries == NULL) { | |
3187 | SZONE_UNLOCK(szone); | |
3188 | return NULL; | |
3189 | } | |
9385eb3d | 3190 | } |
59e0d9fe | 3191 | large_entry.address_and_num_pages = (uintptr_t)addr | num_pages; |
e9ce8d39 | 3192 | #if DEBUG_MALLOC |
59e0d9fe A |
3193 | if (large_entry_for_pointer_no_lock(szone, addr)) { |
3194 | malloc_printf("entry about to be added already in use: %p\n", addr); | |
9385eb3d A |
3195 | large_debug_print(szone); |
3196 | szone_sleep(); | |
3197 | } | |
e9ce8d39 | 3198 | #endif |
59e0d9fe | 3199 | large_entry_insert_no_lock(szone, large_entry); |
e9ce8d39 | 3200 | #if DEBUG_MALLOC |
9385eb3d | 3201 | if (!large_entry_for_pointer_no_lock(szone, (void *)addr)) { |
59e0d9fe | 3202 | malloc_printf("can't find entry just added\n"); |
9385eb3d A |
3203 | large_debug_print(szone); |
3204 | szone_sleep(); | |
3205 | } | |
e9ce8d39 | 3206 | #endif |
9385eb3d A |
3207 | szone->num_large_objects_in_use ++; |
3208 | szone->num_bytes_in_large_objects += size; | |
e9ce8d39 A |
3209 | } |
3210 | SZONE_UNLOCK(szone); | |
9385eb3d | 3211 | if (range_to_deallocate.size) { |
59e0d9fe | 3212 | deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0); // we deallocate outside the lock |
9385eb3d | 3213 | } |
e9ce8d39 A |
3214 | return (void *)addr; |
3215 | } | |
3216 | ||
9385eb3d | 3217 | static INLINE void |
59e0d9fe A |
3218 | free_large_or_huge(szone_t *szone, void *ptr) |
3219 | { | |
9385eb3d | 3220 | // We have established ptr is page-aligned and not tiny nor small |
e9ce8d39 A |
3221 | large_entry_t *entry; |
3222 | vm_range_t vm_range_to_deallocate; | |
3223 | huge_entry_t *huge; | |
59e0d9fe | 3224 | |
e9ce8d39 A |
3225 | SZONE_LOCK(szone); |
3226 | entry = large_entry_for_pointer_no_lock(szone, ptr); | |
3227 | if (entry) { | |
9385eb3d | 3228 | vm_range_to_deallocate = large_free_no_lock(szone, entry); |
e9ce8d39 | 3229 | #if DEBUG_MALLOC |
9385eb3d | 3230 | if (large_entry_for_pointer_no_lock(szone, ptr)) { |
59e0d9fe | 3231 | malloc_printf("*** just after freeing %p still in use num_large_entries=%d\n", ptr, szone->num_large_entries); |
9385eb3d A |
3232 | large_debug_print(szone); |
3233 | szone_sleep(); | |
3234 | } | |
e9ce8d39 A |
3235 | #endif |
3236 | } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) { | |
9385eb3d A |
3237 | vm_range_to_deallocate = *huge; |
3238 | *huge = szone->huge_entries[--szone->num_huge_entries]; // last entry fills that spot | |
59e0d9fe | 3239 | szone->num_bytes_in_huge_objects -= (size_t)vm_range_to_deallocate.size; |
e9ce8d39 A |
3240 | } else { |
3241 | #if DEBUG_MALLOC | |
9385eb3d | 3242 | large_debug_print(szone); |
e9ce8d39 | 3243 | #endif |
224c7076 | 3244 | szone_error(szone, "pointer being freed was not allocated", ptr, NULL); |
9385eb3d | 3245 | return; |
e9ce8d39 | 3246 | } |
e9ce8d39 | 3247 | SZONE_UNLOCK(szone); // we release the lock asap |
9385eb3d | 3248 | CHECK(szone, __PRETTY_FUNCTION__); |
e9ce8d39 A |
3249 | // we deallocate_pages, including guard pages |
3250 | if (vm_range_to_deallocate.address) { | |
9385eb3d | 3251 | #if DEBUG_MALLOC |
59e0d9fe A |
3252 | if (large_entry_for_pointer_no_lock(szone, (void *)vm_range_to_deallocate.address)) { |
3253 | malloc_printf("*** invariant broken: %p still in use num_large_entries=%d\n", vm_range_to_deallocate.address, szone->num_large_entries); | |
9385eb3d A |
3254 | large_debug_print(szone); |
3255 | szone_sleep(); | |
3256 | } | |
3257 | #endif | |
59e0d9fe | 3258 | deallocate_pages(szone, (void *)vm_range_to_deallocate.address, (size_t)vm_range_to_deallocate.size, 0); |
9385eb3d A |
3259 | } |
3260 | } | |
3261 | ||
3262 | static INLINE int | |
59e0d9fe A |
3263 | try_realloc_large_or_huge_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) |
3264 | { | |
9385eb3d | 3265 | vm_address_t addr = (vm_address_t)ptr + old_size; |
59e0d9fe A |
3266 | large_entry_t *large_entry, saved_entry; |
3267 | huge_entry_t *huge_entry, huge; | |
9385eb3d | 3268 | kern_return_t err; |
59e0d9fe | 3269 | |
e9ce8d39 | 3270 | #if DEBUG_MALLOC |
9385eb3d A |
3271 | if (old_size != ((old_size >> vm_page_shift) << vm_page_shift)) { |
3272 | malloc_printf("*** old_size is %d\n", old_size); | |
3273 | } | |
e9ce8d39 | 3274 | #endif |
9385eb3d | 3275 | SZONE_LOCK(szone); |
59e0d9fe | 3276 | large_entry = large_entry_for_pointer_no_lock(szone, (void *)addr); |
9385eb3d | 3277 | SZONE_UNLOCK(szone); |
59e0d9fe A |
3278 | if (large_entry) { |
3279 | return 0; // large pointer already exists in table - extension is not going to work | |
9385eb3d A |
3280 | } |
3281 | new_size = round_page(new_size); | |
9385eb3d A |
3282 | /* |
3283 | * Ask for allocation at a specific address, and mark as realloc | |
3284 | * to request coalescing with previous realloc'ed extensions. | |
3285 | */ | |
3286 | err = vm_allocate(mach_task_self(), &addr, new_size - old_size, VM_MAKE_TAG(VM_MEMORY_REALLOC)); | |
3287 | if (err != KERN_SUCCESS) { | |
3288 | return 0; | |
3289 | } | |
3290 | SZONE_LOCK(szone); | |
3291 | /* | |
3292 | * If the new size is still under the large/huge threshold, we can just | |
3293 | * extend the existing large block. | |
3294 | * | |
3295 | * Note: this logic is predicated on the understanding that an allocated | |
3296 | * block can never really shrink, so that the new size will always be | |
3297 | * larger than the old size. | |
59e0d9fe A |
3298 | * |
3299 | * Note: the use of 1 << vm_page_shift here has to do with the subdivision | |
3300 | * of the bits in the large_entry_t, and not the size of a page (directly). | |
9385eb3d A |
3301 | */ |
3302 | if ((new_size >> vm_page_shift) < (1 << vm_page_shift)) { | |
3303 | /* extend existing large entry */ | |
59e0d9fe A |
3304 | large_entry = large_entry_for_pointer_no_lock(szone, ptr); |
3305 | if (!large_entry) { | |
224c7076 | 3306 | szone_error(szone, "large entry reallocated is not properly in table", ptr, NULL); |
9385eb3d A |
3307 | /* XXX will cause fault on next reference to entry */ |
3308 | } | |
59e0d9fe | 3309 | large_entry->address_and_num_pages = (uintptr_t)ptr | (new_size >> vm_page_shift); |
9385eb3d A |
3310 | szone->num_bytes_in_large_objects += new_size - old_size; |
3311 | } else if ((old_size >> vm_page_shift) >= (1 << vm_page_shift)) { | |
3312 | /* extend existing huge entry */ | |
59e0d9fe | 3313 | huge_entry = huge_entry_for_pointer_no_lock(szone, ptr); |
9385eb3d | 3314 | if (!huge_entry) { |
224c7076 | 3315 | szone_error(szone, "huge entry reallocated is not properly in table", ptr, NULL); |
9385eb3d A |
3316 | /* XXX will cause fault on next reference to huge_entry */ |
3317 | } | |
3318 | huge_entry->size = new_size; | |
3319 | szone->num_bytes_in_huge_objects += new_size - old_size; | |
3320 | } else { | |
3321 | /* need to convert large entry to huge entry */ | |
9385eb3d A |
3322 | |
3323 | /* release large entry, note we still have the VM allocation */ | |
59e0d9fe A |
3324 | large_entry = large_entry_for_pointer_no_lock(szone, ptr); |
3325 | saved_entry = *large_entry; // in case we need to put it back | |
3326 | large_free_no_lock(szone, large_entry); | |
9385eb3d A |
3327 | |
3328 | /* and get a huge entry */ | |
3329 | huge.address = (vm_address_t)ptr; | |
3330 | huge.size = new_size; /* fix up size */ | |
3331 | SZONE_UNLOCK(szone); | |
3332 | if (huge_entry_append(szone, huge)) { | |
3333 | szone->num_bytes_in_huge_objects += new_size; | |
3334 | return 1; // success! | |
3335 | } | |
3336 | SZONE_LOCK(szone); | |
3337 | // we leak memory (the extra space appended) but data structures are correct | |
3338 | large_entry_insert_no_lock(szone, saved_entry); // this will reinsert the large entry | |
e9ce8d39 | 3339 | } |
9385eb3d A |
3340 | SZONE_UNLOCK(szone); // we release the lock asap |
3341 | return 1; | |
e9ce8d39 A |
3342 | } |
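/*
 * Illustrative sketch, not part of the original source: a large_entry_t packs a
 * page-aligned address and a page count into one word, so the low vm_page_shift
 * bits hold num_pages and the remaining bits hold the address.  That is why the
 * code above treats (1 << vm_page_shift) pages (about 16 MiB with 4 KiB pages)
 * as the point where a large entry must become a huge entry.  The helper name
 * is hypothetical.
 */
static void example_decode_large_entry(uintptr_t address_and_num_pages,
                                       vm_address_t *address, size_t *num_pages)
{
    *num_pages = address_and_num_pages & ((1UL << vm_page_shift) - 1);
    *address   = (vm_address_t)(address_and_num_pages & ~((1UL << vm_page_shift) - 1));
}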
3343 | ||
9385eb3d A |
3344 | /********************* Zone call backs ************************/ |
3345 | ||
3346 | static void | |
59e0d9fe A |
3347 | szone_free(szone_t *szone, void *ptr) |
3348 | { | |
224c7076 A |
3349 | region_t *tiny_region; |
3350 | region_t *small_region; | |
59e0d9fe | 3351 | |
e9ce8d39 | 3352 | #if DEBUG_MALLOC |
59e0d9fe A |
3353 | if (LOG(szone, ptr)) |
3354 | malloc_printf("in szone_free with %p\n", ptr); | |
e9ce8d39 | 3355 | #endif |
59e0d9fe A |
3356 | if (!ptr) |
3357 | return; | |
3358 | /* | |
3359 | * Try to free to a tiny region. | |
3360 | */ | |
3361 | if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) { | |
224c7076 | 3362 | szone_error(szone, "Non-aligned pointer being freed", ptr, NULL); |
9385eb3d A |
3363 | return; |
3364 | } | |
59e0d9fe | 3365 | if ((tiny_region = tiny_region_for_ptr_no_lock(szone, ptr)) != NULL) { |
224c7076 A |
3366 | if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) { |
3367 | szone_error(szone, "Pointer to metadata being freed", ptr, NULL); | |
3368 | return; | |
3369 | } | |
9385eb3d A |
3370 | free_tiny(szone, ptr, tiny_region); |
3371 | return; | |
3372 | } | |
59e0d9fe A |
3373 | |
3374 | /* | |
3375 | * Try to free to a small region. | |
3376 | */ | |
3377 | if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) { | |
224c7076 | 3378 | szone_error(szone, "Non-aligned pointer being freed (2)", ptr, NULL); |
9385eb3d A |
3379 | return; |
3380 | } | |
59e0d9fe | 3381 | if ((small_region = small_region_for_ptr_no_lock(szone, ptr)) != NULL) { |
224c7076 A |
3382 | if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) { |
3383 | szone_error(szone, "Pointer to metadata being freed (2)", ptr, NULL); | |
3384 | return; | |
3385 | } | |
9385eb3d A |
3386 | free_small(szone, ptr, small_region); |
3387 | return; | |
3388 | } | |
59e0d9fe A |
3389 | |
3390 | /* check that it's a legal large/huge allocation */ | |
3391 | if ((uintptr_t)ptr & (vm_page_size - 1)) { | |
224c7076 | 3392 | szone_error(szone, "non-page-aligned, non-allocated pointer being freed", ptr, NULL); |
9385eb3d A |
3393 | return; |
3394 | } | |
3395 | free_large_or_huge(szone, ptr); | |
3396 | } | |
3397 | ||
3398 | static INLINE void * | |
59e0d9fe A |
3399 | szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested) |
3400 | { | |
9385eb3d | 3401 | void *ptr; |
59e0d9fe | 3402 | msize_t msize; |
59e0d9fe | 3403 | |
9385eb3d A |
3404 | if (size <= 31*TINY_QUANTUM) { |
3405 | // think tiny | |
59e0d9fe A |
3406 | msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); |
3407 | if (!msize) | |
3408 | msize = 1; | |
9385eb3d A |
3409 | ptr = tiny_malloc_should_clear(szone, msize, cleared_requested); |
3410 | } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) { | |
3411 | // think small | |
59e0d9fe | 3412 | msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1); |
9385eb3d A |
3413 | if (! msize) msize = 1; |
3414 | ptr = small_malloc_should_clear(szone, msize, cleared_requested); | |
e9ce8d39 | 3415 | } else { |
59e0d9fe | 3416 | // large or huge |
224c7076 | 3417 | size_t num_pages = round_page(size) >> vm_page_shift; |
20d7cd4c A |
3418 | if (num_pages == 0) /* Overflowed */ |
3419 | ptr = 0; | |
3420 | else | |
224c7076 | 3421 | ptr = large_and_huge_malloc(szone, num_pages); |
e9ce8d39 | 3422 | } |
9385eb3d | 3423 | #if DEBUG_MALLOC |
59e0d9fe A |
3424 | if (LOG(szone, ptr)) |
3425 | malloc_printf("szone_malloc returned %p\n", ptr); | |
9385eb3d | 3426 | #endif |
59e0d9fe A |
3427 | /* |
3428 | * If requested, scribble on allocated memory. | |
3429 | */ | |
3430 | if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && ptr && !cleared_requested && size) | |
3431 | memset(ptr, 0xaa, size); | |
3432 | ||
e9ce8d39 A |
3433 | return ptr; |
3434 | } | |
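/*
 * Worked example, not from the original source, assuming a 16-byte tiny
 * quantum: a 100-byte request rounds up to (100 + 15) >> 4 == 7 tiny quanta
 * (112 bytes) and goes to the tiny allocator; anything over 31 tiny quanta but
 * under LARGE_THRESHOLD goes to the small allocator; everything else is
 * rounded to whole pages and handled by large_and_huge_malloc.
 */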
3435 | ||
9385eb3d A |
3436 | static void * |
3437 | szone_malloc(szone_t *szone, size_t size) { | |
59e0d9fe | 3438 | return szone_malloc_should_clear(szone, size, 0); |
e9ce8d39 A |
3439 | } |
3440 | ||
9385eb3d | 3441 | static void * |
59e0d9fe A |
3442 | szone_calloc(szone_t *szone, size_t num_items, size_t size) |
3443 | { | |
224c7076 A |
3444 | size_t total_bytes = num_items * size; |
3445 | if ((num_items > 1) && (size != 0) && ((total_bytes / size) != num_items)) | |
3446 | return NULL; | |
3447 | return szone_malloc_should_clear(szone, total_bytes, 1); | |
e9ce8d39 A |
3448 | } |
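/*
 * Worked example, not from the original source: with 32-bit size_t,
 * num_items == 0x10000 and size == 0x10001 multiply to 0x100010000, which
 * truncates to 0x10000.  Since 0x10000 / 0x10001 == 0, not num_items, the
 * division test above catches the overflow and calloc fails instead of
 * returning an undersized block.
 */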
3449 | ||
9385eb3d | 3450 | static void * |
59e0d9fe A |
3451 | szone_valloc(szone_t *szone, size_t size) |
3452 | { | |
e9ce8d39 | 3453 | void *ptr; |
224c7076 | 3454 | size_t num_pages; |
59e0d9fe | 3455 | |
e9ce8d39 | 3456 | num_pages = round_page(size) >> vm_page_shift; |
5b2abdfb | 3457 | ptr = large_and_huge_malloc(szone, num_pages); |
9385eb3d | 3458 | #if DEBUG_MALLOC |
59e0d9fe A |
3459 | if (LOG(szone, ptr)) |
3460 | malloc_printf("szone_valloc returned %p\n", ptr); | |
9385eb3d | 3461 | #endif |
e9ce8d39 A |
3462 | return ptr; |
3463 | } | |
3464 | ||
9385eb3d | 3465 | static size_t |
59e0d9fe A |
3466 | szone_size(szone_t *szone, const void *ptr) |
3467 | { | |
e9ce8d39 | 3468 | size_t size = 0; |
59e0d9fe A |
3469 | boolean_t is_free; |
3470 | msize_t msize, msize_and_free; | |
e9ce8d39 A |
3471 | large_entry_t *entry; |
3472 | huge_entry_t *huge; | |
59e0d9fe A |
3473 | |
3474 | if (!ptr) | |
3475 | return 0; | |
9385eb3d A |
3476 | #if DEBUG_MALLOC |
3477 | if (LOG(szone, ptr)) { | |
59e0d9fe A |
3478 | malloc_printf("in szone_size for %p (szone=%p)\n", ptr, szone); |
3479 | } | |
3480 | #endif | |
3481 | ||
3482 | /* | |
3483 | * Look for it in a tiny region. | |
3484 | */ | |
3485 | if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) | |
3486 | return 0; | |
3487 | if (tiny_region_for_ptr_no_lock(szone, ptr)) { | |
224c7076 A |
3488 | if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) |
3489 | return 0; | |
59e0d9fe A |
3490 | msize = get_tiny_meta_header(ptr, &is_free); |
3491 | return (is_free) ? 0 : TINY_BYTES_FOR_MSIZE(msize); | |
3492 | } | |
3493 | ||
3494 | /* | |
3495 | * Look for it in a small region. | |
3496 | */ | |
3497 | if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) | |
9385eb3d | 3498 | return 0; |
59e0d9fe | 3499 | if (small_region_for_ptr_no_lock(szone, ptr)) { |
224c7076 A |
3500 | if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) |
3501 | return 0; | |
59e0d9fe A |
3502 | msize_and_free = *SMALL_METADATA_FOR_PTR(ptr); |
3503 | return (msize_and_free & SMALL_IS_FREE) ? 0 : SMALL_BYTES_FOR_MSIZE(msize_and_free); | |
e9ce8d39 | 3504 | } |
59e0d9fe A |
3505 | |
3506 | /* | |
3507 | * If not page-aligned, it cannot have come from a large or huge allocation. | |
3508 | */ | |
3509 | if ((uintptr_t)ptr & (vm_page_size - 1)) | |
3510 | return(0); | |
3511 | ||
3512 | /* | |
3513 | * Look for it in a large or huge entry. | |
3514 | */ | |
e9ce8d39 A |
3515 | SZONE_LOCK(szone); |
3516 | entry = large_entry_for_pointer_no_lock(szone, ptr); | |
3517 | if (entry) { | |
9385eb3d | 3518 | size = LARGE_ENTRY_SIZE(*entry); |
e9ce8d39 | 3519 | } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) { |
9385eb3d | 3520 | size = huge->size; |
e9ce8d39 A |
3521 | } |
3522 | SZONE_UNLOCK(szone); | |
9385eb3d A |
3523 | #if DEBUG_MALLOC |
3524 | if (LOG(szone, ptr)) { | |
3525 | malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)size); | |
3526 | } | |
3527 | #endif | |
e9ce8d39 A |
3528 | return size; |
3529 | } | |
3530 | ||
9385eb3d | 3531 | static void * |
59e0d9fe A |
3532 | szone_realloc(szone_t *szone, void *ptr, size_t new_size) |
3533 | { | |
3534 | size_t old_size; | |
9385eb3d | 3535 | void *new_ptr; |
59e0d9fe | 3536 | |
e9ce8d39 | 3537 | #if DEBUG_MALLOC |
9385eb3d | 3538 | if (LOG(szone, ptr)) { |
59e0d9fe | 3539 | malloc_printf("in szone_realloc for %p, %d\n", ptr, (unsigned)new_size); |
e9ce8d39 A |
3540 | } |
3541 | #endif | |
9385eb3d A |
3542 | if (!ptr) { |
3543 | ptr = szone_malloc(szone, new_size); | |
9385eb3d A |
3544 | return ptr; |
3545 | } | |
3546 | old_size = szone_size(szone, ptr); | |
3547 | if (!old_size) { | |
224c7076 | 3548 | szone_error(szone, "pointer being reallocated was not allocated", ptr, NULL); |
9385eb3d | 3549 | return NULL; |
e9ce8d39 | 3550 | } |
9385eb3d | 3551 | /* we never shrink an allocation */ |
59e0d9fe A |
3552 | if (old_size >= new_size) |
3553 | return ptr; | |
3554 | ||
3555 | /* | |
3556 | * If the old and new sizes both suit the tiny allocator, try to reallocate in-place. | |
3557 | */ | |
9385eb3d | 3558 | if ((new_size + TINY_QUANTUM - 1) <= 31 * TINY_QUANTUM) { |
9385eb3d | 3559 | if (try_realloc_tiny_in_place(szone, ptr, old_size, new_size)) { |
9385eb3d A |
3560 | return ptr; |
3561 | } | |
59e0d9fe A |
3562 | |
3563 | /* | |
3564 | * If the old and new sizes both suit the small allocator, and we're not protecting the | |
3565 | * small allocations, try to reallocate in-place. | |
3566 | */ | |
3567 | } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && | |
3568 | ((new_size + SMALL_QUANTUM - 1) < LARGE_THRESHOLD) && | |
3569 | (old_size > 31 * TINY_QUANTUM)) { | |
9385eb3d | 3570 | if (try_realloc_small_in_place(szone, ptr, old_size, new_size)) { |
9385eb3d A |
3571 | return ptr; |
3572 | } | |
59e0d9fe A |
3573 | |
3574 | /* | |
3575 | * If the allocation's a large or huge allocation, try to reallocate in-place there. | |
3576 | */ | |
9385eb3d A |
3577 | } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (old_size > LARGE_THRESHOLD)) { |
3578 | if (try_realloc_large_or_huge_in_place(szone, ptr, old_size, new_size)) { | |
3579 | return ptr; | |
3580 | } | |
3581 | } | |
59e0d9fe A |
3582 | |
3583 | /* | |
3584 | * Can't reallocate in place for whatever reason; allocate a new buffer and copy. | |
3585 | */ | |
9385eb3d | 3586 | new_ptr = szone_malloc(szone, new_size); |
59e0d9fe A |
3587 | if (new_ptr == NULL) |
3588 | return NULL; | |
3589 | ||
3590 | /* | |
3591 | * If the allocation's large enough, try to copy using VM. If that fails, or | |
3592 | * if it's too small, just copy by hand. | |
3593 | */ | |
3594 | if ((old_size < VM_COPY_THRESHOLD) || | |
3595 | vm_copy(mach_task_self(), (vm_address_t)ptr, old_size, (vm_address_t)new_ptr)) | |
9385eb3d | 3596 | memcpy(new_ptr, ptr, old_size); |
9385eb3d | 3597 | szone_free(szone, ptr); |
59e0d9fe | 3598 | |
e9ce8d39 | 3599 | #if DEBUG_MALLOC |
9385eb3d A |
3600 | if (LOG(szone, ptr)) { |
3601 | malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size); | |
e9ce8d39 | 3602 | } |
e9ce8d39 | 3603 | #endif |
9385eb3d | 3604 | return new_ptr; |
e9ce8d39 A |
3605 | } |
3606 | ||
224c7076 A |
3607 | // Given a size, allocates up to 'count' pointers, each capable of holding | |
3608 | // that size, and returns the number actually allocated. These pointers | |
3609 | // are stored in the 'results' array, which must be allocated by the caller. | |
3610 | // May return zero, since this function only makes a best attempt at allocating | |
3611 | // the pointers. Clients should be prepared to call malloc for any additional | |
3612 | // blocks they need. | |
59e0d9fe A |
3613 | static unsigned |
3614 | szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count) | |
3615 | { | |
224c7076 A |
3616 | msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); |
3617 | unsigned found = 0; | |
59e0d9fe | 3618 | |
224c7076 | 3619 | // only bother implementing this for tiny |
59e0d9fe | 3620 | if (size > 31*TINY_QUANTUM) |
224c7076 A |
3621 | return 0; |
3622 | // make sure to return objects at least one quantum in size | |
59e0d9fe | 3623 | if (!msize) |
224c7076 A |
3624 | msize = 1; |
3625 | ||
9385eb3d | 3626 | CHECK(szone, __PRETTY_FUNCTION__); |
224c7076 A |
3627 | |
3628 | // We must lock the zone now, since tiny_malloc_from_free_list assumes that | |
3629 | // the caller has done so. | |
3630 | SZONE_LOCK(szone); | |
3631 | ||
3632 | // with the zone locked, allocate objects from the free list until all | |
3633 | // sufficiently large objects have been exhausted, or we have met our quota | |
3634 | // of objects to allocate. | |
3635 | while (found < count) { | |
3636 | void *ptr = tiny_malloc_from_free_list(szone, msize); | |
3637 | if (!ptr) | |
3638 | break; | |
3639 | ||
3640 | *results++ = ptr; | |
3641 | found++; | |
9385eb3d | 3642 | } |
5b2abdfb | 3643 | SZONE_UNLOCK(szone); |
9385eb3d | 3644 | return found; |
5b2abdfb A |
3645 | } |
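/*
 * Illustrative usage sketch, not part of the original source: callers must
 * treat the returned count as best-effort and top up with plain malloc, as the
 * comment above notes.  The function and variable names here are hypothetical.
 */
static void example_batch_alloc(szone_t *szone, size_t size, void **ptrs, unsigned wanted)
{
    unsigned got = szone_batch_malloc(szone, size, ptrs, wanted);
    while (got < wanted)
        ptrs[got++] = szone_malloc(szone, size);   /* may return NULL if memory is exhausted */
}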
3646 | ||
59e0d9fe A |
3647 | static void |
3648 | szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count) | |
3649 | { | |
3650 | unsigned cc = 0; | |
3651 | void *ptr; | |
224c7076 | 3652 | region_t *tiny_region; |
59e0d9fe A |
3653 | boolean_t is_free; |
3654 | msize_t msize; | |
3655 | ||
9385eb3d A |
3656 | // frees all the pointers in to_be_freed |
3657 | // note that to_be_freed may be overwritten during the process | |
59e0d9fe A |
3658 | if (!count) |
3659 | return; | |
9385eb3d A |
3660 | CHECK(szone, __PRETTY_FUNCTION__); |
3661 | SZONE_LOCK(szone); | |
3662 | while (cc < count) { | |
59e0d9fe | 3663 | ptr = to_be_freed[cc]; |
224c7076 A |
3664 | if (ptr) { |
3665 | /* XXX this really slows us down */ | |
3666 | tiny_region = tiny_region_for_ptr_no_lock(szone, ptr); | |
3667 | if (tiny_region) { | |
3668 | // this is a tiny pointer | |
3669 | if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) | |
3670 | break; // pointer to metadata; let the standard free deal with it | |
3671 | msize = get_tiny_meta_header(ptr, &is_free); | |
3672 | if (is_free) | |
3673 | break; // a double free; let the standard free deal with it | |
3674 | tiny_free_no_lock(szone, tiny_region, ptr, msize); | |
3675 | to_be_freed[cc] = NULL; | |
3676 | } | |
9385eb3d A |
3677 | } |
3678 | cc++; | |
e9ce8d39 | 3679 | } |
9385eb3d A |
3680 | SZONE_UNLOCK(szone); |
3681 | CHECK(szone, __PRETTY_FUNCTION__); | |
3682 | while (count--) { | |
59e0d9fe A |
3683 | ptr = to_be_freed[count]; |
3684 | if (ptr) | |
3685 | szone_free(szone, ptr); | |
e9ce8d39 | 3686 | } |
e9ce8d39 A |
3687 | } |
3688 | ||
9385eb3d | 3689 | static void |
59e0d9fe A |
3690 | szone_destroy(szone_t *szone) |
3691 | { | |
224c7076 | 3692 | size_t index; |
59e0d9fe A |
3693 | large_entry_t *large; |
3694 | vm_range_t range_to_deallocate; | |
3695 | huge_entry_t *huge; | |
59e0d9fe A |
3696 | |
3697 | /* destroy large entries */ | |
e9ce8d39 A |
3698 | index = szone->num_large_entries; |
3699 | while (index--) { | |
59e0d9fe A |
3700 | large = szone->large_entries + index; |
3701 | if (!LARGE_ENTRY_IS_EMPTY(*large)) { | |
9385eb3d | 3702 | // deallocate the pages, including any guard pages |
59e0d9fe | 3703 | deallocate_pages(szone, (void *)LARGE_ENTRY_ADDRESS(*large), LARGE_ENTRY_SIZE(*large), szone->debug_flags); |
9385eb3d A |
3704 | } |
3705 | } | |
3706 | if (szone->num_large_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD) { | |
59e0d9fe A |
3707 | // we do not free in the small chunk case |
3708 | large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate); | |
3709 | if (range_to_deallocate.size) | |
3710 | deallocate_pages(szone, (void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0); | |
9385eb3d | 3711 | } |
59e0d9fe A |
3712 | |
3713 | /* destroy huge entries */ | |
e9ce8d39 A |
3714 | index = szone->num_huge_entries; |
3715 | while (index--) { | |
59e0d9fe A |
3716 | huge = szone->huge_entries + index; |
3717 | deallocate_pages(szone, (void *)huge->address, huge->size, szone->debug_flags); | |
9385eb3d | 3718 | } |
59e0d9fe A |
3719 | |
3720 | /* destroy tiny regions */ | |
224c7076 A |
3721 | for (index = 0; index < szone->num_tiny_regions_allocated; ++index) |
3722 | if (szone->tiny_regions[index]) | |
3723 | deallocate_pages(szone, szone->tiny_regions[index], TINY_REGION_SIZE, 0); | |
3724 | ||
3725 | /* destroy small regions */ | |
3726 | for (index = 0; index < szone->num_small_regions_allocated; ++index) | |
3727 | if (szone->small_regions[index]) | |
3728 | deallocate_pages(szone, szone->small_regions[index], SMALL_REGION_SIZE, 0); | |
3729 | ||
3730 | /* destroy region hash rings, if any */ | |
3731 | if (szone->tiny_regions != szone->initial_tiny_regions) { | |
3732 | size_t size = round_page(szone->num_tiny_regions_allocated * sizeof(region_t)); | |
3733 | deallocate_pages(szone, szone->tiny_regions, size, 0); | |
3734 | } | |
3735 | if (szone->small_regions != szone->initial_small_regions) { | |
3736 | size_t size = round_page(szone->num_small_regions_allocated * sizeof(region_t)); | |
3737 | deallocate_pages(szone, szone->small_regions, size, 0); | |
3738 | } | |
3739 | /* Now destroy the separate szone region */ | |
3740 | deallocate_pages(szone, (void *)szone, SZONE_PAGED_SIZE, SCALABLE_MALLOC_ADD_GUARD_PAGES); | |
e9ce8d39 A |
3741 | } |
3742 | ||
9385eb3d | 3743 | static size_t |
59e0d9fe A |
3744 | szone_good_size(szone_t *szone, size_t size) |
3745 | { | |
3746 | msize_t msize; | |
3747 | unsigned num_pages; | |
3748 | ||
9385eb3d A |
3749 | if (size <= 31 * TINY_QUANTUM) { |
3750 | // think tiny | |
59e0d9fe | 3751 | msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); |
9385eb3d | 3752 | if (! msize) msize = 1; |
3d9156a7 | 3753 | return TINY_BYTES_FOR_MSIZE(msize); |
9385eb3d | 3754 | } |
e9ce8d39 | 3755 | if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) { |
9385eb3d | 3756 | // think small |
59e0d9fe | 3757 | msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1); |
9385eb3d | 3758 | if (! msize) msize = 1; |
59e0d9fe | 3759 | return SMALL_BYTES_FOR_MSIZE(msize); |
e9ce8d39 | 3760 | } else { |
9385eb3d | 3761 | num_pages = round_page(size) >> vm_page_shift; |
59e0d9fe A |
3762 | if (!num_pages) |
3763 | num_pages = 1; // minimal allocation size for this | |
9385eb3d | 3764 | return num_pages << vm_page_shift; |
e9ce8d39 A |
3765 | } |
3766 | } | |
3767 | ||
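/*
 * Illustrative sketch (not part of the zone): malloc_good_size() from
 * <malloc/malloc.h> reports the rounded-up size computed above -- a request is
 * padded to a whole tiny or small quantum, or to whole pages for large
 * requests -- so a caller can grow a buffer to the size it will receive anyway.
 */
#if 0
#include <malloc/malloc.h>
#include <stdlib.h>

static void *
allocate_rounded_example(size_t requested, size_t *usable)
{
	*usable = malloc_good_size(requested);	/* quantum- or page-rounded */
	return malloc(*usable);			/* same underlying block size */
}
#endif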
3768 | unsigned szone_check_counter = 0; | |
3769 | unsigned szone_check_start = 0; | |
3770 | unsigned szone_check_modulo = 1; | |
3771 | ||
9385eb3d | 3772 | static boolean_t |
59e0d9fe A |
3773 | szone_check_all(szone_t *szone, const char *function) |
3774 | { | |
224c7076 A |
3775 | size_t index; |
3776 | ||
e9ce8d39 | 3777 | SZONE_LOCK(szone); |
9385eb3d | 3778 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); |
59e0d9fe A |
3779 | |
3780 | /* check tiny regions - could check region count */ | |
224c7076 A |
3781 | for (index = 0; index < szone->num_tiny_regions_allocated; ++index) { |
3782 | region_t tiny = szone->tiny_regions[index]; | |
3783 | if (tiny && !tiny_check_region(szone, tiny)) { | |
3784 | SZONE_UNLOCK(szone); | |
3785 | szone->debug_flags &= ~ CHECK_REGIONS; | |
3786 | szone_error(szone, "check: tiny region incorrect", NULL, | |
3787 | "*** tiny region %d incorrect szone_check_all(%s) counter=%d\n", | |
3788 | index, function, szone_check_counter); | |
3789 | return 0; | |
3790 | } | |
3791 | } | |
3792 | /* check tiny free lists */ | |
3793 | for (index = 0; index < NUM_TINY_SLOTS; ++index) { | |
3794 | if (!tiny_free_list_check(szone, index)) { | |
3795 | SZONE_UNLOCK(szone); | |
3796 | szone->debug_flags &= ~ CHECK_REGIONS; | |
3797 | szone_error(szone, "check: tiny free list incorrect", NULL, | |
3798 | "*** tiny free list incorrect (slot=%d) szone_check_all(%s) counter=%d\n", | |
3799 | index, function, szone_check_counter); | |
3800 | return 0; | |
3801 | } | |
9385eb3d | 3802 | } |
59e0d9fe | 3803 | /* check small regions - could check region count */ |
224c7076 A |
3804 | for (index = 0; index < szone->num_small_regions_allocated; ++index) { |
3805 | region_t small = szone->small_regions[index]; | |
3806 | if (small && !szone_check_small_region(szone, small)) { | |
3807 | SZONE_UNLOCK(szone); | |
3808 | szone->debug_flags &= ~ CHECK_REGIONS; | |
3809 | szone_error(szone, "check: small region incorrect", NULL, | |
3810 | "*** small region %d incorrect szone_check_all(%s) counter=%d\n", | |
3811 | index, function, szone_check_counter); | |
3812 | return 0; | |
3813 | } | |
3814 | } | |
3815 | /* check small free lists */ | |
3816 | for (index = 0; index < NUM_SMALL_SLOTS; ++index) { | |
3817 | if (!small_free_list_check(szone, index)) { | |
3818 | SZONE_UNLOCK(szone); | |
3819 | szone->debug_flags &= ~ CHECK_REGIONS; | |
3820 | szone_error(szone, "check: small free list incorrect", NULL, | |
3821 | "*** small free list incorrect (grain=%d) szone_check_all(%s) counter=%d\n", | |
3822 | index, function, szone_check_counter); | |
3823 | return 0; | |
3824 | } | |
e9ce8d39 A |
3825 | } |
3826 | SZONE_UNLOCK(szone); | |
3827 | return 1; | |
3828 | } | |
3829 | ||
9385eb3d | 3830 | static boolean_t |
59e0d9fe A |
3831 | szone_check(szone_t *szone) |
3832 | { | |
59e0d9fe | 3833 | if ((++szone_check_counter % 10000) == 0) |
224c7076 | 3834 | _malloc_printf(ASL_LEVEL_NOTICE, "at szone_check counter=%d\n", szone_check_counter); |
59e0d9fe A |
3835 | if (szone_check_counter < szone_check_start) |
3836 | return 1; | |
3837 | if (szone_check_counter % szone_check_modulo) | |
3838 | return 1; | |
e9ce8d39 A |
3839 | return szone_check_all(szone, ""); |
3840 | } | |
3841 | ||
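/*
 * Illustrative sketch (not part of the zone): the gating performed by
 * szone_check() above. For example, with szone_check_start = 5000 and
 * szone_check_modulo = 100, the full heap walk in szone_check_all() runs only
 * once the counter reaches 5000 and then on every 100th call; all other calls
 * report success immediately.
 */
#if 0
static int
would_run_full_check(unsigned counter, unsigned start, unsigned modulo)
{
	if (counter < start)
		return 0;			/* not warmed up yet */
	return (counter % modulo) == 0;		/* every 'modulo'-th call thereafter */
}
#endif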
9385eb3d | 3842 | static kern_return_t |
59e0d9fe A |
3843 | szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder) |
3844 | { | |
e9ce8d39 A |
3845 | szone_t *szone; |
3846 | kern_return_t err; | |
59e0d9fe | 3847 | |
e9ce8d39 | 3848 | if (!reader) reader = _szone_default_reader; |
e9ce8d39 A |
3849 | err = reader(task, zone_address, sizeof(szone_t), (void **)&szone); |
3850 | if (err) return err; | |
224c7076 | 3851 | err = tiny_in_use_enumerator(task, context, type_mask, szone, reader, recorder); |
e9ce8d39 | 3852 | if (err) return err; |
224c7076 | 3853 | err = small_in_use_enumerator(task, context, type_mask, szone, reader, recorder); |
e9ce8d39 | 3854 | if (err) return err; |
9385eb3d A |
3855 | err = large_in_use_enumerator(task, context, type_mask, |
3856 | (vm_address_t)szone->large_entries, szone->num_large_entries, reader, | |
3857 | recorder); | |
3858 | if (err) return err; | |
9385eb3d A |
3859 | err = huge_in_use_enumerator(task, context, type_mask, |
3860 | (vm_address_t)szone->huge_entries, szone->num_huge_entries, reader, | |
3861 | recorder); | |
e9ce8d39 A |
3862 | return err; |
3863 | } | |
3864 | ||
9385eb3d A |
3865 | // The following method is deprecated; use scalable_zone_statistics() instead |
3866 | void | |
59e0d9fe A |
3867 | scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count) |
3868 | { | |
9385eb3d A |
3869 | szone_t *szone = (void *)zone; |
3870 | unsigned info[13]; | |
59e0d9fe | 3871 | |
9385eb3d A |
3872 | // We do not take the lock, to make debugging easier |
3873 | info[4] = szone->num_tiny_objects; | |
3874 | info[5] = szone->num_bytes_in_tiny_objects; | |
3875 | info[6] = szone->num_small_objects; | |
3876 | info[7] = szone->num_bytes_in_small_objects; | |
3877 | info[8] = szone->num_large_objects_in_use; | |
3878 | info[9] = szone->num_bytes_in_large_objects; | |
3879 | info[10] = szone->num_huge_entries; | |
3880 | info[11] = szone->num_bytes_in_huge_objects; | |
3881 | info[12] = szone->debug_flags; | |
3882 | info[0] = info[4] + info[6] + info[8] + info[10]; | |
3883 | info[1] = info[5] + info[7] + info[9] + info[11]; | |
3884 | info[3] = szone->num_tiny_regions * TINY_REGION_SIZE + szone->num_small_regions * SMALL_REGION_SIZE + info[9] + info[11]; | |
3885 | info[2] = info[3] - szone->tiny_bytes_free_at_end - szone->small_bytes_free_at_end; | |
3886 | memcpy(info_to_fill, info, sizeof(unsigned)*count); | |
e9ce8d39 A |
3887 | } |
3888 | ||
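/*
 * Illustrative note on the deprecated interface above: the layout of the
 * info[] array it fills in, as consumed by szone_print() below --
 *   info[0] / info[1]   total blocks / bytes in use (tiny + small + large + huge)
 *   info[2]             bytes touched (allocated minus the free space at region ends)
 *   info[3]             bytes allocated (tiny and small regions plus large and huge bytes)
 *   info[4..11]         per-class object and byte counts: tiny, small, large, huge
 *   info[12]            debug flags
 * A minimal call, with the caveat that no lock is taken so counts may be stale:
 */
#if 0
	unsigned info[13];
	scalable_zone_info(malloc_default_zone(), info, 13);
#endif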
9385eb3d | 3889 | static void |
59e0d9fe A |
3890 | szone_print(szone_t *szone, boolean_t verbose) |
3891 | { | |
224c7076 A |
3892 | unsigned info[13]; |
3893 | size_t index; | |
3894 | region_t region; | |
3895 | ||
3896 | SZONE_LOCK(szone); | |
3897 | scalable_zone_info((void *)szone, info, 13); | |
3898 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, | |
3899 | "Scalable zone %p: inUse=%d(%y) touched=%y allocated=%y flags=%d\n", | |
3900 | szone, info[0], info[1], info[2], info[3], info[12]); | |
3901 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, | |
3902 | "\ttiny=%d(%y) small=%d(%y) large=%d(%y) huge=%d(%y)\n", | |
3903 | info[4], info[5], info[6], info[7], info[8], info[9], info[10], info[11]); | |
3904 | // tiny | |
3905 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, | |
3906 | "%d tiny regions:\n", szone->num_tiny_regions); | |
3907 | for (index = 0; index < szone->num_tiny_regions_allocated; ++index) { | |
3908 | region = szone->tiny_regions[index]; | |
3909 | if (region) | |
3910 | print_tiny_region(verbose, region, (region == szone->last_tiny_region) ? | |
3911 | szone->tiny_bytes_free_at_end : 0); | |
3912 | } | |
3913 | if (verbose) | |
3914 | print_tiny_free_list(szone); | |
3915 | // small | |
3916 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, | |
3917 | "%d small regions:\n", szone->num_small_regions); | |
3918 | for (index = 0; index < szone->num_small_regions_allocated; ++index) { | |
3919 | region = szone->small_regions[index]; | |
3920 | if (region) | |
3921 | print_small_region(szone, verbose, region, | |
3922 | (region == szone->last_small_region) ? | |
3923 | szone->small_bytes_free_at_end : 0); | |
3924 | } | |
3925 | if (verbose) | |
3926 | print_small_free_list(szone); | |
3927 | SZONE_UNLOCK(szone); | |
9385eb3d A |
3928 | } |
3929 | ||
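/*
 * Illustrative sketch (not part of the zone): the printout above is normally
 * reached through malloc_zone_print() from <malloc/malloc.h>, for instance
 * from a debugger or a test harness.
 */
#if 0
#include <malloc/malloc.h>

static void
dump_default_zone_example(void)
{
	malloc_zone_print(malloc_default_zone(), 1);	/* verbose: include free lists */
}
#endif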
3930 | static void | |
59e0d9fe A |
3931 | szone_log(malloc_zone_t *zone, void *log_address) |
3932 | { | |
3933 | szone_t *szone = (szone_t *)zone; | |
3934 | ||
e9ce8d39 A |
3935 | szone->log_address = log_address; |
3936 | } | |
3937 | ||
9385eb3d | 3938 | static void |
59e0d9fe A |
3939 | szone_force_lock(szone_t *szone) |
3940 | { | |
e9ce8d39 A |
3941 | SZONE_LOCK(szone); |
3942 | } | |
3943 | ||
9385eb3d | 3944 | static void |
59e0d9fe A |
3945 | szone_force_unlock(szone_t *szone) |
3946 | { | |
e9ce8d39 A |
3947 | SZONE_UNLOCK(szone); |
3948 | } | |
3949 | ||
9385eb3d | 3950 | boolean_t |
59e0d9fe A |
3951 | scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone) |
3952 | { | |
3953 | szone_t *szone = (szone_t *)zone; | |
3954 | ||
9385eb3d A |
3955 | switch (subzone) { |
3956 | case 0: | |
3957 | stats->blocks_in_use = szone->num_tiny_objects; | |
3958 | stats->size_in_use = szone->num_bytes_in_tiny_objects; | |
3959 | stats->size_allocated = szone->num_tiny_regions * TINY_REGION_SIZE; | |
3960 | stats->max_size_in_use = stats->size_allocated - szone->tiny_bytes_free_at_end; | |
3961 | return 1; | |
3962 | case 1: | |
3963 | stats->blocks_in_use = szone->num_small_objects; | |
3964 | stats->size_in_use = szone->num_bytes_in_small_objects; | |
3965 | stats->size_allocated = szone->num_small_regions * SMALL_REGION_SIZE; | |
3966 | stats->max_size_in_use = stats->size_allocated - szone->small_bytes_free_at_end; | |
3967 | return 1; | |
3968 | case 2: | |
3969 | stats->blocks_in_use = szone->num_large_objects_in_use; | |
3970 | stats->size_in_use = szone->num_bytes_in_large_objects; | |
3971 | stats->max_size_in_use = stats->size_allocated = stats->size_in_use; | |
3972 | return 1; | |
3973 | case 3: | |
3974 | stats->blocks_in_use = szone->num_huge_entries; | |
3975 | stats->size_in_use = szone->num_bytes_in_huge_objects; | |
3976 | stats->max_size_in_use = stats->size_allocated = stats->size_in_use; | |
3977 | return 1; | |
3978 | } | |
3979 | return 0; | |
3980 | } | |
3981 | ||
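/*
 * Illustrative sketch (not part of the zone): iterating the four subzones
 * (0 = tiny, 1 = small, 2 = large, 3 = huge) handled above. The printing
 * helper below is hypothetical.
 */
#if 0
#include <malloc/malloc.h>
#include <stdio.h>

static void
print_subzone_stats_example(malloc_zone_t *zone)
{
	static const char *names[] = { "tiny", "small", "large", "huge" };
	malloc_statistics_t stats;
	unsigned subzone;

	for (subzone = 0; subzone < 4; subzone++) {
		if (!scalable_zone_statistics(zone, &stats, subzone))
			break;
		printf("%s: %u blocks, %lu bytes in use\n",
		    names[subzone], stats.blocks_in_use, (unsigned long)stats.size_in_use);
	}
}
#endif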
3982 | static void | |
59e0d9fe A |
3983 | szone_statistics(szone_t *szone, malloc_statistics_t *stats) |
3984 | { | |
3985 | size_t big_and_huge; | |
3986 | ||
3987 | stats->blocks_in_use = | |
3988 | szone->num_tiny_objects + | |
3989 | szone->num_small_objects + | |
3990 | szone->num_large_objects_in_use + | |
3991 | szone->num_huge_entries; | |
3992 | big_and_huge = szone->num_bytes_in_large_objects + szone->num_bytes_in_huge_objects; | |
9385eb3d | 3993 | stats->size_in_use = szone->num_bytes_in_tiny_objects + szone->num_bytes_in_small_objects + big_and_huge; |
59e0d9fe A |
3994 | stats->max_size_in_use = stats->size_allocated = |
3995 | szone->num_tiny_regions * TINY_REGION_SIZE + | |
3996 | szone->num_small_regions * SMALL_REGION_SIZE + | |
3997 | big_and_huge; |
3998 | ||
9385eb3d A |
3999 | // Now we account for the untouched areas |
4000 | stats->max_size_in_use -= szone->tiny_bytes_free_at_end; | |
4001 | stats->max_size_in_use -= szone->small_bytes_free_at_end; | |
4002 | } | |
e9ce8d39 | 4003 | |
9385eb3d A |
4004 | static const struct malloc_introspection_t szone_introspect = { |
4005 | (void *)szone_ptr_in_use_enumerator, | |
4006 | (void *)szone_good_size, | |
4007 | (void *)szone_check, | |
4008 | (void *)szone_print, | |
4009 | szone_log, | |
4010 | (void *)szone_force_lock, | |
4011 | (void *)szone_force_unlock, | |
4012 | (void *)szone_statistics | |
4013 | }; // marked as const to spare the DATA section | |
4014 | ||
4015 | malloc_zone_t * | |
59e0d9fe A |
4016 | create_scalable_zone(size_t initial_size, unsigned debug_flags) |
4017 | { | |
e9ce8d39 | 4018 | szone_t *szone; |
59e0d9fe A |
4019 | |
4020 | /* | |
4021 | * Sanity-check our build-time assumptions about the size of a page. | |
4022 | * Since we have sized various things assuming the default page size, | |
4023 | * attempting to determine it dynamically is not useful. | |
4024 | */ | |
4025 | if ((vm_page_size != _vm_page_size) || (vm_page_shift != _vm_page_shift)) { | |
4026 | malloc_printf("*** FATAL ERROR - machine page size does not match our assumptions.\n"); | |
9385eb3d A |
4027 | exit(-1); |
4028 | } | |
59e0d9fe | 4029 | |
224c7076 A |
4030 | /* Get memory for the zone, which is now separate from any region. |
4031 | Add guard pages to prevent a walk from any other vm allocation |
4032 | into this one from overwriting the function pointers in basic_zone. */ |
4033 | szone = allocate_pages(NULL, SZONE_PAGED_SIZE, 0, | |
4034 | SCALABLE_MALLOC_ADD_GUARD_PAGES, | |
4035 | VM_MEMORY_MALLOC); | |
59e0d9fe | 4036 | if (!szone) |
224c7076 | 4037 | return NULL; |
59e0d9fe | 4038 | /* set up the szone structure */ |
9385eb3d A |
4039 | szone->tiny_regions = szone->initial_tiny_regions; |
4040 | szone->small_regions = szone->initial_small_regions; | |
224c7076 A |
4041 | szone->num_tiny_regions_allocated = INITIAL_NUM_REGIONS; |
4042 | szone->num_small_regions_allocated = INITIAL_NUM_REGIONS; | |
9385eb3d | 4043 | szone->basic_zone.version = 3; |
e9ce8d39 A |
4044 | szone->basic_zone.size = (void *)szone_size; |
4045 | szone->basic_zone.malloc = (void *)szone_malloc; | |
4046 | szone->basic_zone.calloc = (void *)szone_calloc; | |
4047 | szone->basic_zone.valloc = (void *)szone_valloc; | |
4048 | szone->basic_zone.free = (void *)szone_free; | |
4049 | szone->basic_zone.realloc = (void *)szone_realloc; | |
4050 | szone->basic_zone.destroy = (void *)szone_destroy; | |
9385eb3d A |
4051 | szone->basic_zone.batch_malloc = (void *)szone_batch_malloc; |
4052 | szone->basic_zone.batch_free = (void *)szone_batch_free; | |
4053 | szone->basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect; | |
59e0d9fe | 4054 | szone->debug_flags = debug_flags; |
e9ce8d39 | 4055 | LOCK_INIT(szone->lock); |
224c7076 | 4056 | |
9385eb3d A |
4057 | #if 0 |
4058 | #warning CHECK_REGIONS enabled | |
4059 | debug_flags |= CHECK_REGIONS; | |
4060 | #endif | |
4061 | #if 0 | |
4062 | #warning LOG enabled | |
4063 | szone->log_address = ~0; | |
4064 | #endif | |
e9ce8d39 A |
4065 | CHECK(szone, __PRETTY_FUNCTION__); |
4066 | return (malloc_zone_t *)szone; | |
4067 | } | |
4068 | ||
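/*
 * Illustrative sketch (not part of the zone): constructing and registering a
 * standalone scalable zone, much as the malloc library's own zone-creation
 * path does. A debug_flags value of 0 means no guard pages and no region
 * checking; initial_size is not consulted by this version of the constructor.
 */
#if 0
#include <malloc/malloc.h>

static malloc_zone_t *
make_private_zone_example(void)
{
	malloc_zone_t *zone = create_scalable_zone(0, 0);

	if (zone)
		malloc_zone_register(zone);	/* make it visible to the zone enumerators */
	return zone;
}
#endif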
5b2abdfb A |
4069 | /********* Support code for emacs unexec ************/ |
4070 | ||
4071 | /* History of freezedry version numbers: | |
4072 | * | |
4073 | * 1) Old malloc (before the scalable malloc implementation in this file | |
4074 | * existed). | |
4075 | * 2) Original freezedrying code for scalable malloc. This code was apparently | |
4076 | * based on the old freezedrying code and was fundamentally flawed in its | |
4077 | * assumption that tracking allocated memory regions was adequate to fake | |
4078 | * operations on freezedried memory. This doesn't work, since scalable | |
4079 | * malloc does not store flags in front of large page-aligned allocations. | |
4080 | * 3) Original szone-based freezedrying code. | |
9385eb3d | 4081 | * 4) Fresher malloc with tiny zone |
59e0d9fe | 4082 | * 5) 32/64bit compatible malloc |
224c7076 | 4083 | * 6) Metadata within 1MB and 8MB region for tiny and small |
5b2abdfb A |
4084 | * |
4085 | * No version backward compatibility is provided, but the version number does | |
4086 | * make it possible for malloc_jumpstart() to return an error if the application | |
4087 | * was freezedried with an older version of malloc. | |
4088 | */ | |
224c7076 | 4089 | #define MALLOC_FREEZEDRY_VERSION 6 |
5b2abdfb A |
4090 | |
4091 | typedef struct { | |
4092 | unsigned version; | |
4093 | unsigned nszones; | |
4094 | szone_t *szones; | |
4095 | } malloc_frozen; | |
4096 | ||
9385eb3d | 4097 | static void * |
59e0d9fe A |
4098 | frozen_malloc(szone_t *zone, size_t new_size) |
4099 | { | |
5b2abdfb A |
4100 | return malloc(new_size); |
4101 | } | |
4102 | ||
9385eb3d | 4103 | static void * |
59e0d9fe A |
4104 | frozen_calloc(szone_t *zone, size_t num_items, size_t size) |
4105 | { | |
5b2abdfb A |
4106 | return calloc(num_items, size); |
4107 | } | |
4108 | ||
9385eb3d | 4109 | static void * |
59e0d9fe A |
4110 | frozen_valloc(szone_t *zone, size_t new_size) |
4111 | { | |
5b2abdfb A |
4112 | return valloc(new_size); |
4113 | } | |
4114 | ||
9385eb3d | 4115 | static void * |
59e0d9fe A |
4116 | frozen_realloc(szone_t *zone, void *ptr, size_t new_size) |
4117 | { | |
5b2abdfb A |
4118 | size_t old_size = szone_size(zone, ptr); |
4119 | void *new_ptr; | |
59e0d9fe | 4120 | |
5b2abdfb A |
4121 | if (new_size <= old_size) { |
4122 | return ptr; | |
4123 | } | |
5b2abdfb | 4124 | new_ptr = malloc(new_size); |
5b2abdfb A |
4125 | if (new_ptr != NULL && old_size > 0) { /* skip the copy if the allocation failed */ |
4126 | memcpy(new_ptr, ptr, old_size); | |
4127 | } | |
5b2abdfb A |
4128 | return new_ptr; |
4129 | } | |
4130 | ||
9385eb3d | 4131 | static void |
59e0d9fe A |
4132 | frozen_free(szone_t *zone, void *ptr) |
4133 | { | |
5b2abdfb A |
4134 | } |
4135 | ||
9385eb3d | 4136 | static void |
59e0d9fe A |
4137 | frozen_destroy(szone_t *zone) |
4138 | { | |
5b2abdfb A |
4139 | } |
4140 | ||
4141 | /********* Pseudo-private API for emacs unexec ************/ | |
4142 | ||
4143 | /* | |
4144 | * malloc_freezedry() records all of the szones in use, so that they can be | |
4145 | * partially reconstituted by malloc_jumpstart(). Due to the differences | |
4146 | * between reconstituted memory regions and those created by the szone code, | |
4147 | * care is taken not to reallocate from the freezedried memory, except in the | |
4148 | * case of a non-growing realloc(). | |
4149 | * | |
4150 | * Due to the flexibility provided by the zone registration mechanism, it is | |
4151 | * impossible to implement generic freezedrying for any zone type. This code | |
4152 | * only handles applications that use the szone allocator, so malloc_freezedry() | |
4153 | * returns 0 (error) if any non-szone zones are encountered. | |
4154 | */ | |
4155 | ||
59e0d9fe A |
4156 | uintptr_t |
4157 | malloc_freezedry(void) | |
4158 | { | |
5b2abdfb A |
4159 | extern unsigned malloc_num_zones; |
4160 | extern malloc_zone_t **malloc_zones; | |
4161 | malloc_frozen *data; | |
4162 | unsigned i; | |
4163 | ||
4164 | /* Allocate space in which to store the freezedry state. */ | |
4165 | data = (malloc_frozen *) malloc(sizeof(malloc_frozen)); | |
4166 | ||
9385eb3d | 4167 | /* Set freezedry version number so that malloc_jumpstart() can check for compatibility. */ |
5b2abdfb A |
4168 | data->version = MALLOC_FREEZEDRY_VERSION; |
4169 | ||
4170 | /* Allocate the array of szone pointers. */ | |
4171 | data->nszones = malloc_num_zones; | |
4172 | data->szones = (szone_t *) calloc(malloc_num_zones, sizeof(szone_t)); | |
4173 | ||
59e0d9fe A |
4174 | /* |
4175 | * Fill in the array of szone structures. They are copied rather than | |
5b2abdfb | 4176 | * referenced, since the originals are likely to be clobbered during malloc |
59e0d9fe A |
4177 | * initialization. |
4178 | */ | |
5b2abdfb A |
4179 | for (i = 0; i < malloc_num_zones; i++) { |
4180 | if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) { | |
4181 | /* Unknown zone type. */ | |
4182 | free(data->szones); | |
4183 | free(data); | |
4184 | return 0; | |
4185 | } | |
4186 | memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t)); | |
4187 | } | |
4188 | ||
59e0d9fe | 4189 | return((uintptr_t)data); |
5b2abdfb A |
4190 | } |
4191 | ||
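/*
 * Illustrative sketch (not part of the zone): how an unexec-style dumper might
 * pair the two calls. Before dumping, the cookie from malloc_freezedry() is
 * stashed somewhere the restarted image can find it; on startup,
 * malloc_jumpstart() re-registers the frozen zones and fails if the freezedry
 * version has changed. How the cookie survives the dump is the dumper's
 * concern and is only assumed here.
 */
#if 0
static uintptr_t saved_malloc_cookie;	/* preserved in the dumped image (assumption) */

static int
restore_frozen_heap_example(void)
{
	if (saved_malloc_cookie == 0)
		return 0;				/* nothing was freezedried */
	return malloc_jumpstart(saved_malloc_cookie);	/* non-zero on version mismatch */
}
#endif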
9385eb3d | 4192 | int |
59e0d9fe A |
4193 | malloc_jumpstart(uintptr_t cookie) |
4194 | { | |
4195 | malloc_frozen *data = (malloc_frozen *)cookie; | |
5b2abdfb A |
4196 | unsigned i; |
4197 | ||
4198 | if (data->version != MALLOC_FREEZEDRY_VERSION) { | |
4199 | /* Unsupported freezedry version. */ | |
4200 | return 1; | |
4201 | } | |
4202 | ||
4203 | for (i = 0; i < data->nszones; i++) { | |
4204 | /* Set function pointers. Even the functions that stay the same must be | |
4205 | * set, since there are no guarantees that they will be mapped to the | |
4206 | * same addresses. */ | |
4207 | data->szones[i].basic_zone.size = (void *) szone_size; | |
4208 | data->szones[i].basic_zone.malloc = (void *) frozen_malloc; | |
4209 | data->szones[i].basic_zone.calloc = (void *) frozen_calloc; | |
4210 | data->szones[i].basic_zone.valloc = (void *) frozen_valloc; | |
4211 | data->szones[i].basic_zone.free = (void *) frozen_free; | |
4212 | data->szones[i].basic_zone.realloc = (void *) frozen_realloc; | |
4213 | data->szones[i].basic_zone.destroy = (void *) frozen_destroy; | |
9385eb3d | 4214 | data->szones[i].basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect; |
5b2abdfb A |
4215 | |
4216 | /* Register the freezedried zone. */ | |
4217 | malloc_zone_register(&data->szones[i].basic_zone); | |
4218 | } | |
4219 | ||
4220 | return 0; | |
4221 | } | |
59e0d9fe | 4222 |