/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/* Author: Bertrand Serlet, August 1999 */

#include "scalable_malloc.h"

#include <pthread_internals.h>

#include <unistd.h>
#include <libc.h>
#include <mach/vm_statistics.h>
#include <mach/mach_init.h>

/********************* DEFINITIONS ************************/

#define DEBUG_MALLOC 0 // set to one to debug malloc itself

#define DEBUG_CLIENT 0 // set to one to help debug a nasty memory smasher

#if DEBUG_MALLOC
#warning DEBUG_MALLOC ENABLED
# define INLINE
# define ALWAYSINLINE
# define CHECK_LOCKED(szone, fun)					\
do {									\
    if (__is_threaded && TRY_LOCK(szone->lock)) {			\
	malloc_printf("*** lock was not set %p in %s\n", szone->lock, fun); \
    }									\
} while (0)
#else
# define INLINE	__inline__
# define ALWAYSINLINE __attribute__((always_inline))
# define CHECK_LOCKED(szone, fun) {}
#endif

/*
 * Access to global variables is slow, so optimise our handling of vm_page_size
 * and vm_page_shift.
 */
#define _vm_page_size	vm_page_size	/* to get to the originals */
#define _vm_page_shift	vm_page_shift
#define vm_page_size	4096		/* our normal working sizes */
#define vm_page_shift	12
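/*
 * Editor's note (illustrative, not from the original source): with
 * vm_page_size and vm_page_shift fixed at compile time, common page
 * arithmetic such as
 *
 *     rounded = (size + vm_page_size - 1) & ~(vm_page_size - 1);  // round up to a page
 *     pages   = size >> vm_page_shift;                            // bytes -> pages
 *
 * folds to immediate masks and shifts instead of loads of the kernel-exported
 * globals (still reachable as _vm_page_size / _vm_page_shift above).
 */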

typedef unsigned short msize_t; // a size in multiples of TINY_QUANTUM or SMALL_QUANTUM

/*
 * Note that in the LP64 case, this is 24 bytes, necessitating the 32-byte tiny grain size.
 */
typedef struct {
    uintptr_t	checksum;
    void	*previous;
    void	*next;
} free_list_t;

typedef struct {
    uintptr_t	address_and_num_pages;
    // this type represents both an address and a number of pages
    // the low bits are the number of pages; the high bits are the address
    // note that the exact number of bits used depends on the page size
    // also, this cannot represent pointers larger than 1 << (vm_page_shift * 2)
} compact_range_t;
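/*
 * Illustrative sketch (added by the editor, not part of the original code):
 * how a compact_range_t would be packed and unpacked, assuming a page-aligned
 * address and a page count small enough to fit in the low vm_page_shift bits.
 * The helper names are hypothetical; the same unpacking appears later in the
 * LARGE_ENTRY_* macros.
 */
#if 0
static compact_range_t
compact_range_make(void *address, unsigned num_pages)
{
    compact_range_t range;

    /* high bits: page-aligned address; low bits: number of pages */
    range.address_and_num_pages = (uintptr_t)address | num_pages;
    return range;
}

static void *
compact_range_address(compact_range_t range)
{
    return (void *)((range.address_and_num_pages >> vm_page_shift) << vm_page_shift);
}

static unsigned
compact_range_num_pages(compact_range_t range)
{
    return range.address_and_num_pages & (vm_page_size - 1);
}
#endif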

typedef unsigned char grain_t;

#define CHECK_REGIONS	(1 << 31)

#ifdef __LP64__
# define CHECKSUM_MAGIC	0xdeadbeef0badc0de
#else
# define CHECKSUM_MAGIC	0x357B
#endif

#define MAX_RECORDER_BUFFER	256

/********************* DEFINITIONS for tiny ************************/

/*
 * Memory in the Tiny range is allocated from regions (heaps) pointed to by the szone's tiny_regions
 * pointer.
 *
 * Each region is laid out as a heap (1MB in 32-bit mode, 2MB in 64-bit mode), followed by a header
 * block.  The header block is arranged:
 *
 * 0x0
 *	header bits
 * 0x2000
 *	0xffffffff pad word
 * 0x2004
 *	in-use bits
 * 0x4004
 *	pad word (not written)
 *
 * Each bitfield comprises NUM_TINY_BLOCKS bits, and refers to the corresponding TINY_QUANTUM block
 * within the heap.
 *
 * The bitfields are used to encode the state of memory within the heap.  The header bit indicates
 * that the corresponding quantum is the first quantum in a block (either in use or free).  The
 * in-use bit is set for the header if the block has been handed out (allocated).  If the header
 * bit is not set, the in-use bit is invalid.
 *
 * The szone maintains an array of 32 freelists, each of which is used to hold free objects
 * of the corresponding quantum size.
 *
 * A free block is laid out as:
 *
 * Offset	(32-bit mode)	(64-bit mode)
 * 0x0		0x0
 *	checksum
 * 0x4		0x08
 *	previous
 * 0x8		0x10
 *	next
 * 0xc		0x18
 *	size (in quantum counts)
 * end - 2	end - 2
 *	size (in quantum counts)
 * end		end
 *
 * All fields are pointer-sized, except for the size which is an unsigned short.
 *
 */

#ifdef __LP64__
# define SHIFT_TINY_QUANTUM	5	// Required to fit free_list_t
#else
# define SHIFT_TINY_QUANTUM	4	// Required for AltiVec
#endif
#define	TINY_QUANTUM		(1 << SHIFT_TINY_QUANTUM)

#define FOLLOWING_TINY_PTR(ptr,msize)	(((unsigned char *)(ptr)) + ((msize) << SHIFT_TINY_QUANTUM))

#define NUM_TINY_SLOTS		32	// number of slots for free-lists

#define SHIFT_NUM_TINY_BLOCKS	16
#define NUM_TINY_BLOCKS		(1 << SHIFT_NUM_TINY_BLOCKS)
#define TINY_BLOCKS_ALIGN	(SHIFT_NUM_TINY_BLOCKS + SHIFT_TINY_QUANTUM)

/*
 * Enough room for the data, followed by the bit arrays (2-bits per block) plus 2 words of padding
 * as our bitmap operators overflow, plus rounding to the nearest page.
 */
#define TINY_REGION_SIZE	((NUM_TINY_BLOCKS * TINY_QUANTUM + (NUM_TINY_BLOCKS >> 2) + 8 + vm_page_size - 1) & ~ (vm_page_size - 1))
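/*
 * Worked example (added by the editor; 32-bit values assumed): the heap is
 * NUM_TINY_BLOCKS * TINY_QUANTUM = 65536 * 16 = 1048576 bytes, the two
 * bitfields need NUM_TINY_BLOCKS >> 2 = 16384 bytes (2 bits per block), plus
 * 8 bytes of padding; rounded up to a 4096-byte page this gives
 * TINY_REGION_SIZE = 1069056 bytes (0x105000).
 */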

/*
 * Obtain the size of a free tiny block (in msize_t units).
 */
#ifdef __LP64__
# define TINY_FREE_SIZE(ptr)		(((msize_t *)(ptr))[14])
#else
# define TINY_FREE_SIZE(ptr)		(((msize_t *)(ptr))[6])
#endif
/*
 * The size is also kept at the very end of a free block.
 */
#define TINY_PREVIOUS_MSIZE(ptr)	((msize_t *)(ptr))[-1]

/*
 * Beginning and end pointers for a region's heap.
 */
#define TINY_REGION_ADDRESS(region)	((void *)(region))
#define TINY_REGION_END(region)		(TINY_REGION_ADDRESS(region) + (1 << TINY_BLOCKS_ALIGN))

/*
 * Locate the heap base for a pointer known to be within a tiny region.
 */
#define TINY_REGION_FOR_PTR(_p)		((void *)((uintptr_t)(_p) & ~((1 << TINY_BLOCKS_ALIGN) - 1)))

/*
 * Convert between byte and msize units.
 */
#define TINY_BYTES_FOR_MSIZE(_m)	((_m) << SHIFT_TINY_QUANTUM)
#define TINY_MSIZE_FOR_BYTES(_b)	((_b) >> SHIFT_TINY_QUANTUM)

/*
 * Locate the block header for a pointer known to be within a tiny region.
 */
#define TINY_BLOCK_HEADER_FOR_PTR(_p)	((void *)(((((uintptr_t)(_p)) >> TINY_BLOCKS_ALIGN) + 1) << TINY_BLOCKS_ALIGN))

/*
 * Locate the inuse map for a given block header pointer.
 */
#define TINY_INUSE_FOR_HEADER(_h)	((void *)((uintptr_t)(_h) + (NUM_TINY_BLOCKS >> 3) + 4))

/*
 * Compute the bitmap index for a pointer known to be within a tiny region.
 */
#define TINY_INDEX_FOR_PTR(_p)		(((uintptr_t)(_p) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1))
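/*
 * Illustrative sketch (added by the editor, not part of the original code):
 * how the macros above cooperate to locate the two metadata bits for a block.
 * This mirrors the logic of get_tiny_meta_header() further below; the helper
 * name is hypothetical and the block is not compiled.
 */
#if 0
static int
tiny_block_is_in_use(const void *ptr)
{
    unsigned char *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); /* header bitfield of the region */
    unsigned char *in_use = TINY_INUSE_FOR_HEADER(block_header);  /* in-use bitfield follows it */
    msize_t index = TINY_INDEX_FOR_PTR(ptr);                      /* quantum number within the region */

    /* the block must start at this quantum (header bit set) and be marked in use */
    return ((block_header[index >> 3] >> (index & 7)) & 1) &&
           ((in_use[index >> 3] >> (index & 7)) & 1);
}
#endif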

typedef void *tiny_region_t;

#define INITIAL_NUM_TINY_REGIONS 24	// must be even for szone to be aligned

#define TINY_CACHE 1	// This governs a last-free cache of 1 that bypasses the free-list

#if ! TINY_CACHE
#warning TINY_CACHE turned off
#endif

/********************* DEFINITIONS for small ************************/

/*
 * Memory in the Small range is allocated from regions (heaps) pointed to by the szone's small_regions
 * pointer.
 *
 * Each region is laid out as a heap (8MB in 32-bit and 64-bit mode), followed by the metadata array.
 * The array is arranged as an array of shorts, one for each SMALL_QUANTUM in the heap.
 *
 * The MSB of each short is set for the first quantum in a free block.  The low 15 bits encode the
 * block size (in SMALL_QUANTUM units), or are zero if the quantum is not the first in a block.
 *
 * The szone maintains an array of 32 freelists, each of which is used to hold free objects
 * of the corresponding quantum size.
 *
 * A free block is laid out as:
 *
 * Offset	(32-bit mode)	(64-bit mode)
 * 0x0		0x0
 *	checksum
 * 0x4		0x08
 *	previous
 * 0x8		0x10
 *	next
 * 0xc		0x18
 *	size (in quantum counts)
 * end - 2	end - 2
 *	size (in quantum counts)
 * end		end
 *
 * All fields are pointer-sized, except for the size which is an unsigned short.
 *
 */

#define SMALL_IS_FREE			(1 << 15)

#define	SHIFT_SMALL_QUANTUM		(SHIFT_TINY_QUANTUM + 5)	// 9
#define	SMALL_QUANTUM			(1 << SHIFT_SMALL_QUANTUM)	// 512 bytes

#define FOLLOWING_SMALL_PTR(ptr,msize)	(((unsigned char *)(ptr)) + ((msize) << SHIFT_SMALL_QUANTUM))

#define NUM_SMALL_SLOTS			32	// number of slots for free-lists

/*
 * We can only represent up to 1<<15 for msize; but we choose to stay even below that to avoid the
 * convention msize=0 => msize = (1<<15)
 */
#define SHIFT_NUM_SMALL_BLOCKS		14
#define NUM_SMALL_BLOCKS		(1 << SHIFT_NUM_SMALL_BLOCKS)
#define SMALL_BLOCKS_ALIGN		(SHIFT_NUM_SMALL_BLOCKS + SHIFT_SMALL_QUANTUM)	// 23
#define SMALL_REGION_SIZE		(NUM_SMALL_BLOCKS * SMALL_QUANTUM + NUM_SMALL_BLOCKS * 2)	// data + meta data

#define SMALL_PREVIOUS_MSIZE(ptr)	((msize_t *)(ptr))[-1]

/*
 * Convert between byte and msize units.
 */
#define SMALL_BYTES_FOR_MSIZE(_m)	((_m) << SHIFT_SMALL_QUANTUM)
#define SMALL_MSIZE_FOR_BYTES(_b)	((_b) >> SHIFT_SMALL_QUANTUM)


#define SMALL_REGION_ADDRESS(region)	((unsigned char *)region)
#define SMALL_REGION_END(region)	(SMALL_REGION_ADDRESS(region) + (1 << SMALL_BLOCKS_ALIGN))

/*
 * Locate the heap base for a pointer known to be within a small region.
 */
#define SMALL_REGION_FOR_PTR(_p)	((void *)((uintptr_t)(_p) & ~((1 << SMALL_BLOCKS_ALIGN) - 1)))

/*
 * Locate the metadata base for a pointer known to be within a small region.
 */
#define SMALL_META_HEADER_FOR_PTR(_p)	((msize_t *)(((((uintptr_t)(_p)) >> SMALL_BLOCKS_ALIGN) + 1) << SMALL_BLOCKS_ALIGN))

/*
 * Compute the metadata index for a pointer known to be within a small region.
 */
#define SMALL_META_INDEX_FOR_PTR(_p)	(((uintptr_t)(_p) >> SHIFT_SMALL_QUANTUM) & (NUM_SMALL_BLOCKS - 1))

/*
 * Find the metadata word for a pointer known to be within a small region.
 */
#define SMALL_METADATA_FOR_PTR(_p)	(SMALL_META_HEADER_FOR_PTR(_p) + SMALL_META_INDEX_FOR_PTR(_p))

/*
 * Determine whether a pointer known to be within a small region points to memory which is free.
 */
#define SMALL_PTR_IS_FREE(_p)		(*SMALL_METADATA_FOR_PTR(_p) & SMALL_IS_FREE)

/*
 * Extract the msize value for a pointer known to be within a small region.
 */
#define SMALL_PTR_SIZE(_p)		(*SMALL_METADATA_FOR_PTR(_p) & ~SMALL_IS_FREE)
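/*
 * Worked example (added by the editor): a small region's heap is
 * NUM_SMALL_BLOCKS * SMALL_QUANTUM = 16384 * 512 bytes = 8MB, followed by
 * 16384 * 2 = 32KB of metadata shorts.  For a free block of 3 quanta the
 * metadata word of its first quantum is (SMALL_IS_FREE | 3); for the same
 * block in use it is just 3.  SMALL_PTR_IS_FREE() tests the MSB and
 * SMALL_PTR_SIZE() masks it off to recover the msize.
 */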

typedef void * small_region_t;

#define INITIAL_NUM_SMALL_REGIONS 6	// must be even for szone to be aligned

#define PROTECT_SMALL 0	// Should be 0: 1 is too slow for normal use

#define SMALL_CACHE 1
#if !SMALL_CACHE
#warning SMALL_CACHE turned off
#endif

/********************* DEFINITIONS for large ************************/

#define LARGE_THRESHOLD		(15 * 1024)	// at or above this use "large"

#if (LARGE_THRESHOLD > NUM_SMALL_SLOTS * SMALL_QUANTUM)
#error LARGE_THRESHOLD should always be less than NUM_SMALL_SLOTS * SMALL_QUANTUM
#endif

#define VM_COPY_THRESHOLD	(40 * 1024)
	// When all memory is touched after a copy, vm_copy() is always a lose
	// But if the memory is only read, vm_copy() wins over memmove() at 3 or 4 pages (on a G3/300MHz)
	// This must be larger than LARGE_THRESHOLD

/*
 * Given a large_entry, return the address of the allocated block.
 */
#define LARGE_ENTRY_ADDRESS(entry)					\
	(void *)(((entry).address_and_num_pages >> vm_page_shift) << vm_page_shift)

/*
 * Given a large entry, return the number of pages or bytes in the allocated block.
 */
#define LARGE_ENTRY_NUM_PAGES(entry)					\
	((entry).address_and_num_pages & (vm_page_size - 1))
#define LARGE_ENTRY_SIZE(entry)						\
	(LARGE_ENTRY_NUM_PAGES(entry) << vm_page_shift)

/*
 * Compare a pointer with a large entry.
 */
#define LARGE_ENTRY_MATCHES(entry,ptr)					\
	((((entry).address_and_num_pages - (uintptr_t)(ptr)) >> vm_page_shift) == 0)

#define LARGE_ENTRY_IS_EMPTY(entry)	(((entry).address_and_num_pages) == 0)
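/*
 * Worked example (added by the editor): an entry for a 3-page block at
 * address 0x20000 stores address_and_num_pages = 0x20003.
 * LARGE_ENTRY_NUM_PAGES() extracts 3, LARGE_ENTRY_SIZE() yields
 * 3 << vm_page_shift = 12288 bytes, LARGE_ENTRY_ADDRESS() recovers 0x20000,
 * and LARGE_ENTRY_MATCHES(entry, (void *)0x20000) is true because the
 * difference (3) is smaller than one page.
 */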

typedef compact_range_t large_entry_t;

/********************* DEFINITIONS for huge ************************/

typedef vm_range_t huge_entry_t;

/********************* zone itself ************************/

typedef struct {
    malloc_zone_t	basic_zone;
    pthread_lock_t	lock;
    unsigned		debug_flags;
    void		*log_address;

    /* Regions for tiny objects */
    unsigned		num_tiny_regions;
    tiny_region_t	*tiny_regions;
    void		*last_tiny_free;	// low SHIFT_TINY_QUANTUM bits indicate the msize
    unsigned		tiny_bitmap;		// cache of the 32 free lists
    free_list_t		*tiny_free_list[NUM_TINY_SLOTS]; // 31 free lists for 1*TINY_QUANTUM to 31*TINY_QUANTUM plus 1 for larger than 32*TINY_QUANTUM
    size_t		tiny_bytes_free_at_end;	// the last free region in the last block is treated as a big block in use that is not accounted for
    unsigned		num_tiny_objects;
    size_t		num_bytes_in_tiny_objects;

    /* Regions for small objects */
    unsigned		num_small_regions;
    small_region_t	*small_regions;
    void		*last_small_free;	// low SHIFT_SMALL_QUANTUM bits indicate the msize
    unsigned		small_bitmap;		// cache of the free list
    free_list_t		*small_free_list[NUM_SMALL_SLOTS];
    size_t		small_bytes_free_at_end; // the last free region in the last block is treated as a big block in use that is not accounted for
    unsigned		num_small_objects;
    size_t		num_bytes_in_small_objects;

    /* large objects: vm_page_shift <= log2(size) < 2 *vm_page_shift */
    unsigned		num_large_objects_in_use;
    unsigned		num_large_entries;
    large_entry_t	*large_entries;		// hashed by location; null entries don't count
    size_t		num_bytes_in_large_objects;

    /* huge objects: log2(size) >= 2 *vm_page_shift */
    unsigned		num_huge_entries;
    huge_entry_t	*huge_entries;
    size_t		num_bytes_in_huge_objects;

    /* Initial region list */
    tiny_region_t	initial_tiny_regions[INITIAL_NUM_TINY_REGIONS];
    small_region_t	initial_small_regions[INITIAL_NUM_SMALL_REGIONS];
} szone_t;
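/*
 * Illustrative sketch (added by the editor, not part of the original code):
 * last_tiny_free / last_small_free pack a block pointer and its msize into a
 * single word, using the low bits that quantum alignment leaves at zero (only
 * blocks whose msize fits in those bits can be cached this way).  Hypothetical
 * helpers for the tiny case:
 */
#if 0
static void *
last_free_pack(void *ptr, msize_t msize)
{
    /* ptr is TINY_QUANTUM-aligned, so its low SHIFT_TINY_QUANTUM bits are free */
    return (void *)((uintptr_t)ptr | msize);
}

static void *
last_free_unpack(void *packed, msize_t *msize)
{
    *msize = (uintptr_t)packed & (TINY_QUANTUM - 1);
    return (void *)((uintptr_t)packed & ~((uintptr_t)TINY_QUANTUM - 1));
}
#endif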

#if DEBUG_MALLOC || DEBUG_CLIENT
static void		szone_sleep(void);
#endif
static void		szone_error(szone_t *szone, const char *msg, const void *ptr);
static void		protect(szone_t *szone, void *address, size_t size, unsigned protection, unsigned debug_flags);
static void		*allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label);
static void		deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags);
static kern_return_t	_szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr);

static INLINE void	free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg) ALWAYSINLINE;
static INLINE void	free_list_set_checksum(szone_t *szone, free_list_t *ptr) ALWAYSINLINE;
static unsigned		free_list_count(const free_list_t *ptr);

static INLINE msize_t	get_tiny_meta_header(const void *ptr, boolean_t *is_free) ALWAYSINLINE;
static INLINE void	set_tiny_meta_header_in_use(const void *ptr, msize_t msize) ALWAYSINLINE;
static INLINE void	set_tiny_meta_header_middle(const void *ptr) ALWAYSINLINE;
static INLINE void	set_tiny_meta_header_free(const void *ptr, msize_t msize) ALWAYSINLINE;
static INLINE boolean_t	tiny_meta_header_is_free(const void *ptr) ALWAYSINLINE;
static INLINE void	*tiny_previous_preceding_free(void *ptr, msize_t *prev_msize) ALWAYSINLINE;
static INLINE void	tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize) ALWAYSINLINE;
static INLINE void	tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize) ALWAYSINLINE;
static INLINE tiny_region_t *tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr) ALWAYSINLINE;
static INLINE void	tiny_free_no_lock(szone_t *szone, tiny_region_t *region, void *ptr, msize_t msize) ALWAYSINLINE;
static void		*tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
static INLINE boolean_t	try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) ALWAYSINLINE;
static boolean_t	tiny_check_region(szone_t *szone, tiny_region_t *region);
static kern_return_t	tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t tiny_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder);
static INLINE void	*tiny_malloc_from_free_list(szone_t *szone, msize_t msize) ALWAYSINLINE;
static INLINE void	*tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) ALWAYSINLINE;
static INLINE void	free_tiny(szone_t *szone, void *ptr, tiny_region_t *tiny_region) ALWAYSINLINE;
static void		print_tiny_free_list(szone_t *szone);
static void		print_tiny_region(boolean_t verbose, tiny_region_t region, size_t bytes_at_end);
static boolean_t	tiny_free_list_check(szone_t *szone, grain_t slot);

static INLINE void	small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize) ALWAYSINLINE;
static INLINE void	small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize) ALWAYSINLINE;
static INLINE void	small_meta_header_set_middle(msize_t *meta_headers, msize_t index) ALWAYSINLINE;
static void		small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize);
static void		small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize);
static INLINE small_region_t *small_region_for_ptr_no_lock(szone_t *szone, const void *ptr) ALWAYSINLINE;
static INLINE void	small_free_no_lock(szone_t *szone, small_region_t *region, void *ptr, msize_t msize) ALWAYSINLINE;
static void		*small_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
static INLINE boolean_t	try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) ALWAYSINLINE;
static boolean_t	szone_check_small_region(szone_t *szone, small_region_t *region);
static kern_return_t	small_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t small_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder);
static INLINE void	*small_malloc_from_free_list(szone_t *szone, msize_t msize) ALWAYSINLINE;
static INLINE void	*small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) ALWAYSINLINE;
static INLINE void	*small_malloc_cleared_no_lock(szone_t *szone, msize_t msize) ALWAYSINLINE;
static INLINE void	free_small(szone_t *szone, void *ptr, small_region_t *small_region) ALWAYSINLINE;
static void		print_small_free_list(szone_t *szone);
static void		print_small_region(szone_t *szone, boolean_t verbose, small_region_t *region, size_t bytes_at_end);
static boolean_t	small_free_list_check(szone_t *szone, grain_t grain);

#if DEBUG_MALLOC
static void		large_debug_print(szone_t *szone);
#endif
static large_entry_t	*large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
static void		large_entry_insert_no_lock(szone_t *szone, large_entry_t range);
static INLINE void	large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) ALWAYSINLINE;
static INLINE large_entry_t *large_entries_alloc_no_lock(szone_t *szone, unsigned num) ALWAYSINLINE;
static void		large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate);
static void		large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate);
static vm_range_t	large_free_no_lock(szone_t *szone, large_entry_t *entry);
static kern_return_t	large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder);
static huge_entry_t	*huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
static boolean_t	huge_entry_append(szone_t *szone, huge_entry_t huge);
static kern_return_t	huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder);
static void		*large_and_huge_malloc(szone_t *szone, unsigned num_pages);
static INLINE void	free_large_or_huge(szone_t *szone, void *ptr) ALWAYSINLINE;
static INLINE int	try_realloc_large_or_huge_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) ALWAYSINLINE;

static void		szone_free(szone_t *szone, void *ptr);
static INLINE void	*szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested) ALWAYSINLINE;
static void		*szone_malloc(szone_t *szone, size_t size);
static void		*szone_calloc(szone_t *szone, size_t num_items, size_t size);
static void		*szone_valloc(szone_t *szone, size_t size);
static size_t		szone_size(szone_t *szone, const void *ptr);
static void		*szone_realloc(szone_t *szone, void *ptr, size_t new_size);
static unsigned		szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count);
static void		szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count);
static void		szone_destroy(szone_t *szone);
static size_t		szone_good_size(szone_t *szone, size_t size);

static boolean_t	szone_check_all(szone_t *szone, const char *function);
static boolean_t	szone_check(szone_t *szone);
static kern_return_t	szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder);
static void		szone_print(szone_t *szone, boolean_t verbose);
static void		szone_log(malloc_zone_t *zone, void *log_address);
static void		szone_force_lock(szone_t *szone);
static void		szone_force_unlock(szone_t *szone);

static void		szone_statistics(szone_t *szone, malloc_statistics_t *stats);

static void		*frozen_malloc(szone_t *zone, size_t new_size);
static void		*frozen_calloc(szone_t *zone, size_t num_items, size_t size);
static void		*frozen_valloc(szone_t *zone, size_t new_size);
static void		*frozen_realloc(szone_t *zone, void *ptr, size_t new_size);
static void		frozen_free(szone_t *zone, void *ptr);
static void		frozen_destroy(szone_t *zone);

#if DEBUG_MALLOC
# define LOG(szone,ptr)							\
	(szone->log_address && (((uintptr_t)szone->log_address == -1) || (szone->log_address == (void *)(ptr))))
#else
# define LOG(szone,ptr)		0
#endif

#define SZONE_LOCK(szone)						\
	do {								\
		LOCK(szone->lock);					\
	} while (0)

#define SZONE_UNLOCK(szone)						\
	do {								\
		UNLOCK(szone->lock);					\
	} while (0)

#define LOCK_AND_NOTE_LOCKED(szone,locked)				\
	do {								\
		CHECK(szone, __PRETTY_FUNCTION__);			\
		locked = 1; SZONE_LOCK(szone);				\
	} while (0)

#if DEBUG_MALLOC || DEBUG_CLIENT
# define CHECK(szone,fun)						\
	if ((szone)->debug_flags & CHECK_REGIONS) szone_check_all(szone, fun)
#else
# define CHECK(szone,fun)	do {} while (0)
#endif

/********************* VERY LOW LEVEL UTILITIES ************************/

#if DEBUG_MALLOC || DEBUG_CLIENT
static void
szone_sleep(void)
{

    if (getenv("MallocErrorSleep")) {
	malloc_printf("*** sleeping to help debug\n");
	sleep(3600);	// to help debug
    }
}
#endif

static void
szone_error(szone_t *szone, const char *msg, const void *ptr)
{

    if (szone) SZONE_UNLOCK(szone);
    if (ptr) {
	malloc_printf("*** error for object %p: %s\n", ptr, msg);
    } else {
	malloc_printf("*** error: %s\n", msg);
    }
    malloc_printf("*** set a breakpoint in szone_error to debug\n");
#if DEBUG_MALLOC
    szone_print(szone, 1);
    szone_sleep();
#endif
#if DEBUG_CLIENT
    szone_sleep();
#endif
}

static void
protect(szone_t *szone, void *address, size_t size, unsigned protection, unsigned debug_flags)
{
    kern_return_t	err;

    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) {
	err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address - vm_page_size, vm_page_size, 0, protection);
	if (err) {
	    malloc_printf("*** can't protect(%p) region for prelude guard page at %p\n",
	      protection, address - (1 << vm_page_shift));
	}
    }
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) {
	err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address + size, vm_page_size, 0, protection);
	if (err) {
	    malloc_printf("*** can't protect(%p) region for postlude guard page at %p\n",
	      protection, address + size);
	}
    }
}

static void *
allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label)
{
    // align specifies a desired alignment (as a log) or 0 if no alignment requested
    kern_return_t	err;
    vm_address_t	vm_addr;
    uintptr_t		addr, aligned_address;
    boolean_t		add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    size_t		allocation_size = round_page(size);
    size_t		delta;

    if (align) add_guard_pages = 0; // too cumbersome to deal with that
    if (!allocation_size) allocation_size = 1 << vm_page_shift;
    if (add_guard_pages) allocation_size += 2 * (1 << vm_page_shift);
    if (align) allocation_size += (size_t)1 << align;
    err = vm_allocate(mach_task_self(), &vm_addr, allocation_size, vm_page_label | 1);
    if (err) {
	malloc_printf("*** vm_allocate(size=%lld) failed (error code=%d)\n", (long long)size, err);
	szone_error(szone, "can't allocate region", NULL);
	return NULL;
    }
    addr = (uintptr_t)vm_addr;
    if (align) {
	aligned_address = (addr + ((uintptr_t)1 << align) - 1) & ~ (((uintptr_t)1 << align) - 1);
	if (aligned_address != addr) {
	    delta = aligned_address - addr;
	    err = vm_deallocate(mach_task_self(), (vm_address_t)addr, delta);
	    if (err)
		malloc_printf("*** freeing unaligned header failed with %d\n", err);
	    addr = aligned_address;
	    allocation_size -= delta;
	}
	if (allocation_size > size) {
	    err = vm_deallocate(mach_task_self(), (vm_address_t)addr + size, allocation_size - size);
	    if (err)
		malloc_printf("*** freeing unaligned footer failed with %d\n", err);
	}
    }
    if (add_guard_pages) {
	addr += (uintptr_t)1 << vm_page_shift;
	protect(szone, (void *)addr, size, 0, debug_flags);
    }
    return (void *)addr;
}
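/*
 * Worked example (added by the editor): a tiny region is obtained with
 * allocate_pages(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, 0, ...), which
 * over-allocates by 1 << TINY_BLOCKS_ALIGN, rounds the returned address up to
 * the next such boundary, and then returns the unused leading (delta) and
 * trailing (allocation_size - size) pieces to the kernel with
 * vm_deallocate(), leaving only the aligned region mapped.
 */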

static void
deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags)
{
    kern_return_t	err;
    boolean_t		add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;

    if (add_guard_pages) {
	addr -= 1 << vm_page_shift;
	size += 2 * (1 << vm_page_shift);
    }
    err = vm_deallocate(mach_task_self(), (vm_address_t)addr, size);
    if (err && szone)
	szone_error(szone, "Can't deallocate_pages region", addr);
}

static kern_return_t
_szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr)
{
    *ptr = (void *)address;
    return 0;
}

static INLINE void
free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg)
{
    // We always checksum, as testing whether to do it (based on szone->debug_flags) is as fast as
    // doing it
    // XXX not necessarily true for LP64 case
    if (ptr->checksum != (((uintptr_t)ptr->previous) ^ ((uintptr_t)ptr->next) ^ CHECKSUM_MAGIC)) {
#if DEBUG_MALLOC
	malloc_printf("*** incorrect checksum: %s\n", msg);
#endif
	szone_error(szone, "incorrect checksum for freed object "
	  "- object was probably modified after being freed, break at szone_error to debug", ptr);
    }
}

static INLINE void
free_list_set_checksum(szone_t *szone, free_list_t *ptr)
{
    // We always set checksum, as testing whether to do it (based on
    // szone->debug_flags) is slower than just doing it
    // XXX not necessarily true for LP64 case
    ptr->checksum = ((uintptr_t)ptr->previous) ^ ((uintptr_t)ptr->next) ^ CHECKSUM_MAGIC;
}
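/*
 * Worked example (added by the editor): for a list head whose previous and
 * next pointers are both NULL, the stored checksum is simply CHECKSUM_MAGIC.
 * If a client writes through a stale pointer and clobbers the next field, the
 * XOR no longer matches and free_list_checksum() reports the corrupted node.
 */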

static unsigned
free_list_count(const free_list_t *ptr)
{
    unsigned	count = 0;

    while (ptr) {
	count++;
	ptr = ptr->next;
    }
    return count;
}

/* XXX inconsistent use of BITMAP32 and BITARRAY operations could be cleaned up */

#define BITMAP32_SET(bitmap,bit)	(bitmap |= 1 << (bit))
#define BITMAP32_CLR(bitmap,bit)	(bitmap &= ~ (1 << (bit)))
#define BITMAP32_BIT(bitmap,bit)	((bitmap >> (bit)) & 1)

#define BITMAP32_FFS(bitmap)		(ffs(bitmap))
	// returns bit # of first bit that's one, starting at 1 (returns 0 if !bitmap)

/********************* TINY FREE LIST UTILITIES ************************/

// We encode the meta-headers as follows:
// Each quantum has an associated set of 2 bits:
// block_header when 1 says this block is the beginning of a block
// in_use when 1 says this block is in use
// so a block in use of size 3 is 1-1 0-X 0-X
// for a free block TINY_FREE_SIZE(ptr) carries the size and the bits are 1-0 X-X X-X
// for a block middle the bits are 0-0
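// Illustrative example (added by the editor, not in the original source):
// reading each quantum's pair as (block_header, in_use), an in-use block of
// 3 quanta is encoded (1,1) (0,-) (0,-), with the next quantum's header bit
// set by whatever block follows; the same 3 quanta free are (1,0) (-,-) (-,-)
// with TINY_FREE_SIZE(ptr) == 3 stored in the block body and
// TINY_PREVIOUS_MSIZE() of the following block also set to 3.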

// Attention: these macros evaluate their arguments more than once
#define BITARRAY_SET(bits,index)	(bits[index>>3] |= (1 << (index & 7)))
#define BITARRAY_CLR(bits,index)	(bits[index>>3] &= ~(1 << (index & 7)))
#define BITARRAY_BIT(bits,index)	(((bits[index>>3]) >> (index & 7)) & 1)

// Following is for start<8 and end<=start+32
#define BITARRAY_MCLR_LESS_32(bits,start,end)				\
	do {								\
		unsigned char *_bits = (bits);				\
		unsigned _end = (end);					\
		switch (_end >> 3) {					\
		case 4: _bits[4] &= ~ ((1 << (_end - 32)) - 1); _end = 32; \
		case 3: _bits[3] &= ~ ((1 << (_end - 24)) - 1); _end = 24; \
		case 2: _bits[2] &= ~ ((1 << (_end - 16)) - 1); _end = 16; \
		case 1: _bits[1] &= ~ ((1 << (_end - 8)) - 1); _end = 8; \
		case 0: _bits[0] &= ~ ((1 << _end) - (1 << (start)));	\
		}							\
	} while (0)

#if 0	// Simple but slow version
#warning Slow version in effect
#define BITARRAY_MCLR(bits,index,num)					\
	do {								\
		unsigned	_ctr = (num);				\
		unsigned	_cur = (index);				\
									\
		while (_ctr--) { BITARRAY_CLR(bits,_cur); _cur++; }	\
	} while (0)
#else

// Following is for num <= 32
#define BITARRAY_MCLR(bits,index,num)					\
	do {								\
		unsigned	_index = (index);			\
		unsigned char	*_rebased = (bits) + (_index >> 3);	\
									\
		_index &= 7;						\
		BITARRAY_MCLR_LESS_32(_rebased, _index, _index + (num)); \
	} while (0)
#endif

static INLINE msize_t
get_tiny_meta_header(const void *ptr, boolean_t *is_free)
{
    // returns msize and is_free
    // may return 0 for the msize component (meaning 65536)
    unsigned char	*block_header;
    unsigned char	*in_use;
    msize_t		index;
    unsigned		byte_index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    index = TINY_INDEX_FOR_PTR(ptr);
    byte_index = index >> 3;

    block_header += byte_index;
    index &= 7;
    *is_free = 0;
    if (!BITMAP32_BIT(*block_header, index))
	return 0;
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    if (!BITMAP32_BIT(*in_use, index)) {
	*is_free = 1;
	return TINY_FREE_SIZE(ptr);
    }
    uint32_t	*addr = (uint32_t *)((uintptr_t)block_header & ~3);
    uint32_t	word0 = OSReadLittleInt32(addr, 0) >> index;
    uint32_t	word1 = OSReadLittleInt32(addr, 4) << (8 - index);
    uint32_t	bits = (((uintptr_t)block_header & 3) * 8);	// precision loss on LP64 OK here
    uint32_t	word = (word0 >> bits) | (word1 << (24 - bits));
    uint32_t	result = ffs(word >> 1);
    return result;
}
9385eb3d | 792 | static INLINE void |
59e0d9fe A |
793 | set_tiny_meta_header_in_use(const void *ptr, msize_t msize) |
794 | { | |
795 | unsigned char *block_header; | |
796 | unsigned char *in_use; | |
797 | msize_t index; | |
798 | unsigned byte_index; | |
799 | msize_t clr_msize; | |
800 | unsigned end_bit; | |
801 | ||
802 | block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); | |
803 | index = TINY_INDEX_FOR_PTR(ptr); | |
804 | byte_index = index >> 3; | |
805 | ||
9385eb3d | 806 | #if DEBUG_MALLOC |
59e0d9fe A |
807 | if (msize >= 32) |
808 | malloc_printf("set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize); | |
809 | if ((unsigned)index + (unsigned)msize > 0x10000) | |
810 | malloc_printf("set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize); | |
9385eb3d | 811 | #endif |
9385eb3d A |
812 | block_header += byte_index; |
813 | index &= 7; | |
814 | BITMAP32_SET(*block_header, index); | |
59e0d9fe | 815 | in_use = TINY_INUSE_FOR_HEADER(block_header); |
9385eb3d A |
816 | BITMAP32_SET(*in_use, index); |
817 | index++; | |
59e0d9fe | 818 | clr_msize = msize-1; |
9385eb3d A |
819 | if (clr_msize) { |
820 | byte_index = index >> 3; | |
821 | block_header += byte_index; in_use += byte_index; | |
822 | index &= 7; | |
59e0d9fe | 823 | end_bit = index + clr_msize; |
9385eb3d A |
824 | BITARRAY_MCLR_LESS_32(block_header, index, end_bit); |
825 | BITARRAY_MCLR_LESS_32(in_use, index, end_bit); | |
826 | } | |
827 | BITARRAY_SET(block_header, index+clr_msize); // we set the block_header bit for the following block to reaffirm next block is a block | |
828 | #if DEBUG_MALLOC | |
59e0d9fe A |
829 | { |
830 | boolean_t ff; | |
831 | msize_t mf; | |
832 | ||
833 | mf = get_tiny_meta_header(ptr, &ff); | |
834 | if (msize != mf) { | |
835 | malloc_printf("setting header for tiny in_use %p : %d\n", ptr, msize); | |
836 | malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff); | |
837 | } | |
9385eb3d A |
838 | } |
839 | #endif | |
e9ce8d39 A |
840 | } |

static INLINE void
set_tiny_meta_header_middle(const void *ptr)
{
    // indicates this block is in the middle of an in use block
    unsigned char	*block_header;
    unsigned char	*in_use;
    msize_t		index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

    BITARRAY_CLR(block_header, index);
    BITARRAY_CLR(in_use, index);
    TINY_FREE_SIZE(ptr) = 0;
}

static INLINE void
set_tiny_meta_header_free(const void *ptr, msize_t msize)
{
    // !msize is acceptable and means 65536
    unsigned char	*block_header;
    unsigned char	*in_use;
    msize_t		index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

#if DEBUG_MALLOC
    if ((unsigned)index + (unsigned)msize > 0x10000) {
	malloc_printf("setting header for tiny free %p msize too large: %d\n", ptr, msize);
    }
#endif
    BITARRAY_SET(block_header, index); BITARRAY_CLR(in_use, index);
    TINY_FREE_SIZE(ptr) = msize;
    // mark the end of this block
    if (msize) {	// msize==0 => the whole region is free
	void	*follower = FOLLOWING_TINY_PTR(ptr, msize);
	TINY_PREVIOUS_MSIZE(follower) = msize;
    }
#if DEBUG_MALLOC
    boolean_t	ff;
    msize_t	mf = get_tiny_meta_header(ptr, &ff);
    if ((msize != mf) || !ff) {
	malloc_printf("setting header for tiny free %p : %d\n", ptr, msize);
	malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
    }
#endif
}

static INLINE boolean_t
tiny_meta_header_is_free(const void *ptr)
{
    // returns 1 if the tiny block pointed to is free, 0 otherwise
    unsigned char	*block_header;
    unsigned char	*in_use;
    msize_t		index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);
    if (!BITARRAY_BIT(block_header, index))
	return 0;
    return !BITARRAY_BIT(in_use, index);
}

static INLINE void *
tiny_previous_preceding_free(void *ptr, msize_t *prev_msize)
{
    // returns the previous block, assuming and verifying it's free
    unsigned char	*block_header;
    unsigned char	*in_use;
    msize_t		index;
    msize_t		previous_msize;
    msize_t		previous_index;
    void		*previous_ptr;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

    if (!index)
	return NULL;
    if ((previous_msize = TINY_PREVIOUS_MSIZE(ptr)) > index)
	return NULL;

    previous_index = index - previous_msize;
    previous_ptr = (void *)(TINY_REGION_FOR_PTR(ptr) + TINY_BYTES_FOR_MSIZE(previous_index));
    if (TINY_FREE_SIZE(previous_ptr) != previous_msize)
	return NULL;

    if (!BITARRAY_BIT(block_header, previous_index))
	return NULL;
    if (BITARRAY_BIT(in_use, previous_index))
	return NULL;

    // conservative check did match true check
    *prev_msize = previous_msize;
    return previous_ptr;
}

static INLINE void
tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize)
{
    // Adds an item to the proper free list
    // Also marks the meta-header of the block properly
    // Assumes szone has been locked
    grain_t	slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
    free_list_t	*free_ptr = ptr;
    free_list_t	*free_head = szone->tiny_free_list[slot];

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
	malloc_printf("in tiny_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (((unsigned)ptr) & (TINY_QUANTUM - 1)) {
	szone_error(szone, "tiny_free_list_add_ptr: Unaligned ptr", ptr);
    }
#endif
    set_tiny_meta_header_free(ptr, msize);
    if (free_head) {
	free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
#if DEBUG_MALLOC
	if (free_head->previous) {
	    malloc_printf("ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, free_head, free_head->previous);
	    szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
	}
	if (! tiny_meta_header_is_free(free_head)) {
	    malloc_printf("ptr=%p slot=%d free_head=%p\n", ptr, slot, free_head);
	    szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
	}
#endif
	free_head->previous = free_ptr;
	free_list_set_checksum(szone, free_head);
    } else {
	BITMAP32_SET(szone->tiny_bitmap, slot);
    }
    free_ptr->previous = NULL;
    free_ptr->next = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->tiny_free_list[slot] = free_ptr;
}

static INLINE void
tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize)
{
    // Removes item in the proper free list
    // msize could be read, but all callers have it so we pass it in
    // Assumes szone has been locked
    grain_t	slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
    free_list_t	*free_ptr = ptr;
    free_list_t	*next = free_ptr->next;
    free_list_t	*previous = free_ptr->previous;

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
	malloc_printf("In tiny_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    }
#endif
    free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
    if (!previous) {
	// The block to remove is the head of the free list
#if DEBUG_MALLOC
	if (szone->tiny_free_list[slot] != ptr) {
	    malloc_printf("ptr=%p slot=%d msize=%d szone->tiny_free_list[slot]=%p\n",
	      ptr, slot, msize, szone->tiny_free_list[slot]);
	    szone_error(szone, "tiny_free_list_remove_ptr: Internal invariant broken (szone->tiny_free_list[slot])", ptr);
	    return;
	}
#endif
	szone->tiny_free_list[slot] = next;
	if (!next) BITMAP32_CLR(szone->tiny_bitmap, slot);
    } else {
	previous->next = next;
	free_list_set_checksum(szone, previous);
    }
    if (next) {
	next->previous = previous;
	free_list_set_checksum(szone, next);
    }
}

/*
 * Find the tiny region containing (ptr) (if any).
 *
 * We take advantage of the knowledge that tiny regions are always (1 << TINY_BLOCKS_ALIGN) aligned.
 */
static INLINE tiny_region_t *
tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
{
    tiny_region_t	*region;
    tiny_region_t	rbase;
    int			i;

    /* mask off irrelevant lower bits */
    rbase = TINY_REGION_FOR_PTR(ptr);
    /* iterate over allocated regions - XXX not terribly efficient for large number of regions */
    for (i = szone->num_tiny_regions, region = szone->tiny_regions; i > 0; i--, region++)
	if (rbase == *region)
	    return(region);
    return(NULL);
}

static INLINE void
tiny_free_no_lock(szone_t *szone, tiny_region_t *region, void *ptr, msize_t msize)
{
    size_t	original_size = TINY_BYTES_FOR_MSIZE(msize);
    void	*next_block = ((char *)ptr + original_size);
    msize_t	previous_msize;
    void	*previous;
    msize_t	next_msize;
    free_list_t	*big_free_block;
    free_list_t	*after_next_block;
    free_list_t	*before_next_block;

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
	malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (! msize) {
	malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
	szone_error(szone, "trying to free tiny block that is too small", ptr);
    }
#endif
    // We try to coalesce this block with the preceding one
    previous = tiny_previous_preceding_free(ptr, &previous_msize);
    if (previous) {
#if DEBUG_MALLOC
	if (LOG(szone, ptr) || LOG(szone,previous)) {
	    malloc_printf("in tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
	}
#endif
	tiny_free_list_remove_ptr(szone, previous, previous_msize);
	ptr = previous;
	msize += previous_msize;
    }
    // We try to coalesce with the next block
    if ((next_block < TINY_REGION_END(*region)) && tiny_meta_header_is_free(next_block)) {
	// The next block is free, we coalesce
	next_msize = TINY_FREE_SIZE(next_block);
#if DEBUG_MALLOC
	if (LOG(szone, ptr) || LOG(szone, next_block)) {
	    malloc_printf("in tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n",
	      ptr, msize, next_block, next_msize);
	}
#endif
	if (next_msize >= NUM_TINY_SLOTS) {
	    // we take a short cut here to avoid removing next_block from the slot 31 freelist and then adding ptr back to slot 31
	    msize += next_msize;
	    big_free_block = (free_list_t *)next_block;
	    after_next_block = big_free_block->next;
	    before_next_block = big_free_block->previous;
	    free_list_checksum(szone, big_free_block, __PRETTY_FUNCTION__);
	    if (!before_next_block) {
		szone->tiny_free_list[NUM_TINY_SLOTS-1] = ptr;
	    } else {
		before_next_block->next = ptr;
		free_list_set_checksum(szone, before_next_block);
	    }
	    if (after_next_block) {
		after_next_block->previous = ptr;
		free_list_set_checksum(szone, after_next_block);
	    }
	    ((free_list_t *)ptr)->previous = before_next_block;
	    ((free_list_t *)ptr)->next = after_next_block;
	    free_list_set_checksum(szone, ptr);
	    set_tiny_meta_header_free(ptr, msize);
	    set_tiny_meta_header_middle(big_free_block); // clear the meta_header to enable coalescing backwards
	    goto tiny_free_ending;
	}
	tiny_free_list_remove_ptr(szone, next_block, next_msize);
	set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
	msize += next_msize;
    }
    // When in proper debug mode we write on the memory to help debug memory smashers
    if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize) {
	memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize));
    }
    tiny_free_list_add_ptr(szone, ptr, msize);
  tiny_free_ending:
    szone->num_tiny_objects--;
    szone->num_bytes_in_tiny_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
}
1127 | ||
1128 | static void * | |
59e0d9fe A |
1129 | tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize) |
1130 | { | |
1131 | tiny_region_t last_region, *new_regions; | |
1132 | void *last_block, *ptr, *aligned_address; | |
1133 | ||
9385eb3d A |
1134 | // Allocates from the last region or a freshly allocated region |
1135 | // Before anything we transform the tiny_bytes_free_at_end - if any - to a regular free block | |
1136 | if (szone->tiny_bytes_free_at_end) { | |
59e0d9fe A |
1137 | last_region = szone->tiny_regions[szone->num_tiny_regions-1]; |
1138 | last_block = TINY_REGION_END(last_region) - szone->tiny_bytes_free_at_end; | |
1139 | tiny_free_list_add_ptr(szone, last_block, TINY_MSIZE_FOR_BYTES(szone->tiny_bytes_free_at_end)); | |
9385eb3d A |
1140 | szone->tiny_bytes_free_at_end = 0; |
1141 | } | |
9385eb3d | 1142 | // time to create a new region |
59e0d9fe A |
1143 | aligned_address = allocate_pages(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_TINY)); |
1144 | if (!aligned_address) { | |
9385eb3d A |
1145 | // out of memory! |
1146 | return NULL; | |
1147 | } | |
9385eb3d | 1148 | // We set the padding after block_header to be all 1 |
59e0d9fe | 1149 | ((uint32_t *)(aligned_address + (1 << TINY_BLOCKS_ALIGN) + (NUM_TINY_BLOCKS >> 3)))[0] = ~0; |
9385eb3d | 1150 | if (szone->num_tiny_regions == INITIAL_NUM_TINY_REGIONS) { |
59e0d9fe A |
1151 | // XXX logic here fails after initial reallocation of tiny regions is exhausted (approx 4GB of |
1152 | // tiny allocations) | |
9385eb3d A |
1153 | new_regions = small_malloc_from_region_no_lock(szone, 16); // 16 * 512 bytes is plenty of tiny regions (more than 4,000) |
1154 | if (!new_regions) return NULL; | |
1155 | memcpy(new_regions, szone->tiny_regions, INITIAL_NUM_TINY_REGIONS * sizeof(tiny_region_t)); | |
1156 | szone->tiny_regions = new_regions; // we set the pointer after it's all ready to enable enumeration from another thread without locking | |
1157 | } | |
59e0d9fe | 1158 | szone->tiny_regions[szone->num_tiny_regions] = aligned_address; |
9385eb3d | 1159 | szone->num_tiny_regions ++; // we set the number after the pointer is all ready to enable enumeration from another thread without taking the lock |
59e0d9fe | 1160 | ptr = aligned_address; |
9385eb3d A |
1161 | set_tiny_meta_header_in_use(ptr, msize); |
1162 | szone->num_tiny_objects++; | |
59e0d9fe | 1163 | szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(msize); |
9385eb3d | 1164 | // We put a header on the last block so that it appears in use (for coalescing, etc...) |
59e0d9fe A |
1165 | set_tiny_meta_header_in_use(ptr + TINY_BYTES_FOR_MSIZE(msize), 1); |
1166 | szone->tiny_bytes_free_at_end = TINY_BYTES_FOR_MSIZE(NUM_TINY_BLOCKS - msize); | |
9385eb3d A |
1167 | #if DEBUG_MALLOC |
1168 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1169 | malloc_printf("in tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize); |
9385eb3d A |
1170 | } |
1171 | #endif | |
1172 | return ptr; | |
1173 | } | |
1174 | ||
1175 | static INLINE boolean_t | |
59e0d9fe A |
1176 | try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) |
1177 | { | |
9385eb3d | 1178 | // returns 1 on success |
59e0d9fe A |
1179 | msize_t index; |
1180 | msize_t old_msize; | |
1181 | unsigned next_index; | |
1182 | void *next_block; | |
1183 | boolean_t is_free; | |
1184 | msize_t next_msize, coalesced_msize, leftover_msize; | |
1185 | void *leftover; | |
1186 | ||
1187 | index = TINY_INDEX_FOR_PTR(ptr); | |
1188 | old_msize = TINY_MSIZE_FOR_BYTES(old_size); | |
1189 | next_index = index + old_msize; | |
1190 | ||
9385eb3d | 1191 | if (next_index >= NUM_TINY_BLOCKS) { |
9385eb3d A |
1192 | return 0; |
1193 | } | |
59e0d9fe | 1194 | next_block = (char *)ptr + old_size; |
9385eb3d | 1195 | SZONE_LOCK(szone); |
59e0d9fe | 1196 | is_free = tiny_meta_header_is_free(next_block); |
9385eb3d A |
1197 | if (!is_free) { |
1198 | SZONE_UNLOCK(szone); | |
1199 | return 0; // next_block is in use | |
1200 | } | |
59e0d9fe A |
1201 | next_msize = TINY_FREE_SIZE(next_block); |
1202 | if (old_size + TINY_BYTES_FOR_MSIZE(next_msize) < new_size) { // next_msize is in quanta; convert to bytes before comparing | |
9385eb3d A |
1203 | SZONE_UNLOCK(szone); |
1204 | return 0; // even with next block, not enough | |
1205 | } | |
1206 | tiny_free_list_remove_ptr(szone, next_block, next_msize); | |
1207 | set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards | |
59e0d9fe A |
1208 | coalesced_msize = TINY_MSIZE_FOR_BYTES(new_size - old_size + TINY_QUANTUM - 1); |
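	// e.g. (assuming TINY_QUANTUM == 16): growing a 48-byte block to 70 bytes needs 22 more
	// bytes, which rounds up to 2 quanta; whatever remains of next_block beyond that is put
	// back on a free list as "leftover" just below.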
1209 | leftover_msize = next_msize - coalesced_msize; | |
9385eb3d | 1210 | if (leftover_msize) { |
59e0d9fe | 1211 | leftover = next_block + TINY_BYTES_FOR_MSIZE(coalesced_msize); |
9385eb3d A |
1212 | tiny_free_list_add_ptr(szone, leftover, leftover_msize); |
1213 | } | |
1214 | set_tiny_meta_header_in_use(ptr, old_msize + coalesced_msize); | |
1215 | #if DEBUG_MALLOC | |
1216 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1217 | malloc_printf("in try_realloc_tiny_in_place(), ptr=%p, msize=%d\n", ptr, old_msize + coalesced_msize); |
9385eb3d A |
1218 | } |
1219 | #endif | |
59e0d9fe | 1220 | szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(coalesced_msize); |
9385eb3d A |
1221 | SZONE_UNLOCK(szone); |
1222 | CHECK(szone, __PRETTY_FUNCTION__); | |
9385eb3d A |
1223 | return 1; |
1224 | } | |
1225 | ||
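/*
 * Walks one tiny region and verifies every invariant that can be checked cheaply: block
 * sizes are non-zero, in-use sizes stay within the tiny limit, free blocks never appear
 * back to back, free-list linkage and checksums hold, and each free block's trailing
 * "previous size" marker agrees with its header.  Returns 1 when the region looks
 * consistent, 0 after printing a diagnostic for the first violation found.
 */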
1226 | static boolean_t | |
59e0d9fe A |
1227 | tiny_check_region(szone_t *szone, tiny_region_t *region) |
1228 | { | |
1229 | uintptr_t start, ptr, region_end, follower; | |
9385eb3d | 1230 | boolean_t prev_free = 0; |
59e0d9fe A |
1231 | boolean_t is_free; |
1232 | msize_t msize; | |
1233 | free_list_t *free_head; | |
1234 | ||
1235 | /* establish region limits */ | |
1236 | start = (uintptr_t)TINY_REGION_ADDRESS(*region); | |
1237 | ptr = start; | |
1238 | region_end = (uintptr_t)TINY_REGION_END(*region); | |
1239 | ||
1240 | /* | |
1241 | * The last region may have a trailing chunk which has not been converted into inuse/freelist | |
1242 | * blocks yet. | |
1243 | */ | |
1244 | if (region == szone->tiny_regions + szone->num_tiny_regions - 1) | |
1245 | region_end -= szone->tiny_bytes_free_at_end; | |
1246 | ||
1247 | ||
1248 | /* | |
1249 | * Scan blocks within the region. | |
1250 | */ | |
1251 | while (ptr < region_end) { | |
1252 | /* | |
1253 | * If the first block is free, and its size is 65536 (msize = 0) then the entire region is | |
1254 | * free. | |
1255 | */ | |
1256 | msize = get_tiny_meta_header((void *)ptr, &is_free); | |
1257 | if (is_free && !msize && (ptr == start)) { | |
9385eb3d A |
1258 | return 1; |
1259 | } | |
59e0d9fe A |
1260 | |
1261 | /* | |
1262 | * If the block's size is 65536 (msize = 0) then since we're not the first entry the size is | |
1263 | * corrupt. | |
1264 | */ | |
1265 | if (!msize) { | |
1266 | malloc_printf("*** invariant broken for tiny block %p this msize=%d - size is too small\n", | |
1267 | ptr, msize); | |
9385eb3d A |
1268 | return 0; |
1269 | } | |
59e0d9fe | 1270 | |
9385eb3d | 1271 | if (!is_free) { |
59e0d9fe A |
1272 | /* |
1273 | * In use blocks cannot be more than 31 quanta large. | |
1274 | */ | |
9385eb3d | 1275 | prev_free = 0; |
59e0d9fe A |
1276 | if (msize > 31) { // msize is a quantum count; in-use tiny blocks are at most 31 quanta | |
1277 | malloc_printf("*** invariant broken for %p this tiny msize=%d[%p] - size is too large\n", | |
1278 | ptr, msize, msize); | |
9385eb3d A |
1279 | return 0; |
1280 | } | |
59e0d9fe A |
1281 | /* move to next block */ |
1282 | ptr += TINY_BYTES_FOR_MSIZE(msize); | |
9385eb3d | 1283 | } else { |
59e0d9fe A |
1284 | /* |
1285 | * Free blocks must have been coalesced; we cannot have a free block following another | |
1286 | * free block. | |
1287 | */ | |
9385eb3d | 1288 | if (prev_free) { |
59e0d9fe A |
1289 | malloc_printf("*** invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n", |
1290 | ptr, msize); | |
9385eb3d A |
1291 | return 0; |
1292 | } | |
1293 | prev_free = 1; | |
59e0d9fe A |
1294 | /* |
1295 | * Check the integrity of this block's entry in its freelist. | |
1296 | */ | |
1297 | free_head = (free_list_t *)ptr; | |
9385eb3d A |
1298 | free_list_checksum(szone, free_head, __PRETTY_FUNCTION__); |
1299 | if (free_head->previous && !tiny_meta_header_is_free(free_head->previous)) { | |
59e0d9fe A |
1300 | malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n", |
1301 | ptr, free_head->previous); | |
9385eb3d A |
1302 | return 0; |
1303 | } | |
1304 | if (free_head->next && !tiny_meta_header_is_free(free_head->next)) { | |
59e0d9fe A |
1305 | malloc_printf("*** invariant broken for %p (next in free list %p is not a free pointer)\n", |
1306 | ptr, free_head->next); | |
9385eb3d A |
1307 | return 0; |
1308 | } | |
59e0d9fe A |
1309 | /* |
1310 | * Check the free block's trailing size value. | |
1311 | */ | |
1312 | follower = (uintptr_t)FOLLOWING_TINY_PTR(ptr, msize); | |
1313 | if ((follower != region_end) && (TINY_PREVIOUS_MSIZE(follower) != msize)) { | |
1314 | malloc_printf("*** invariant broken for tiny free %p followed by %p in region [%p-%p] " | |
1315 | "(end marker incorrect) should be %d; in fact %d\n", | |
1316 | ptr, follower, TINY_REGION_ADDRESS(*region), region_end, msize, TINY_PREVIOUS_MSIZE(follower)); | |
9385eb3d A |
1317 | return 0; |
1318 | } | |
59e0d9fe | 1319 | /* move to next block */ |
9385eb3d A |
1320 | ptr = follower; |
1321 | } | |
1322 | } | |
59e0d9fe A |
1323 | /* |
1324 | * Ensure that we scanned the entire region | |
1325 | */ | |
1326 | if (ptr != region_end) { | |
1327 | malloc_printf("*** invariant broken for region end %p - %p\n", ptr, region_end); | |
9385eb3d A |
1328 | return 0; |
1329 | } | |
59e0d9fe A |
1330 | /* |
1331 | * Check the trailing block's integrity. | |
1332 | */ | |
9385eb3d A |
1333 | if (region == szone->tiny_regions + szone->num_tiny_regions - 1) { |
1334 | if (szone->tiny_bytes_free_at_end) { | |
59e0d9fe | 1335 | msize = get_tiny_meta_header((void *)ptr, &is_free); |
9385eb3d | 1336 | if (is_free || (msize != 1)) { |
59e0d9fe | 1337 | malloc_printf("*** invariant broken for blocker block %p - %d %d\n", ptr, msize, is_free); |
9385eb3d A |
1338 | } |
1339 | } | |
1340 | } | |
1341 | return 1; | |
1342 | } | |
1343 | ||
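/*
 * Enumerates the tiny blocks of a (possibly remote) task for the malloc introspection
 * machinery.  The region array and each region's contents are mapped into this task
 * through `reader`; admin and pointer ranges, plus every in-use block, are reported
 * through `recorder`, batched up to MAX_RECORDER_BUFFER entries at a time.
 */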
1344 | static kern_return_t | |
59e0d9fe A |
1345 | tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t tiny_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder) |
1346 | { | |
9385eb3d A |
1347 | tiny_region_t *regions; |
1348 | unsigned index = 0; | |
1349 | vm_range_t buffer[MAX_RECORDER_BUFFER]; | |
1350 | unsigned count = 0; | |
1351 | kern_return_t err; | |
59e0d9fe A |
1352 | tiny_region_t region; |
1353 | vm_range_t range; | |
1354 | vm_range_t admin_range; | |
1355 | vm_range_t ptr_range; | |
1356 | unsigned char *mapped_region; | |
1357 | unsigned char *block_header; | |
1358 | unsigned char *in_use; | |
1359 | unsigned block_index; | |
1360 | unsigned block_limit; | |
1361 | boolean_t is_free; | |
1362 | msize_t msize; | |
1363 | void *mapped_ptr; | |
1364 | unsigned bit; | |
1365 | ||
9385eb3d A |
1366 | err = reader(task, region_address, sizeof(tiny_region_t) * num_regions, (void **)®ions); |
1367 | if (err) return err; | |
1368 | while (index < num_regions) { | |
1369 | // unsigned num_in_use = 0; | |
1370 | // unsigned num_free = 0; | |
59e0d9fe A |
1371 | region = regions[index]; |
1372 | range.address = (vm_address_t)TINY_REGION_ADDRESS(region); | |
1373 | range.size = (vm_size_t)TINY_REGION_SIZE; | |
9385eb3d | 1374 | if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) { |
59e0d9fe A |
1375 | admin_range.address = range.address + (1 << TINY_BLOCKS_ALIGN); |
1376 | admin_range.size = range.size - (1 << TINY_BLOCKS_ALIGN); | |
9385eb3d A |
1377 | recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1); |
1378 | } | |
1379 | if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) { | |
59e0d9fe A |
1380 | ptr_range.address = range.address; |
1381 | ptr_range.size = 1 << TINY_BLOCKS_ALIGN; | |
9385eb3d A |
1382 | recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1); |
1383 | } | |
1384 | if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { | |
9385eb3d | 1385 | err = reader(task, range.address, range.size, (void **)&mapped_region); |
59e0d9fe A |
1386 | if (err) |
1387 | return err; | |
1388 | block_header = (unsigned char *)(mapped_region + (1 << TINY_BLOCKS_ALIGN)); | |
1389 | in_use = block_header + (NUM_TINY_BLOCKS >> 3) + 4; | |
1390 | block_index = 0; | |
1391 | block_limit = NUM_TINY_BLOCKS; | |
9385eb3d | 1392 | if (index == num_regions - 1) |
59e0d9fe | 1393 | block_limit -= TINY_MSIZE_FOR_BYTES(tiny_bytes_free_at_end); |
9385eb3d | 1394 | while (block_index < block_limit) { |
59e0d9fe | 1395 | is_free = !BITARRAY_BIT(in_use, block_index); |
9385eb3d | 1396 | if (is_free) { |
59e0d9fe | 1397 | mapped_ptr = mapped_region + TINY_BYTES_FOR_MSIZE(block_index); |
9385eb3d | 1398 | msize = TINY_FREE_SIZE(mapped_ptr); |
59e0d9fe A |
1399 | if (!msize) |
1400 | break; | |
9385eb3d A |
1401 | } else { |
1402 | msize = 1; | |
59e0d9fe A |
1403 | bit = block_index + 1; |
1404 | while (! BITARRAY_BIT(block_header, bit)) { | |
1405 | bit++; | |
1406 | msize ++; | |
1407 | } | |
1408 | buffer[count].address = range.address + TINY_BYTES_FOR_MSIZE(block_index); | |
1409 | buffer[count].size = TINY_BYTES_FOR_MSIZE(msize); | |
9385eb3d A |
1410 | count++; |
1411 | if (count >= MAX_RECORDER_BUFFER) { | |
1412 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); | |
1413 | count = 0; | |
1414 | } | |
1415 | } | |
1416 | block_index += msize; | |
1417 | } | |
1418 | } | |
9385eb3d A |
1419 | index++; |
1420 | } | |
1421 | if (count) { | |
1422 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); | |
1423 | } | |
1424 | return 0; | |
1425 | } | |
1426 | ||
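/*
 * Satisfies an allocation of msize quanta from the tiny free lists; assumes the szone
 * lock is held.  Search order: the exact-size slot, then larger slots located via the
 * slot bitmap, then the catch-all last slot, and finally the untouched space at the end
 * of the most recent region.  Any excess beyond msize is returned to a free list.
 * Returns NULL if nothing fits.
 */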
1427 | static INLINE void * | |
59e0d9fe A |
1428 | tiny_malloc_from_free_list(szone_t *szone, msize_t msize) |
1429 | { | |
9385eb3d | 1430 | // Assumes we've locked the region |
59e0d9fe A |
1431 | free_list_t *ptr; |
1432 | msize_t this_msize; | |
1433 | grain_t slot = msize - 1; | |
1434 | free_list_t **free_list = szone->tiny_free_list; | |
1435 | free_list_t **the_slot = free_list + slot; | |
1436 | free_list_t *next; | |
1437 | free_list_t **limit; | |
1438 | unsigned bitmap; | |
1439 | msize_t leftover_msize; | |
1440 | free_list_t *leftover_ptr; | |
1441 | ||
1442 | /* | |
1443 | * Look for an exact match by checking the freelist for this msize. | |
1444 | */ | |
9385eb3d A |
1445 | ptr = *the_slot; |
1446 | if (ptr) { | |
59e0d9fe | 1447 | next = ptr->next; |
9385eb3d A |
1448 | if (next) { |
1449 | next->previous = NULL; | |
1450 | free_list_set_checksum(szone, next); | |
1451 | } | |
1452 | *the_slot = next; | |
1453 | this_msize = msize; | |
1454 | #if DEBUG_MALLOC | |
59e0d9fe A |
1455 | if (LOG(szone, ptr)) { |
1456 | malloc_printf("in tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize); | |
9385eb3d A |
1457 | } |
1458 | #endif | |
1459 | goto return_tiny_alloc; | |
1460 | } | |
59e0d9fe A |
1461 | |
1462 | /* | |
1463 | * Iterate over freelists for larger blocks looking for the next-largest block. | |
1464 | */ | |
1465 | bitmap = szone->tiny_bitmap & ~ ((1 << slot) - 1); | |
1466 | if (!bitmap) | |
1467 | goto try_tiny_malloc_from_end; | |
9385eb3d | 1468 | slot = BITMAP32_FFS(bitmap) - 1; |
59e0d9fe | 1469 | limit = free_list + NUM_TINY_SLOTS - 1; |
9385eb3d A |
1470 | free_list += slot; |
1471 | while (free_list < limit) { | |
1472 | // try bigger grains | |
1473 | ptr = *free_list; | |
1474 | if (ptr) { | |
59e0d9fe | 1475 | next = ptr->next; |
9385eb3d A |
1476 | if (next) { |
1477 | next->previous = NULL; | |
1478 | free_list_set_checksum(szone, next); | |
1479 | } | |
1480 | *free_list = next; | |
1481 | this_msize = TINY_FREE_SIZE(ptr); | |
1482 | #if DEBUG_MALLOC | |
59e0d9fe A |
1483 | if (LOG(szone, ptr)) { |
1484 | malloc_printf("in tiny_malloc_from_free_list(), bigger grain ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize); | |
9385eb3d A |
1485 | } |
1486 | #endif | |
1487 | goto add_leftover_and_proceed; | |
1488 | } | |
1489 | free_list++; | |
1490 | } | |
1491 | // we are now looking at the last slot (31) | |
1492 | ptr = *limit; | |
1493 | if (ptr) { | |
9385eb3d | 1494 | this_msize = TINY_FREE_SIZE(ptr); |
59e0d9fe | 1495 | next = ptr->next; |
9385eb3d A |
1496 | if (this_msize - msize >= NUM_TINY_SLOTS) { |
1497 | // the leftover will go back to the free list, so we optimize by modifying the free list rather than removing the head and then adding back | |
59e0d9fe A |
1498 | leftover_msize = this_msize - msize; |
1499 | leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize)); | |
9385eb3d A |
1500 | *limit = leftover_ptr; |
1501 | if (next) { | |
1502 | next->previous = leftover_ptr; | |
1503 | free_list_set_checksum(szone, next); | |
1504 | } | |
59e0d9fe A |
1505 | leftover_ptr->next = next; |
1506 | leftover_ptr->previous = NULL; | |
9385eb3d A |
1507 | free_list_set_checksum(szone, leftover_ptr); |
1508 | set_tiny_meta_header_free(leftover_ptr, leftover_msize); | |
1509 | #if DEBUG_MALLOC | |
1510 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1511 | malloc_printf("in tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize); |
9385eb3d A |
1512 | } |
1513 | #endif | |
1514 | this_msize = msize; | |
1515 | goto return_tiny_alloc; | |
1516 | } | |
1517 | *limit = next; | |
1518 | if (next) { | |
1519 | next->previous = NULL; | |
1520 | free_list_set_checksum(szone, next); | |
1521 | } | |
1522 | goto add_leftover_and_proceed; | |
1523 | } | |
1524 | try_tiny_malloc_from_end: | |
1525 | // Let's see if we can use szone->tiny_bytes_free_at_end | |
59e0d9fe A |
1526 | if (szone->tiny_bytes_free_at_end >= TINY_BYTES_FOR_MSIZE(msize)) { |
1527 | ptr = (free_list_t *)(TINY_REGION_END(szone->tiny_regions[szone->num_tiny_regions-1]) - szone->tiny_bytes_free_at_end); | |
1528 | szone->tiny_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(msize); | |
9385eb3d A |
1529 | if (szone->tiny_bytes_free_at_end) { |
1530 | // let's add an in use block after ptr to serve as boundary | |
59e0d9fe | 1531 | set_tiny_meta_header_in_use((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize), 1); |
9385eb3d A |
1532 | } |
1533 | this_msize = msize; | |
1534 | #if DEBUG_MALLOC | |
59e0d9fe A |
1535 | if (LOG(szone, ptr)) { |
1536 | malloc_printf("in tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize); | |
9385eb3d A |
1537 | } |
1538 | #endif | |
1539 | goto return_tiny_alloc; | |
1540 | } | |
1541 | return NULL; | |
1542 | add_leftover_and_proceed: | |
9385eb3d | 1543 | if (!this_msize || (this_msize > msize)) { |
59e0d9fe A |
1544 | leftover_msize = this_msize - msize; |
1545 | leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize)); | |
9385eb3d A |
1546 | #if DEBUG_MALLOC |
1547 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1548 | malloc_printf("in tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize); |
9385eb3d A |
1549 | } |
1550 | #endif | |
1551 | tiny_free_list_add_ptr(szone, leftover_ptr, leftover_msize); | |
1552 | this_msize = msize; | |
1553 | } | |
1554 | return_tiny_alloc: | |
1555 | szone->num_tiny_objects++; | |
59e0d9fe | 1556 | szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(this_msize); |
9385eb3d A |
1557 | #if DEBUG_MALLOC |
1558 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1559 | malloc_printf("in tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize); |
9385eb3d A |
1560 | } |
1561 | #endif | |
1562 | set_tiny_meta_header_in_use(ptr, this_msize); | |
1563 | return ptr; | |
1564 | } | |
1565 | ||
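/*
 * Front end for tiny allocations: first the one-entry last_tiny_free cache (the freed
 * block's msize rides in the pointer's low bits), then the free lists, then a brand-new
 * region.  Memory recycled from the cache or the free lists is zeroed only when
 * cleared_requested; a freshly mapped region is already zero-filled, so it is returned
 * as-is.
 */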
1566 | static INLINE void * | |
59e0d9fe A |
1567 | tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) |
1568 | { | |
9385eb3d A |
1569 | boolean_t locked = 0; |
1570 | void *ptr; | |
59e0d9fe | 1571 | |
9385eb3d | 1572 | #if DEBUG_MALLOC |
59e0d9fe A |
1573 | if (!msize) { |
1574 | szone_error(szone, "invariant broken (!msize) in allocation (region)", NULL); | |
1575 | return(NULL); | |
9385eb3d A |
1576 | } |
1577 | #endif | |
1578 | #if TINY_CACHE | |
59e0d9fe A |
1579 | ptr = szone->last_tiny_free; |
1580 | if ((((uintptr_t)ptr) & (TINY_QUANTUM - 1)) == msize) { | |
9385eb3d A |
1581 | // we have a candidate - let's lock to make sure |
1582 | LOCK_AND_NOTE_LOCKED(szone, locked); | |
59e0d9fe | 1583 | if (ptr == szone->last_tiny_free) { |
9385eb3d | 1584 | szone->last_tiny_free = NULL; |
9385eb3d A |
1585 | SZONE_UNLOCK(szone); |
1586 | CHECK(szone, __PRETTY_FUNCTION__); | |
59e0d9fe | 1587 | ptr = (void *)((uintptr_t)ptr & ~ (TINY_QUANTUM - 1)); |
9385eb3d | 1588 | if (cleared_requested) { |
59e0d9fe | 1589 | memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize)); |
9385eb3d A |
1590 | } |
1591 | #if DEBUG_MALLOC | |
1592 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1593 | malloc_printf("in tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize); |
9385eb3d A |
1594 | } |
1595 | #endif | |
1596 | return ptr; | |
1597 | } | |
9385eb3d A |
1598 | } |
1599 | #endif | |
1600 | // Except in rare occasions where we need to add a new region, we are going to end up locking, so we might as well lock right away to avoid doing unnecessary optimistic probes | |
1601 | if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked); | |
1602 | ptr = tiny_malloc_from_free_list(szone, msize); | |
9385eb3d A |
1603 | if (ptr) { |
1604 | SZONE_UNLOCK(szone); | |
1605 | CHECK(szone, __PRETTY_FUNCTION__); | |
1606 | if (cleared_requested) { | |
59e0d9fe | 1607 | memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize)); |
9385eb3d A |
1608 | } |
1609 | return ptr; | |
1610 | } | |
1611 | ptr = tiny_malloc_from_region_no_lock(szone, msize); | |
9385eb3d A |
1612 | // we don't clear because this freshly allocated space is pristine |
1613 | SZONE_UNLOCK(szone); | |
1614 | CHECK(szone, __PRETTY_FUNCTION__); | |
1615 | return ptr; | |
1616 | } | |
1617 | ||
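/*
 * free_tiny keeps a one-deep deferred-free cache in szone->last_tiny_free.  Because
 * tiny blocks are TINY_QUANTUM aligned, the low bits of the cached pointer are free to
 * carry the block's msize.  Sketch of the encoding (assuming TINY_QUANTUM == 16, i.e.
 * four spare low bits):
 *
 *     cached = (uintptr_t)ptr | msize;               // only when msize < TINY_QUANTUM
 *     ptr    = (void *)(cached & ~(TINY_QUANTUM - 1));
 *     msize  = cached & (TINY_QUANTUM - 1);
 *
 * A block whose msize does not fit in those bits bypasses the cache and is freed
 * immediately; tiny_malloc_should_clear performs the matching decode on allocation.
 */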
1618 | static INLINE void | |
59e0d9fe A |
1619 | free_tiny(szone_t *szone, void *ptr, tiny_region_t *tiny_region) |
1620 | { | |
1621 | msize_t msize; | |
1622 | boolean_t is_free; | |
1623 | #if TINY_CACHE | |
1624 | void *ptr2; | |
1625 | #endif | |
1626 | ||
9385eb3d A |
1627 | // ptr is known to be in tiny_region |
1628 | SZONE_LOCK(szone); | |
1629 | #if TINY_CACHE | |
59e0d9fe A |
1630 | ptr2 = szone->last_tiny_free; |
1631 | /* check that we don't already have this pointer in the cache */ | |
1632 | if (ptr == (void *)((uintptr_t)ptr2 & ~ (TINY_QUANTUM - 1))) { | |
1633 | szone_error(szone, "double free", ptr); | |
9385eb3d A |
1634 | return; |
1635 | } | |
1636 | #endif /* TINY_CACHE */ | |
59e0d9fe | 1637 | msize = get_tiny_meta_header(ptr, &is_free); |
9385eb3d | 1638 | if (is_free) { |
59e0d9fe | 1639 | szone_error(szone, "double free", ptr); |
9385eb3d A |
1640 | return; |
1641 | } | |
9385eb3d A |
1642 | #if DEBUG_MALLOC |
1643 | if (!msize) { | |
1644 | malloc_printf("*** szone_free() block in use is too large: %p\n", ptr); | |
59e0d9fe | 1645 | return; |
9385eb3d A |
1646 | } |
1647 | #endif | |
1648 | #if TINY_CACHE | |
1649 | if (msize < TINY_QUANTUM) { // msize must fit in the pointer's low alignment bits to be cached in last_tiny_free | |
59e0d9fe | 1650 | szone->last_tiny_free = (void *)(((uintptr_t)ptr) | msize); |
9385eb3d | 1651 | if (!ptr2) { |
9385eb3d A |
1652 | SZONE_UNLOCK(szone); |
1653 | CHECK(szone, __PRETTY_FUNCTION__); | |
1654 | return; | |
1655 | } | |
59e0d9fe A |
1656 | msize = (uintptr_t)ptr2 & (TINY_QUANTUM - 1); |
1657 | ptr = (void *)(((uintptr_t)ptr2) & ~(TINY_QUANTUM - 1)); | |
9385eb3d A |
1658 | tiny_region = tiny_region_for_ptr_no_lock(szone, ptr); |
1659 | if (!tiny_region) { | |
59e0d9fe | 1660 | szone_error(szone, "double free (tiny cache)", ptr); |
9385eb3d A |
1661 | } |
1662 | } | |
1663 | #endif | |
1664 | tiny_free_no_lock(szone, tiny_region, ptr, msize); | |
1665 | SZONE_UNLOCK(szone); | |
1666 | CHECK(szone, __PRETTY_FUNCTION__); | |
1667 | } | |
1668 | ||
1669 | static void | |
59e0d9fe A |
1670 | print_tiny_free_list(szone_t *szone) |
1671 | { | |
1672 | grain_t slot = 0; | |
1673 | free_list_t *ptr; | |
1674 | ||
1675 | malloc_printf("tiny free sizes: "); | |
9385eb3d | 1676 | while (slot < NUM_TINY_SLOTS) { |
59e0d9fe | 1677 | ptr = szone->tiny_free_list[slot]; |
9385eb3d A |
1678 | if (ptr) { |
1679 | malloc_printf("%s%y[%d]; ", (slot == NUM_TINY_SLOTS-1) ? ">=" : "", (slot+1)*TINY_QUANTUM, free_list_count(ptr)); | |
1680 | } | |
1681 | slot++; | |
1682 | } | |
9385eb3d A |
1683 | } |
1684 | ||
1685 | static void | |
59e0d9fe A |
1686 | print_tiny_region(boolean_t verbose, tiny_region_t region, size_t bytes_at_end) |
1687 | { | |
9385eb3d A |
1688 | unsigned counts[1024]; |
1689 | unsigned in_use = 0; | |
59e0d9fe A |
1690 | uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(region); |
1691 | uintptr_t current = start; | |
1692 | uintptr_t limit = (uintptr_t)TINY_REGION_END(region) - bytes_at_end; | |
1693 | boolean_t is_free; | |
1694 | msize_t msize; | |
1695 | unsigned ci; | |
1696 | ||
9385eb3d A |
1697 | memset(counts, 0, 1024 * sizeof(unsigned)); |
1698 | while (current < limit) { | |
59e0d9fe | 1699 | msize = get_tiny_meta_header((void *)current, &is_free); |
9385eb3d A |
1700 | if (is_free && !msize && (current == start)) { | |
1701 | // first block is all free | |
1702 | break; | |
1703 | } | |
1704 | if (!msize) { | |
59e0d9fe | 1705 | malloc_printf("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize); |
9385eb3d A |
1706 | break; |
1707 | } | |
59e0d9fe | 1708 | if (!is_free) { |
9385eb3d | 1709 | // block in use |
59e0d9fe A |
1710 | if (msize > 32) |
1711 | malloc_printf("*** error at %p msize for in_use is %d\n", (void *)current, msize); | |
1712 | if (msize < 1024) | |
1713 | counts[msize]++; | |
9385eb3d A |
1714 | in_use++; |
1715 | } | |
59e0d9fe | 1716 | current += TINY_BYTES_FOR_MSIZE(msize); |
9385eb3d | 1717 | } |
59e0d9fe | 1718 | malloc_printf("Tiny region [%p-%p, %y]\t", (void *)start, TINY_REGION_END(region), (int)TINY_REGION_SIZE); |
9385eb3d | 1719 | malloc_printf("In_use=%d ", in_use); |
59e0d9fe | 1720 | if (bytes_at_end) malloc_printf("untouched=%y ", bytes_at_end); |
9385eb3d | 1721 | if (verbose && in_use) { |
59e0d9fe A |
1722 | malloc_printf("\tSizes in use: "); |
1723 | for (ci = 0; ci < 1024; ci++) | |
1724 | if (counts[ci]) | |
1725 | malloc_printf("%d[%d]", TINY_BYTES_FOR_MSIZE(ci), counts[ci]); | |
9385eb3d A |
1726 | } |
1727 | malloc_printf("\n"); | |
1728 | } | |
1729 | ||
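/*
 * Validates one tiny free-list slot while the szone lock is held: every entry must
 * checksum correctly, be marked free in its block header, be TINY_QUANTUM aligned,
 * belong to a known tiny region, and point back at its predecessor.  Returns 1 on
 * success, 0 after reporting the first inconsistency.
 */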
1730 | static boolean_t | |
59e0d9fe A |
1731 | tiny_free_list_check(szone_t *szone, grain_t slot) |
1732 | { | |
e9ce8d39 | 1733 | unsigned count = 0; |
9385eb3d | 1734 | free_list_t *ptr = szone->tiny_free_list[slot]; |
e9ce8d39 | 1735 | free_list_t *previous = NULL; |
59e0d9fe A |
1736 | boolean_t is_free; |
1737 | ||
1738 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
e9ce8d39 | 1739 | while (ptr) { |
9385eb3d | 1740 | free_list_checksum(szone, ptr, __PRETTY_FUNCTION__); |
59e0d9fe | 1741 | is_free = tiny_meta_header_is_free(ptr); |
9385eb3d | 1742 | if (! is_free) { |
59e0d9fe | 1743 | malloc_printf("*** in-use ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr); |
9385eb3d A |
1744 | return 0; |
1745 | } | |
59e0d9fe A |
1746 | if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) { |
1747 | malloc_printf("*** inaligned ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr); | |
9385eb3d A |
1748 | return 0; |
1749 | } | |
1750 | if (!tiny_region_for_ptr_no_lock(szone, ptr)) { | |
59e0d9fe | 1751 | malloc_printf("*** itr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr); |
9385eb3d A |
1752 | return 0; |
1753 | } | |
1754 | if (ptr->previous != previous) { | |
59e0d9fe | 1755 | malloc_printf("*** irevious incorrectly set slot=%d count=%d ptr=%p\n", slot, count, ptr); |
9385eb3d A |
1756 | return 0; |
1757 | } | |
1758 | previous = ptr; | |
1759 | ptr = ptr->next; | |
1760 | count++; | |
e9ce8d39 A |
1761 | } |
1762 | return 1; | |
1763 | } | |
1764 | ||
9385eb3d A |
1765 | /********************* SMALL FREE LIST UTILITIES ************************/ |
1766 | ||
59e0d9fe A |
1767 | /* |
1768 | * Mark a block as free. Only the first quantum of a block is marked thusly, | |
1769 | * the remainder are marked "middle". | |
1770 | */ | |
9385eb3d | 1771 | static INLINE void |
59e0d9fe A |
1772 | small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize) |
1773 | { | |
1774 | ||
9385eb3d A |
1775 | meta_headers[index] = msize | SMALL_IS_FREE; |
1776 | } | |
1777 | ||
59e0d9fe A |
1778 | /* |
1779 | * Mark a block as in use. Only the first quantum of a block is marked thusly, | |
1780 | * the remainder are marked "middle". | |
1781 | */ | |
9385eb3d | 1782 | static INLINE void |
59e0d9fe A |
1783 | small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize) |
1784 | { | |
1785 | ||
9385eb3d A |
1786 | meta_headers[index] = msize; |
1787 | } | |
1788 | ||
59e0d9fe A |
1789 | /* |
1790 | * Mark a quantum as being the second or later in a block. | |
1791 | */ | |
9385eb3d | 1792 | static INLINE void |
59e0d9fe A |
1793 | small_meta_header_set_middle(msize_t *meta_headers, msize_t index) |
1794 | { | |
1795 | ||
9385eb3d A |
1796 | meta_headers[index] = 0; |
1797 | } | |
e9ce8d39 | 1798 | |
59e0d9fe A |
1799 | // Adds an item to the proper free list |
1800 | // Also marks the header of the block properly | |
1801 | // Assumes szone has been locked | |
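// Free blocks of 1..NUM_SMALL_SLOTS-1 quanta each get a dedicated slot (msize 1 maps to
// slot 0); blocks of NUM_SMALL_SLOTS quanta or more all share the last slot.  The same
// mapping is used by small_free_list_remove_ptr and small_malloc_from_free_list.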
9385eb3d | 1802 | static void |
59e0d9fe A |
1803 | small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize) |
1804 | { | |
9385eb3d A |
1805 | grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1; |
1806 | free_list_t *free_ptr = ptr; | |
1807 | free_list_t *free_head = szone->small_free_list[grain]; | |
59e0d9fe A |
1808 | void *follower; |
1809 | ||
1810 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
9385eb3d A |
1811 | #if DEBUG_MALLOC |
1812 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1813 | malloc_printf("in small_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize); |
9385eb3d | 1814 | } |
59e0d9fe | 1815 | if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) { |
9385eb3d A |
1816 | szone_error(szone, "small_free_list_add_ptr: Unaligned ptr", ptr); |
1817 | } | |
1818 | #endif | |
1819 | if (free_head) { | |
1820 | free_list_checksum(szone, free_head, __PRETTY_FUNCTION__); | |
1821 | #if DEBUG_MALLOC | |
1822 | if (free_head->previous) { | |
59e0d9fe A |
1823 | malloc_printf("ptr=%p grain=%d free_head=%p previous=%p\n", |
1824 | ptr, grain, free_head, free_head->previous); | |
9385eb3d A |
1825 | szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr); |
1826 | } | |
59e0d9fe | 1827 | if (!SMALL_PTR_IS_FREE(free_head)) { |
9385eb3d A |
1828 | malloc_printf("ptr=%p grain=%d free_head=%p\n", ptr, grain, free_head); |
1829 | szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr); | |
1830 | } | |
1831 | #endif | |
1832 | free_head->previous = free_ptr; | |
1833 | free_list_set_checksum(szone, free_head); | |
e9ce8d39 | 1834 | } else { |
9385eb3d A |
1835 | BITMAP32_SET(szone->small_bitmap, grain); |
1836 | } | |
1837 | free_ptr->previous = NULL; | |
1838 | free_ptr->next = free_head; | |
1839 | free_list_set_checksum(szone, free_ptr); | |
1840 | szone->small_free_list[grain] = free_ptr; | |
59e0d9fe | 1841 | follower = ptr + SMALL_BYTES_FOR_MSIZE(msize); |
9385eb3d A |
1842 | SMALL_PREVIOUS_MSIZE(follower) = msize; |
1843 | } | |
1844 | ||
59e0d9fe A |
1845 | // Removes an item from the proper free list |
1846 | // msize could be read, but all callers have it so we pass it in | |
1847 | // Assumes szone has been locked | |
9385eb3d | 1848 | static void |
59e0d9fe A |
1849 | small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize) |
1850 | { | |
9385eb3d A |
1851 | grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1; |
1852 | free_list_t *free_ptr = ptr; | |
1853 | free_list_t *next = free_ptr->next; | |
1854 | free_list_t *previous = free_ptr->previous; | |
59e0d9fe A |
1855 | |
1856 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
e9ce8d39 | 1857 | #if DEBUG_MALLOC |
9385eb3d | 1858 | if (LOG(szone,ptr)) { |
59e0d9fe | 1859 | malloc_printf("in small_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize); |
9385eb3d A |
1860 | } |
1861 | #endif | |
1862 | free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__); | |
1863 | if (!previous) { | |
e9ce8d39 | 1864 | #if DEBUG_MALLOC |
9385eb3d | 1865 | if (szone->small_free_list[grain] != ptr) { |
59e0d9fe A |
1866 | malloc_printf("ptr=%p grain=%d msize=%d szone->small_free_list[grain]=%p\n", |
1867 | ptr, grain, msize, szone->small_free_list[grain]); | |
9385eb3d A |
1868 | szone_error(szone, "small_free_list_remove_ptr: Internal invariant broken (szone->small_free_list[grain])", ptr); |
1869 | return; | |
1870 | } | |
1871 | #endif | |
1872 | szone->small_free_list[grain] = next; | |
1873 | if (!next) BITMAP32_CLR(szone->small_bitmap, grain); | |
1874 | } else { | |
1875 | previous->next = next; | |
1876 | free_list_set_checksum(szone, previous); | |
1877 | } | |
1878 | if (next) { | |
1879 | next->previous = previous; | |
1880 | free_list_set_checksum(szone, next); | |
1881 | } | |
1882 | } | |
1883 | ||
1884 | static INLINE small_region_t * | |
59e0d9fe A |
1885 | small_region_for_ptr_no_lock(szone_t *szone, const void *ptr) |
1886 | { | |
1887 | small_region_t *region; | |
1888 | small_region_t rbase; | |
1889 | int i; | |
1890 | ||
1891 | /* find assumed heap/region base */ | |
1892 | rbase = SMALL_REGION_FOR_PTR(ptr); | |
1893 | ||
1894 | /* scan existing regions for a match */ | |
1895 | for (i = szone->num_small_regions, region = szone->small_regions; i > 0; i--, region++) | |
1896 | if (rbase == *region) | |
1897 | return(region); | |
1898 | return(NULL); | |
9385eb3d A |
1899 | } |
1900 | ||
1901 | static INLINE void | |
59e0d9fe A |
1902 | small_free_no_lock(szone_t *szone, small_region_t *region, void *ptr, msize_t msize) |
1903 | { | |
1904 | msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); | |
1905 | unsigned index = SMALL_META_INDEX_FOR_PTR(ptr); | |
1906 | size_t original_size = SMALL_BYTES_FOR_MSIZE(msize); | |
1907 | unsigned char *next_block = ((unsigned char *)ptr + original_size); | |
1908 | msize_t next_index = index + msize; | |
1909 | msize_t previous_msize, next_msize; | |
1910 | void *previous; | |
1911 | ||
9385eb3d A |
1912 | // Assumes locked |
1913 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
9385eb3d A |
1914 | #if DEBUG_MALLOC |
1915 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1916 | malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize); |
9385eb3d A |
1917 | } |
1918 | if (! msize) { | |
59e0d9fe A |
1919 | malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize); |
1920 | szone_error(szone, "trying to free small block that is too small", ptr); | |
9385eb3d | 1921 | } |
9385eb3d A |
1922 | #endif |
1923 | // We try to coalesce this block with the preceding one | |
1924 | if (index && (SMALL_PREVIOUS_MSIZE(ptr) <= index)) { | |
59e0d9fe | 1925 | previous_msize = SMALL_PREVIOUS_MSIZE(ptr); |
9385eb3d | 1926 | if (meta_headers[index - previous_msize] == (previous_msize | SMALL_IS_FREE)) { |
59e0d9fe | 1927 | previous = ptr - SMALL_BYTES_FOR_MSIZE(previous_msize); |
9385eb3d | 1928 | // previous is really to be coalesced |
e9ce8d39 | 1929 | #if DEBUG_MALLOC |
9385eb3d | 1930 | if (LOG(szone, ptr) || LOG(szone,previous)) { |
59e0d9fe | 1931 | malloc_printf("in small_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous); |
9385eb3d A |
1932 | } |
1933 | #endif | |
9385eb3d A |
1934 | small_free_list_remove_ptr(szone, previous, previous_msize); |
1935 | small_meta_header_set_middle(meta_headers, index); | |
1936 | ptr = previous; | |
1937 | msize += previous_msize; | |
1938 | index -= previous_msize; | |
e9ce8d39 | 1939 | } |
9385eb3d A |
1940 | } |
1941 | // We try to coalesce with the next block | |
59e0d9fe | 1942 | if ((next_block < SMALL_REGION_END(*region)) && (meta_headers[next_index] & SMALL_IS_FREE)) { |
9385eb3d | 1943 | // next block is free, we coalesce |
59e0d9fe | 1944 | next_msize = meta_headers[next_index] & ~ SMALL_IS_FREE; |
9385eb3d A |
1945 | #if DEBUG_MALLOC |
1946 | if (LOG(szone,ptr)) malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", ptr, msize, next_block, next_msize); | |
1947 | #endif | |
9385eb3d A |
1948 | small_free_list_remove_ptr(szone, next_block, next_msize); |
1949 | small_meta_header_set_middle(meta_headers, next_index); | |
1950 | msize += next_msize; | |
e9ce8d39 A |
1951 | } |
1952 | if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) { | |
1953 | if (!msize) { | |
59e0d9fe | 1954 | szone_error(szone, "incorrect size information - block header was damaged", ptr); |
e9ce8d39 | 1955 | } else { |
59e0d9fe | 1956 | memset(ptr, 0x55, SMALL_BYTES_FOR_MSIZE(msize)); |
e9ce8d39 A |
1957 | } |
1958 | } | |
9385eb3d A |
1959 | small_free_list_add_ptr(szone, ptr, msize); |
1960 | small_meta_header_set_is_free(meta_headers, index, msize); | |
e9ce8d39 A |
1961 | szone->num_small_objects--; |
1962 | szone->num_bytes_in_small_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks | |
1963 | } | |
1964 | ||
9385eb3d | 1965 | static void * |
59e0d9fe A |
1966 | small_malloc_from_region_no_lock(szone_t *szone, msize_t msize) |
1967 | { | |
1968 | small_region_t last_region; | |
1969 | void *last_block; | |
1970 | void *ptr; | |
1971 | void *new_address; | |
1972 | msize_t *meta_headers; | |
1973 | msize_t index ; | |
1974 | size_t region_capacity; | |
1975 | msize_t new_msize; | |
1976 | small_region_t *new_regions; | |
1977 | msize_t msize_left; | |
1978 | ||
e9ce8d39 | 1979 | // Allocates from the last region or a freshly allocated region |
9385eb3d A |
1980 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); |
1981 | // Before anything we transform the small_bytes_free_at_end - if any - to a regular free block | |
1982 | if (szone->small_bytes_free_at_end) { | |
59e0d9fe A |
1983 | last_region = szone->small_regions[szone->num_small_regions - 1]; |
1984 | last_block = (void *)(SMALL_REGION_END(last_region) - szone->small_bytes_free_at_end); | |
1985 | small_free_list_add_ptr(szone, last_block, SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end)); | |
1986 | *SMALL_METADATA_FOR_PTR(last_block) = SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end) | SMALL_IS_FREE; | |
9385eb3d | 1987 | szone->small_bytes_free_at_end = 0; |
e9ce8d39 A |
1988 | } |
1989 | // time to create a new region | |
59e0d9fe | 1990 | new_address = allocate_pages(szone, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_SMALL)); |
e9ce8d39 | 1991 | if (!new_address) { |
9385eb3d A |
1992 | // out of memory! |
1993 | return NULL; | |
1994 | } | |
59e0d9fe A |
1995 | ptr = new_address; |
1996 | meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); | |
1997 | index = 0; | |
9385eb3d A |
1998 | if (szone->num_small_regions == INITIAL_NUM_SMALL_REGIONS) { |
1999 | // time to grow the number of regions | |
59e0d9fe A |
2000 | region_capacity = (1 << (32 - SMALL_BLOCKS_ALIGN)) - 20; // that is for sure the maximum number of small regions we can have |
2001 | new_msize = (region_capacity * sizeof(small_region_t) + SMALL_QUANTUM - 1) / SMALL_QUANTUM; | |
2002 | new_regions = ptr; | |
9385eb3d A |
2003 | small_meta_header_set_in_use(meta_headers, index, new_msize); |
2004 | szone->num_small_objects++; | |
59e0d9fe | 2005 | szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(new_msize); |
9385eb3d | 2006 | memcpy(new_regions, szone->small_regions, INITIAL_NUM_SMALL_REGIONS * sizeof(small_region_t)); |
59e0d9fe A |
2007 | // We intentionally leak the previous regions pointer to avoid multi-threading crashes if |
2008 | // another thread was reading it (unlocked) while we are changing it. | |
9385eb3d | 2009 | szone->small_regions = new_regions; // note we set this pointer after it's all set |
59e0d9fe | 2010 | ptr += SMALL_BYTES_FOR_MSIZE(new_msize); |
9385eb3d | 2011 | index = new_msize; |
9385eb3d | 2012 | } |
59e0d9fe A |
2013 | szone->small_regions[szone->num_small_regions] = new_address; |
2014 | // we bump the number of regions AFTER we have changed the regions pointer to enable finding a | |
2015 | // small region without taking the lock | |
2016 | // XXX naively assumes memory ordering coherence between this and other CPUs | |
2017 | szone->num_small_regions++; | |
2018 | small_meta_header_set_in_use(meta_headers, index, msize); | |
2019 | msize_left = NUM_SMALL_BLOCKS - index; | |
2020 | szone->num_small_objects++; | |
2021 | szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(msize); | |
2022 | // add a big free block | |
2023 | index += msize; msize_left -= msize; | |
2024 | meta_headers[index] = msize_left; | |
2025 | szone->small_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(msize_left); | |
2026 | return ptr; | |
2027 | } | |
2028 | ||
2029 | static INLINE boolean_t | |
2030 | try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) | |
2031 | { | |
2032 | // returns 1 on success | |
2033 | msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); | |
2034 | unsigned index = SMALL_META_INDEX_FOR_PTR(ptr); | |
2035 | msize_t old_msize = SMALL_MSIZE_FOR_BYTES(old_size); | |
2036 | msize_t new_msize = SMALL_MSIZE_FOR_BYTES(new_size + SMALL_QUANTUM - 1); | |
2037 | void *next_block = (char *)ptr + old_size; | |
2038 | unsigned next_index = index + old_msize; | |
2039 | msize_t next_msize_and_free; | |
2040 | msize_t next_msize; | |
2041 | msize_t leftover_msize; | |
2042 | void *leftover; | |
2043 | unsigned leftover_index; | |
2044 | ||
2045 | if (next_index >= NUM_SMALL_BLOCKS) { | |
2046 | return 0; | |
2047 | } | |
2048 | #if DEBUG_MALLOC | |
2049 | if ((uintptr_t)next_block & (SMALL_QUANTUM - 1)) { | |
2050 | szone_error(szone, "internal invariant broken in realloc(next_block)", next_block); | |
2051 | } | |
2052 | if (meta_headers[index] != old_msize) | |
2053 | malloc_printf("*** try_realloc_small_in_place incorrect old %d %d\n", | |
2054 | meta_headers[index], old_msize); | |
2055 | #endif | |
2056 | SZONE_LOCK(szone); | |
2057 | /* | |
2058 | * Look for a free block immediately afterwards. If it's large enough, we can consume (part of) | |
2059 | * it. | |
2060 | */ | |
2061 | next_msize_and_free = meta_headers[next_index]; | |
2062 | next_msize = next_msize_and_free & ~ SMALL_IS_FREE; | |
2063 | if (!(next_msize_and_free & SMALL_IS_FREE) || (old_msize + next_msize < new_msize)) { | |
2064 | SZONE_UNLOCK(szone); | |
2065 | return 0; | |
2066 | } | |
2067 | /* | |
2068 | * The following block is big enough; pull it from its freelist and chop off enough to satisfy | |
2069 | * our needs. | |
2070 | */ | |
2071 | small_free_list_remove_ptr(szone, next_block, next_msize); | |
2072 | small_meta_header_set_middle(meta_headers, next_index); | |
2073 | leftover_msize = old_msize + next_msize - new_msize; | |
2074 | if (leftover_msize) { | |
2075 | /* there's some left, so put the remainder back */ | |
2076 | leftover = (unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(new_msize); | |
2077 | small_free_list_add_ptr(szone, leftover, leftover_msize); | |
2078 | leftover_index = index + new_msize; | |
2079 | small_meta_header_set_is_free(meta_headers, leftover_index, leftover_msize); | |
2080 | } | |
2081 | #if DEBUG_MALLOC | |
2082 | if (SMALL_BYTES_FOR_MSIZE(new_msize) >= LARGE_THRESHOLD) { | |
2083 | malloc_printf("*** realloc in place for %p exceeded msize=%d\n", new_msize); | |
2084 | } | |
2085 | #endif | |
2086 | small_meta_header_set_in_use(meta_headers, index, new_msize); | |
2087 | #if DEBUG_MALLOC | |
2088 | if (LOG(szone,ptr)) { | |
2089 | malloc_printf("in szone_realloc(), ptr=%p, msize=%d\n", ptr, *SMALL_METADATA_FOR_PTR(ptr)); | |
2090 | } | |
2091 | #endif | |
2092 | szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(new_msize - old_msize); | |
2093 | SZONE_UNLOCK(szone); | |
2094 | CHECK(szone, __PRETTY_FUNCTION__); | |
2095 | return 1; | |
e9ce8d39 A |
2096 | } |
2097 | ||
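/*
 * Small-region counterpart of tiny_check_region: walks the region's meta headers and
 * verifies block sizes, that free blocks never appear back to back, free-list checksums
 * and linkage, and each free block's trailing "previous size" marker.  Returns 1 if the
 * region is consistent, 0 after printing the first broken invariant.
 */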
9385eb3d | 2098 | static boolean_t |
59e0d9fe A |
2099 | szone_check_small_region(szone_t *szone, small_region_t *region) |
2100 | { | |
2101 | unsigned char *ptr = SMALL_REGION_ADDRESS(*region); | |
2102 | msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); | |
2103 | unsigned char *region_end = SMALL_REGION_END(*region); | |
e9ce8d39 | 2104 | msize_t prev_free = 0; |
59e0d9fe A |
2105 | unsigned index; |
2106 | msize_t msize_and_free; | |
2107 | msize_t msize; | |
2108 | free_list_t *free_head; | |
2109 | msize_t *follower; | |
2110 | ||
2111 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
9385eb3d | 2112 | if (region == szone->small_regions + szone->num_small_regions - 1) region_end -= szone->small_bytes_free_at_end; |
59e0d9fe A |
2113 | while (ptr < region_end) { |
2114 | index = SMALL_META_INDEX_FOR_PTR(ptr); | |
2115 | msize_and_free = meta_headers[index]; | |
2116 | if (!(msize_and_free & SMALL_IS_FREE)) { | |
9385eb3d | 2117 | // block is in use |
59e0d9fe | 2118 | msize = msize_and_free; |
9385eb3d | 2119 | if (!msize) { |
59e0d9fe A |
2120 | malloc_printf("*** invariant broken: null msize ptr=%p region#=%d num_small_regions=%d end=%p\n", |
2121 | ptr, region - szone->small_regions, szone->num_small_regions, (void *)region_end); | |
9385eb3d A |
2122 | return 0; |
2123 | } | |
2124 | if (msize > (LARGE_THRESHOLD / SMALL_QUANTUM)) { | |
59e0d9fe A |
2125 | malloc_printf("*** invariant broken for %p this small msize=%d - size is too large\n", |
2126 | ptr, msize_and_free); | |
9385eb3d A |
2127 | return 0; |
2128 | } | |
59e0d9fe | 2129 | ptr += SMALL_BYTES_FOR_MSIZE(msize); |
9385eb3d A |
2130 | prev_free = 0; |
2131 | } else { | |
2132 | // free pointer | |
59e0d9fe A |
2133 | msize = msize_and_free & ~ SMALL_IS_FREE; |
2134 | free_head = (free_list_t *)ptr; | |
2135 | follower = (msize_t *)FOLLOWING_SMALL_PTR(ptr, msize); | |
2136 | if (!msize) { | |
2137 | malloc_printf("*** invariant broken for free block %p this msize=%d\n", ptr, msize); | |
9385eb3d A |
2138 | return 0; |
2139 | } | |
2140 | if (prev_free) { | |
59e0d9fe | 2141 | malloc_printf("*** invariant broken for %p (2 free in a row)\n", ptr); |
9385eb3d A |
2142 | return 0; |
2143 | } | |
2144 | free_list_checksum(szone, free_head, __PRETTY_FUNCTION__); | |
59e0d9fe A |
2145 | if (free_head->previous && !SMALL_PTR_IS_FREE(free_head->previous)) { |
2146 | malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n", | |
2147 | ptr, free_head->previous); | |
9385eb3d A |
2148 | return 0; |
2149 | } | |
59e0d9fe A |
2150 | if (free_head->next && !SMALL_PTR_IS_FREE(free_head->next)) { |
2151 | malloc_printf("*** invariant broken for %p (next is not a free pointer)\n", ptr); | |
9385eb3d | 2152 | return 0; |
e9ce8d39 | 2153 | } |
9385eb3d | 2154 | if (SMALL_PREVIOUS_MSIZE(follower) != msize) { |
59e0d9fe A |
2155 | malloc_printf("*** invariant broken for small free %p followed by %p in region [%p-%p] " |
2156 | "(end marker incorrect) should be %d; in fact %d\n", | |
2157 | ptr, follower, SMALL_REGION_ADDRESS(*region), region_end, msize, SMALL_PREVIOUS_MSIZE(follower)); | |
9385eb3d | 2158 | return 0; |
e9ce8d39 | 2159 | } |
59e0d9fe | 2160 | ptr = (unsigned char *)follower; |
9385eb3d A |
2161 | prev_free = SMALL_IS_FREE; |
2162 | } | |
e9ce8d39 A |
2163 | } |
2164 | return 1; | |
2165 | } | |
2166 | ||
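/*
 * Small-region analogue of tiny_in_use_enumerator: maps each remote small region via
 * `reader`, reports its admin and pointer ranges, then scans the meta headers and hands
 * every in-use block to `recorder`, flushing the buffer whenever it reaches
 * MAX_RECORDER_BUFFER entries.
 */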
9385eb3d | 2167 | static kern_return_t |
59e0d9fe A |
2168 | small_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t small_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder) |
2169 | { | |
9385eb3d | 2170 | small_region_t *regions; |
e9ce8d39 A |
2171 | unsigned index = 0; |
2172 | vm_range_t buffer[MAX_RECORDER_BUFFER]; | |
2173 | unsigned count = 0; | |
2174 | kern_return_t err; | |
59e0d9fe A |
2175 | small_region_t region; |
2176 | vm_range_t range; | |
2177 | vm_range_t admin_range; | |
2178 | vm_range_t ptr_range; | |
2179 | unsigned char *mapped_region; | |
2180 | msize_t *block_header; | |
2181 | unsigned block_index; | |
2182 | unsigned block_limit; | |
2183 | msize_t msize_and_free; | |
2184 | msize_t msize; | |
2185 | ||
9385eb3d | 2186 | err = reader(task, region_address, sizeof(small_region_t) * num_regions, (void **)®ions); |
e9ce8d39 A |
2187 | if (err) return err; |
2188 | while (index < num_regions) { | |
59e0d9fe A |
2189 | region = regions[index]; |
2190 | range.address = (vm_address_t)SMALL_REGION_ADDRESS(region); | |
2191 | range.size = SMALL_REGION_SIZE; | |
9385eb3d | 2192 | if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) { |
59e0d9fe A |
2193 | admin_range.address = range.address + (1 << SMALL_BLOCKS_ALIGN); |
2194 | admin_range.size = range.size - (1 << SMALL_BLOCKS_ALIGN); | |
9385eb3d A |
2195 | recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1); |
2196 | } | |
2197 | if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) { | |
59e0d9fe A |
2198 | ptr_range.address = range.address; |
2199 | ptr_range.size = 1 << SMALL_BLOCKS_ALIGN; | |
9385eb3d A |
2200 | recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1); |
2201 | } | |
2202 | if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { | |
9385eb3d A |
2203 | err = reader(task, range.address, range.size, (void **)&mapped_region); |
2204 | if (err) return err; | |
59e0d9fe A |
2205 | block_header = (msize_t *)(mapped_region + (1 << SMALL_BLOCKS_ALIGN)); |
2206 | block_index = 0; | |
2207 | block_limit = NUM_SMALL_BLOCKS; | |
9385eb3d | 2208 | if (index == num_regions - 1) |
59e0d9fe | 2209 | block_limit -= SMALL_MSIZE_FOR_BYTES(small_bytes_free_at_end); |
9385eb3d | 2210 | while (block_index < block_limit) { |
59e0d9fe A |
2211 | msize_and_free = block_header[block_index]; |
2212 | msize = msize_and_free & ~ SMALL_IS_FREE; | |
9385eb3d A |
2213 | if (! (msize_and_free & SMALL_IS_FREE)) { |
2214 | // Block in use | |
59e0d9fe A |
2215 | buffer[count].address = range.address + SMALL_BYTES_FOR_MSIZE(block_index); |
2216 | buffer[count].size = SMALL_BYTES_FOR_MSIZE(msize); | |
9385eb3d A |
2217 | count++; |
2218 | if (count >= MAX_RECORDER_BUFFER) { | |
2219 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); | |
2220 | count = 0; | |
2221 | } | |
2222 | } | |
2223 | block_index += msize; | |
2224 | } | |
9385eb3d A |
2225 | } |
2226 | index++; | |
2227 | } | |
2228 | if (count) { | |
2229 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); | |
2230 | } | |
e9ce8d39 A |
2231 | return 0; |
2232 | } | |
2233 | ||
9385eb3d | 2234 | static INLINE void * |
59e0d9fe A |
2235 | small_malloc_from_free_list(szone_t *szone, msize_t msize) |
2236 | { | |
9385eb3d A |
2237 | grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1; |
2238 | unsigned bitmap = szone->small_bitmap & ~ ((1 << grain) - 1); | |
e9ce8d39 A |
2239 | void *ptr; |
2240 | msize_t this_msize; | |
59e0d9fe A |
2241 | free_list_t **free_list; |
2242 | free_list_t **limit; | |
2243 | free_list_t *next; | |
2244 | msize_t leftover_msize; | |
2245 | void *leftover_ptr; | |
2246 | msize_t *meta_headers; | |
2247 | unsigned leftover_index; | |
2248 | ||
2249 | // Assumes locked | |
2250 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
2251 | ||
9385eb3d A |
2252 | if (!bitmap) goto try_small_from_end; |
2253 | grain = BITMAP32_FFS(bitmap) - 1; | |
e9ce8d39 | 2254 | // first try the small grains |
59e0d9fe | 2255 | limit = szone->small_free_list + NUM_SMALL_SLOTS - 1; |
9385eb3d | 2256 | free_list = szone->small_free_list + grain; |
e9ce8d39 | 2257 | while (free_list < limit) { |
9385eb3d A |
2258 | // try bigger grains |
2259 | ptr = *free_list; | |
2260 | if (ptr) { | |
9385eb3d A |
2261 | next = ((free_list_t *)ptr)->next; |
2262 | if (next) { | |
2263 | next->previous = NULL; | |
2264 | free_list_set_checksum(szone, next); | |
2265 | } | |
2266 | *free_list = next; | |
59e0d9fe | 2267 | this_msize = SMALL_PTR_SIZE(ptr); |
9385eb3d A |
2268 | goto add_leftover_and_proceed; |
2269 | } | |
2270 | free_list++; | |
e9ce8d39 A |
2271 | } |
2272 | // We now check the large grains for one that is big enough | |
e9ce8d39 A |
2273 | ptr = *free_list; |
2274 | while (ptr) { | |
59e0d9fe | 2275 | this_msize = SMALL_PTR_SIZE(ptr); |
9385eb3d | 2276 | if (this_msize >= msize) { |
9385eb3d A |
2277 | small_free_list_remove_ptr(szone, ptr, this_msize); |
2278 | goto add_leftover_and_proceed; | |
2279 | } | |
2280 | ptr = ((free_list_t *)ptr)->next; | |
2281 | } | |
2282 | try_small_from_end: | |
2283 | // Let's see if we can use szone->small_bytes_free_at_end | |
59e0d9fe | 2284 | if (szone->small_bytes_free_at_end >= SMALL_BYTES_FOR_MSIZE(msize)) { |
9385eb3d | 2285 | ptr = (void *)(SMALL_REGION_END(szone->small_regions[szone->num_small_regions-1]) - szone->small_bytes_free_at_end); |
59e0d9fe | 2286 | szone->small_bytes_free_at_end -= SMALL_BYTES_FOR_MSIZE(msize); |
9385eb3d A |
2287 | if (szone->small_bytes_free_at_end) { |
2288 | // let's mark this block as in use to serve as boundary | |
59e0d9fe | 2289 | *SMALL_METADATA_FOR_PTR(ptr + SMALL_BYTES_FOR_MSIZE(msize)) = SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end); |
9385eb3d A |
2290 | } |
2291 | this_msize = msize; | |
2292 | goto return_small_alloc; | |
e9ce8d39 A |
2293 | } |
2294 | return NULL; | |
2295 | add_leftover_and_proceed: | |
9385eb3d | 2296 | if (this_msize > msize) { |
59e0d9fe A |
2297 | leftover_msize = this_msize - msize; |
2298 | leftover_ptr = ptr + SMALL_BYTES_FOR_MSIZE(msize); | |
9385eb3d A |
2299 | #if DEBUG_MALLOC |
2300 | if (LOG(szone,ptr)) { | |
59e0d9fe | 2301 | malloc_printf("in small_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize); |
9385eb3d A |
2302 | } |
2303 | #endif | |
2304 | small_free_list_add_ptr(szone, leftover_ptr, leftover_msize); | |
59e0d9fe A |
2305 | meta_headers = SMALL_META_HEADER_FOR_PTR(leftover_ptr); |
2306 | leftover_index = SMALL_META_INDEX_FOR_PTR(leftover_ptr); | |
9385eb3d A |
2307 | small_meta_header_set_is_free(meta_headers, leftover_index, leftover_msize); |
2308 | this_msize = msize; | |
e9ce8d39 | 2309 | } |
9385eb3d | 2310 | return_small_alloc: |
e9ce8d39 | 2311 | szone->num_small_objects++; |
59e0d9fe | 2312 | szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(this_msize); |
e9ce8d39 | 2313 | #if DEBUG_MALLOC |
9385eb3d | 2314 | if (LOG(szone,ptr)) { |
59e0d9fe | 2315 | malloc_printf("in small_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize); |
9385eb3d | 2316 | } |
e9ce8d39 | 2317 | #endif |
59e0d9fe | 2318 | *SMALL_METADATA_FOR_PTR(ptr) = this_msize; |
e9ce8d39 A |
2319 | return ptr; |
2320 | } | |
2321 | ||
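/*
 * Front end for small allocations, mirroring tiny_malloc_should_clear: first the
 * one-entry last_small_free cache (msize rides in the low bits of the cached pointer),
 * then the free lists, then a new region.  Only recycled memory is zeroed when
 * cleared_requested; a freshly mapped region is returned untouched.
 */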
9385eb3d | 2322 | static INLINE void * |
59e0d9fe A |
2323 | small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) |
2324 | { | |
e9ce8d39 | 2325 | boolean_t locked = 0; |
2326 | void *ptr; /* also used on the non-cache path below, so declare it unconditionally */ |
2327 | ||
2328 | ||
2329 | ||
9385eb3d A |
2330 | #if SMALL_CACHE |
2331 | ptr = (void *)szone->last_small_free; | |
59e0d9fe | 2332 | if ((((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) == msize) { |
9385eb3d A |
2333 | // we have a candidate - let's lock to make sure |
2334 | LOCK_AND_NOTE_LOCKED(szone, locked); | |
2335 | if (ptr == (void *)szone->last_small_free) { | |
2336 | szone->last_small_free = NULL; | |
9385eb3d A |
2337 | SZONE_UNLOCK(szone); |
2338 | CHECK(szone, __PRETTY_FUNCTION__); | |
59e0d9fe | 2339 | ptr = (void *)((uintptr_t)ptr & ~ (SMALL_QUANTUM - 1)); |
9385eb3d | 2340 | if (cleared_requested) { |
59e0d9fe | 2341 | memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); |
9385eb3d A |
2342 | } |
2343 | return ptr; | |
2344 | } | |
e9ce8d39 A |
2345 | } |
2346 | #endif | |
59e0d9fe A |
2347 | // Except in rare occasions where we need to add a new region, we are going to end up locking, |
2348 | // so we might as well lock right away to avoid doing unnecessary optimistic probes | |
9385eb3d A |
2349 | if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked); |
2350 | ptr = small_malloc_from_free_list(szone, msize); | |
e9ce8d39 | 2351 | if (ptr) { |
9385eb3d A |
2352 | SZONE_UNLOCK(szone); |
2353 | CHECK(szone, __PRETTY_FUNCTION__); | |
2354 | if (cleared_requested) { | |
59e0d9fe | 2355 | memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); |
9385eb3d A |
2356 | } |
2357 | return ptr; | |
e9ce8d39 | 2358 | } |
9385eb3d A |
2359 | ptr = small_malloc_from_region_no_lock(szone, msize); |
2360 | // we don't clear because this freshly allocated space is pristine | |
2361 | SZONE_UNLOCK(szone); | |
2362 | CHECK(szone, __PRETTY_FUNCTION__); | |
e9ce8d39 A |
2363 | return ptr; |
2364 | } | |
2365 | ||
59e0d9fe | 2366 | // tries to allocate a small, cleared block |
9385eb3d | 2367 | static INLINE void * |
59e0d9fe A |
2368 | small_malloc_cleared_no_lock(szone_t *szone, msize_t msize) |
2369 | { | |
2370 | void *ptr; | |
2371 | ||
9385eb3d A |
2372 | // Assumes already locked |
2373 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
9385eb3d | 2374 | ptr = small_malloc_from_free_list(szone, msize); |
e9ce8d39 | 2375 | if (ptr) { |
59e0d9fe | 2376 | memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); |
9385eb3d | 2377 | return ptr; |
e9ce8d39 | 2378 | } else { |
9385eb3d A |
2379 | ptr = small_malloc_from_region_no_lock(szone, msize); |
2380 | // we don't clear because this freshly allocated space is pristine | |
e9ce8d39 A |
2381 | } |
2382 | return ptr; | |
2383 | } | |
2384 | ||
9385eb3d | 2385 | static INLINE void |
59e0d9fe A |
2386 | free_small(szone_t *szone, void *ptr, small_region_t *small_region) |
2387 | { | |
9385eb3d | 2388 | msize_t msize_and_free; |
59e0d9fe A |
2389 | #if SMALL_CACHE |
2390 | void *ptr2; | |
2391 | #endif | |
2392 | ||
2393 | // ptr is known to be in small_region | |
2394 | msize_and_free = *SMALL_METADATA_FOR_PTR(ptr); | |
9385eb3d A |
2395 | if (msize_and_free & SMALL_IS_FREE) { |
2396 | szone_error(szone, "Object already freed being freed", ptr); | |
2397 | return; | |
2398 | } | |
2399 | CHECK(szone, __PRETTY_FUNCTION__); | |
9385eb3d A |
2400 | SZONE_LOCK(szone); |
2401 | #if SMALL_CACHE | |
59e0d9fe A |
2402 | ptr2 = szone->last_small_free; |
2403 | szone->last_small_free = (void *)(((uintptr_t)ptr) | msize_and_free); | |
9385eb3d | 2404 | if (!ptr2) { |
9385eb3d A |
2405 | SZONE_UNLOCK(szone); |
2406 | CHECK(szone, __PRETTY_FUNCTION__); | |
2407 | return; | |
2408 | } | |
59e0d9fe A |
2409 | msize_and_free = (uintptr_t)ptr2 & (SMALL_QUANTUM - 1); |
2410 | ptr = (void *)(((uintptr_t)ptr2) & ~ (SMALL_QUANTUM - 1)); | |
9385eb3d A |
2411 | small_region = small_region_for_ptr_no_lock(szone, ptr); |
2412 | if (!small_region) { | |
59e0d9fe A |
2413 | szone_error(szone, "double free (small cache)", ptr); |
2414 | return; | |
9385eb3d A |
2415 | } |
2416 | #endif | |
2417 | small_free_no_lock(szone, small_region, ptr, msize_and_free); | |
2418 | SZONE_UNLOCK(szone); | |
2419 | CHECK(szone, __PRETTY_FUNCTION__); | |
2420 | } | |
2421 | ||
2422 | static void | |
59e0d9fe A |
2423 | print_small_free_list(szone_t *szone) |
2424 | { | |
9385eb3d | 2425 | grain_t grain = 0; |
59e0d9fe A |
2426 | free_list_t *ptr; |
2427 | ||
2428 | malloc_printf("small free sizes: "); | |
9385eb3d | 2429 | while (grain < NUM_SMALL_SLOTS) { |
59e0d9fe | 2430 | ptr = szone->small_free_list[grain]; |
9385eb3d A |
2431 | if (ptr) { |
2432 | malloc_printf("%s%y[%d]; ", (grain == NUM_SMALL_SLOTS-1) ? ">=" : "", (grain + 1) * SMALL_QUANTUM, free_list_count(ptr)); | |
2433 | } | |
2434 | grain++; | |
2435 | } | |
2436 | malloc_printf("\n"); | |
2437 | } | |
2438 | ||
2439 | static void | |
59e0d9fe A |
2440 | print_small_region(szone_t *szone, boolean_t verbose, small_region_t *region, size_t bytes_at_end) |
2441 | { | |
2442 | unsigned counts[1024]; | |
2443 | unsigned in_use = 0; | |
2444 | void *start = SMALL_REGION_ADDRESS(*region); | |
2445 | void *limit = SMALL_REGION_END(*region) - bytes_at_end; | |
2446 | msize_t msize_and_free; | |
2447 | msize_t msize; | |
2448 | unsigned ci; | |
2449 | ||
9385eb3d A |
2450 | memset(counts, 0, 1024 * sizeof(unsigned)); |
2451 | while (start < limit) { | |
59e0d9fe A |
2452 | msize_and_free = *SMALL_METADATA_FOR_PTR(start); |
2453 | msize = msize_and_free & ~ SMALL_IS_FREE; | |
9385eb3d A |
2454 | if (!(msize_and_free & SMALL_IS_FREE)) { |
2455 | // block in use | |
59e0d9fe A |
2456 | if (msize < 1024) |
2457 | counts[msize]++; | |
9385eb3d A |
2458 | in_use++; |
2459 | } | |
59e0d9fe | 2460 | start += SMALL_BYTES_FOR_MSIZE(msize); |
9385eb3d | 2461 | } |
59e0d9fe A |
2462 | malloc_printf("Small region [%p-%p, %y]\tIn_use=%d ", |
2463 | SMALL_REGION_ADDRESS(*region), SMALL_REGION_END(*region), (int)SMALL_REGION_SIZE, in_use); | |
2464 | if (bytes_at_end) | |
2465 | malloc_printf("Untouched=%y ", bytes_at_end); | |
9385eb3d | 2466 | if (verbose && in_use) { |
9385eb3d | 2467 | malloc_printf("\n\tSizes in use: "); |
59e0d9fe A |
2468 | for (ci = 0; ci < 1024; ci++) |
2469 | if (counts[ci]) | |
2470 | malloc_printf("%d[%d] ", SMALL_BYTES_FOR_MSIZE(ci), counts[ci]); | |
9385eb3d A |
2471 | } |
2472 | malloc_printf("\n"); | |
2473 | } | |
2474 | ||
2475 | static boolean_t | |
59e0d9fe A |
2476 | small_free_list_check(szone_t *szone, grain_t grain) |
2477 | { | |
9385eb3d A |
2478 | unsigned count = 0; |
2479 | free_list_t *ptr = szone->small_free_list[grain]; | |
2480 | free_list_t *previous = NULL; | |
59e0d9fe A |
2481 | msize_t msize_and_free; |
2482 | ||
2483 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
9385eb3d | 2484 | while (ptr) { |
59e0d9fe | 2485 | msize_and_free = *SMALL_METADATA_FOR_PTR(ptr); |
9385eb3d A |
2486 | count++; |
2487 | if (!(msize_and_free & SMALL_IS_FREE)) { | |
59e0d9fe | 2488 | malloc_printf("*** in-use ptr in free list grain=%d count=%d ptr=%p\n", grain, count, ptr); |
9385eb3d A |
2489 | return 0; |
2490 | } | |
59e0d9fe A |
2491 | if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) { |
2492 | malloc_printf("*** unaligned ptr in free list grain=%d count=%d ptr=%p\n", grain, count, ptr); | |
9385eb3d A |
2493 | return 0; |
2494 | } | |
2495 | if (!small_region_for_ptr_no_lock(szone, ptr)) { | |
59e0d9fe | 2496 | malloc_printf("*** ptr not in szone grain=%d count=%d ptr=%p\n", grain, count, ptr); |
9385eb3d A |
2497 | return 0; |
2498 | } | |
2499 | free_list_checksum(szone, ptr, __PRETTY_FUNCTION__); | |
2500 | if (ptr->previous != previous) { | |
59e0d9fe | 2501 | malloc_printf("*** previous incorrectly set grain=%d count=%d ptr=%p\n", grain, count, ptr); |
9385eb3d A |
2502 | return 0; |
2503 | } | |
2504 | previous = ptr; | |
2505 | ptr = ptr->next; | |
2506 | } | |
2507 | return 1; | |
2508 | } | |
2509 | ||
e9ce8d39 A |
2510 | /********************* LARGE ENTRY UTILITIES ************************/ |
2511 | ||
2512 | #if DEBUG_MALLOC | |
2513 | ||
9385eb3d | 2514 | static void |
59e0d9fe A |
2515 | large_debug_print(szone_t *szone) |
2516 | { | |
2517 | unsigned num_large_entries = szone->num_large_entries; | |
2518 | unsigned index = num_large_entries; | |
2519 | large_entry_t *range; | |
2520 | ||
2521 | for (index = 0, range = szone->large_entries; index < szone->num_large_entries; index++, range++) | |
2522 | if (!LARGE_ENTRY_IS_EMPTY(*range)) | |
2523 | malloc_printf("%d: %p(%y); ", index, LARGE_ENTRY_ADDRESS(*range), LARGE_ENTRY_SIZE(*range)); | |
2524 | ||
e9ce8d39 A |
2525 | malloc_printf("\n"); |
2526 | } | |
2527 | #endif | |
2528 | ||
59e0d9fe A |
2529 | /* |
2530 | * Scan the hash ring looking for an entry for the given pointer. | |
2531 | */ | |
9385eb3d | 2532 | static large_entry_t * |
59e0d9fe A |
2533 | large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) |
2534 | { | |
2535 | // result only valid with lock held | |
2536 | unsigned num_large_entries = szone->num_large_entries; | |
2537 | unsigned hash_index; | |
2538 | unsigned index; | |
2539 | large_entry_t *range; | |
2540 | ||
2541 | if (!num_large_entries) | |
2542 | return NULL; | |
2543 | hash_index = ((uintptr_t)ptr >> vm_page_shift) % num_large_entries; | |
e9ce8d39 A |
2544 | index = hash_index; |
2545 | do { | |
59e0d9fe A |
2546 | range = szone->large_entries + index; |
2547 | if (LARGE_ENTRY_MATCHES(*range, ptr)) | |
2548 | return range; | |
2549 | if (LARGE_ENTRY_IS_EMPTY(*range)) | |
2550 | return NULL; // end of chain | |
2551 | index++; | |
2552 | if (index == num_large_entries) | |
2553 | index = 0; | |
e9ce8d39 A |
2554 | } while (index != hash_index); |
2555 | return NULL; | |
2556 | } | |
2557 | ||
9385eb3d | 2558 | static void |
59e0d9fe A |
2559 | large_entry_insert_no_lock(szone_t *szone, large_entry_t range) |
2560 | { | |
2561 | unsigned num_large_entries = szone->num_large_entries; | |
2562 | unsigned hash_index = (range.address_and_num_pages >> vm_page_shift) % num_large_entries; | |
2563 | unsigned index = hash_index; | |
2564 | large_entry_t *entry; | |
2565 | ||
e9ce8d39 | 2566 | do { |
59e0d9fe | 2567 | entry = szone->large_entries + index; |
9385eb3d A |
2568 | if (LARGE_ENTRY_IS_EMPTY(*entry)) { |
2569 | *entry = range; | |
2570 | return; // end of chain | |
2571 | } | |
59e0d9fe A |
2572 | index++; |
2573 | if (index == num_large_entries) | |
2574 | index = 0; | |
e9ce8d39 A |
2575 | } while (index != hash_index); |
2576 | } | |
2577 | ||
9385eb3d | 2578 | static INLINE void |
59e0d9fe A |
2579 | large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) |
2580 | { | |
2581 | unsigned num_large_entries = szone->num_large_entries; | |
2582 | unsigned hash_index = entry - szone->large_entries; | |
2583 | unsigned index = hash_index; | |
2584 | large_entry_t range; | |
2585 | ||
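| // Deleting an entry from a linear-probe (open addressing) table can break the probe |
| // chains that run through its slot, so every entry from the vacated slot up to the |
| // next empty slot is pulled out and reinserted at its proper position. |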
e9ce8d39 | 2586 | do { |
59e0d9fe A |
2587 | index++; |
2588 | if (index == num_large_entries) | |
2589 | index = 0; | |
9385eb3d | 2590 | range = szone->large_entries[index]; |
59e0d9fe A |
2591 | if (LARGE_ENTRY_IS_EMPTY(range)) |
2592 | return; | |
9385eb3d A |
2593 | szone->large_entries[index].address_and_num_pages = 0; |
2594 | large_entry_insert_no_lock(szone, range); // this will reinsert in the | |
2595 | // proper place | |
e9ce8d39 A |
2596 | } while (index != hash_index); |
2597 | } | |
2598 | ||
9385eb3d | 2599 | static INLINE large_entry_t * |
59e0d9fe A |
2600 | large_entries_alloc_no_lock(szone_t *szone, unsigned num) |
2601 | { | |
e9ce8d39 A |
2602 | size_t size = num * sizeof(large_entry_t); |
2603 | boolean_t is_vm_allocation = size >= LARGE_THRESHOLD; | |
59e0d9fe | 2604 | |
e9ce8d39 | 2605 | if (is_vm_allocation) { |
9385eb3d A |
2606 | // Note that we allocate memory (via a system call) under a spin lock |
2607 | // That is certainly evil; however, it's very rare in the lifetime of a process |
2608 | // The alternative would slow down the normal case | |
2609 | return (void *)allocate_pages(szone, round_page(size), 0, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE)); | |
e9ce8d39 | 2610 | } else { |
59e0d9fe | 2611 | return small_malloc_cleared_no_lock(szone, SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1)); |
e9ce8d39 A |
2612 | } |
2613 | } | |
2614 | ||
9385eb3d | 2615 | static void |
59e0d9fe A |
2616 | large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate) |
2617 | { | |
9385eb3d | 2618 | // returns range to deallocate |
59e0d9fe A |
2619 | size_t size = num * sizeof(large_entry_t); |
2620 | boolean_t is_vm_allocation = size >= LARGE_THRESHOLD; | |
2621 | small_region_t *region; | |
2622 | msize_t msize_and_free; | |
2623 | ||
e9ce8d39 | 2624 | if (is_vm_allocation) { |
9385eb3d A |
2625 | range_to_deallocate->address = (vm_address_t)entries; |
2626 | range_to_deallocate->size = round_page(size); | |
e9ce8d39 | 2627 | } else { |
9385eb3d | 2628 | range_to_deallocate->size = 0; |
59e0d9fe A |
2629 | region = small_region_for_ptr_no_lock(szone, entries); |
2630 | msize_and_free = *SMALL_METADATA_FOR_PTR(entries); | |
9385eb3d | 2631 | if (msize_and_free & SMALL_IS_FREE) { |
59e0d9fe | 2632 | szone_error(szone, "object already freed being freed", entries); |
9385eb3d A |
2633 | return; |
2634 | } | |
2635 | small_free_no_lock(szone, region, entries, msize_and_free); | |
e9ce8d39 A |
2636 | } |
2637 | } | |
2638 | ||
9385eb3d | 2639 | static void |
59e0d9fe A |
2640 | large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate) |
2641 | { | |
9385eb3d | 2642 | // sets range_to_deallocate |
e9ce8d39 A |
2643 | unsigned old_num_entries = szone->num_large_entries; |
2644 | large_entry_t *old_entries = szone->large_entries; | |
59e0d9fe | 2645 | unsigned new_num_entries = (old_num_entries) ? old_num_entries * 2 + 1 : 63; // always an odd number for good hashing |
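| // Starting from 63 and growing by 2n+1 keeps the table size of the form 2^k - 1 |
| // (63, 127, 255, ...); a non-power-of-two, odd modulus makes the % num_large_entries |
| // hash depend on all bits of the page number rather than just its low bits. |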
e9ce8d39 A |
2646 | large_entry_t *new_entries = large_entries_alloc_no_lock(szone, new_num_entries); |
2647 | unsigned index = old_num_entries; | |
59e0d9fe A |
2648 | large_entry_t oldRange; |
2649 | ||
e9ce8d39 A |
2650 | szone->num_large_entries = new_num_entries; |
2651 | szone->large_entries = new_entries; | |
59e0d9fe A |
2652 | |
2653 | /* rehash entries into the new list */ | |
e9ce8d39 | 2654 | while (index--) { |
59e0d9fe | 2655 | oldRange = old_entries[index]; |
9385eb3d A |
2656 | if (!LARGE_ENTRY_IS_EMPTY(oldRange)) { |
2657 | large_entry_insert_no_lock(szone, oldRange); | |
2658 | } | |
2659 | } | |
2660 | if (old_entries) { | |
2661 | large_entries_free_no_lock(szone, old_entries, old_num_entries, range_to_deallocate); | |
2662 | } else { | |
2663 | range_to_deallocate->size = 0; | |
e9ce8d39 | 2664 | } |
e9ce8d39 A |
2665 | } |
2666 | ||
59e0d9fe A |
2667 | // frees the specific entry in the large entries table |
2668 | // returns a range to truly deallocate | |
9385eb3d | 2669 | static vm_range_t |
59e0d9fe A |
2670 | large_free_no_lock(szone_t *szone, large_entry_t *entry) |
2671 | { | |
e9ce8d39 | 2672 | vm_range_t range; |
59e0d9fe A |
2673 | |
2674 | range.address = (vm_address_t)LARGE_ENTRY_ADDRESS(*entry); | |
2675 | range.size = (vm_size_t)LARGE_ENTRY_SIZE(*entry); | |
2676 | szone->num_large_objects_in_use--; | |
e9ce8d39 A |
2677 | szone->num_bytes_in_large_objects -= range.size; |
2678 | if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) { | |
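| // When guard pages are in use the block was allocated with a protected page on each |
| // side; protect() here re-enables read/write access on those guard pages, and the |
| // range is widened by one page at each end so the deallocation covers them too. |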
59e0d9fe A |
2679 | protect(szone, (void *)range.address, range.size, VM_PROT_READ | VM_PROT_WRITE, szone->debug_flags); |
2680 | range.address -= vm_page_size; | |
2681 | range.size += 2 * vm_page_size; | |
2682 | } | |
e9ce8d39 A |
2683 | entry->address_and_num_pages = 0; |
2684 | large_entries_rehash_after_entry_no_lock(szone, entry); | |
2685 | #if DEBUG_MALLOC | |
2686 | if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) { | |
59e0d9fe A |
2687 | malloc_printf("*** freed entry %p still in use; num_large_entries=%d\n", |
2688 | range.address, szone->num_large_entries); | |
9385eb3d A |
2689 | large_debug_print(szone); |
2690 | szone_sleep(); | |
e9ce8d39 A |
2691 | } |
2692 | #endif | |
3b2a1fe8 A |
2693 | return range; |
2694 | } | |
2695 | ||
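| // Enumerator used by the malloc introspection interface: the large-entry table is |
| // read out of the (possibly remote) target task with `reader`, the table itself is |
| // reported as an admin region when it was vm_allocate'd, and each non-empty entry is |
| // batched into `buffer` and handed to `recorder` as an in-use pointer range. |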
9385eb3d | 2696 | static kern_return_t |
59e0d9fe A |
2697 | large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) |
2698 | { | |
e9ce8d39 A |
2699 | unsigned index = 0; |
2700 | vm_range_t buffer[MAX_RECORDER_BUFFER]; | |
2701 | unsigned count = 0; | |
2702 | large_entry_t *entries; | |
2703 | kern_return_t err; | |
59e0d9fe A |
2704 | vm_range_t range; |
2705 | large_entry_t entry; | |
2706 | ||
2707 | err = reader(task, large_entries_address, sizeof(large_entry_t) * num_entries, (void **)&entries); | |
2708 | if (err) | |
2709 | return err; | |
e9ce8d39 | 2710 | index = num_entries; |
59e0d9fe A |
2711 | if ((type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) && |
2712 | (num_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD)) { | |
9385eb3d A |
2713 | range.address = large_entries_address; |
2714 | range.size = round_page(num_entries * sizeof(large_entry_t)); | |
2715 | recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &range, 1); | |
2716 | } | |
59e0d9fe A |
2717 | if (type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE)) |
2718 | while (index--) { | |
2719 | entry = entries[index]; | |
2720 | if (!LARGE_ENTRY_IS_EMPTY(entry)) { | |
2721 | range.address = (vm_address_t)LARGE_ENTRY_ADDRESS(entry); | |
2722 | range.size = (vm_size_t)LARGE_ENTRY_SIZE(entry); | |
2723 | buffer[count++] = range; | |
2724 | if (count >= MAX_RECORDER_BUFFER) { | |
2725 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count); | |
2726 | count = 0; | |
2727 | } | |
9385eb3d A |
2728 | } |
2729 | } | |
9385eb3d A |
2730 | if (count) { |
2731 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | |
2732 | | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count); | |
2733 | } | |
e9ce8d39 A |
2734 | return 0; |
2735 | } | |
2736 | ||
2737 | /********************* HUGE ENTRY UTILITIES ************************/ | |
2738 | ||
9385eb3d | 2739 | static huge_entry_t * |
59e0d9fe A |
2740 | huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) |
2741 | { | |
2742 | unsigned index; | |
2743 | huge_entry_t *huge; | |
2744 | ||
2745 | for (index = szone->num_huge_entries, huge = szone->huge_entries; | |
2746 | index > 0; | |
2747 | index--, huge++) { | |
2748 | ||
2749 | if ((void *)huge->address == ptr) | |
2750 | return huge; | |
e9ce8d39 A |
2751 | } |
2752 | return NULL; | |
2753 | } | |
2754 | ||
9385eb3d | 2755 | static boolean_t |
59e0d9fe A |
2756 | huge_entry_append(szone_t *szone, huge_entry_t huge) |
2757 | { | |
2758 | huge_entry_t *new_huge_entries = NULL, *old_huge_entries; | |
2759 | unsigned num_huge_entries; | |
2760 | ||
9385eb3d A |
2761 | // We do a little dance with locking because doing allocation (even in the |
2762 | // default szone) may cause something to get freed in this szone, with a | |
2763 | // deadlock | |
2764 | // Returns 1 on success | |
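| // The loop below snapshots num_huge_entries, drops the lock to grow a copy of the |
| // array with szone_malloc, then retakes the lock and commits only if the count is |
| // still the same; otherwise the stale copy is freed and the whole attempt is retried. |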
e9ce8d39 | 2765 | SZONE_LOCK(szone); |
59e0d9fe | 2766 | for (;;) { |
9385eb3d A |
2767 | num_huge_entries = szone->num_huge_entries; |
2768 | SZONE_UNLOCK(szone); | |
59e0d9fe A |
2769 | /* check for counter wrap */ |
2770 | if ((num_huge_entries + 1) < num_huge_entries) | |
2771 | return 0; | |
2772 | /* stale allocation from last time around the loop? */ | |
2773 | if (new_huge_entries) | |
2774 | szone_free(szone, new_huge_entries); | |
9385eb3d | 2775 | new_huge_entries = szone_malloc(szone, (num_huge_entries + 1) * sizeof(huge_entry_t)); |
59e0d9fe A |
2776 | if (new_huge_entries == NULL) |
2777 | return 0; | |
9385eb3d A |
2778 | SZONE_LOCK(szone); |
2779 | if (num_huge_entries == szone->num_huge_entries) { | |
2780 | // No change - our malloc still applies | |
59e0d9fe | 2781 | old_huge_entries = szone->huge_entries; |
9385eb3d A |
2782 | if (num_huge_entries) { |
2783 | memcpy(new_huge_entries, old_huge_entries, num_huge_entries * sizeof(huge_entry_t)); | |
2784 | } | |
2785 | new_huge_entries[szone->num_huge_entries++] = huge; | |
2786 | szone->huge_entries = new_huge_entries; | |
2787 | SZONE_UNLOCK(szone); | |
2788 | szone_free(szone, old_huge_entries); | |
5b2abdfb | 2789 | return 1; |
9385eb3d A |
2790 | } |
2791 | // try again! | |
2792 | } | |
2793 | } | |
2794 | ||
2795 | static kern_return_t | |
59e0d9fe A |
2796 | huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) |
2797 | { | |
e9ce8d39 A |
2798 | huge_entry_t *entries; |
2799 | kern_return_t err; | |
59e0d9fe A |
2800 | |
2801 | err = reader(task, huge_entries_address, sizeof(huge_entry_t) * num_entries, (void **)&entries); | |
2802 | if (err) | |
2803 | return err; | |
2804 | if (num_entries) | |
2805 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, entries, num_entries); | |
2806 | ||
e9ce8d39 A |
2807 | return 0; |
2808 | } | |
2809 | ||
9385eb3d | 2810 | static void * |
59e0d9fe A |
2811 | large_and_huge_malloc(szone_t *szone, unsigned num_pages) |
2812 | { | |
2813 | void *addr; | |
9385eb3d | 2814 | vm_range_t range_to_deallocate; |
59e0d9fe A |
2815 | huge_entry_t huge_entry; |
2816 | size_t size; | |
2817 | large_entry_t large_entry; | |
2818 | ||
2819 | if (!num_pages) | |
2820 | num_pages = 1; // minimal allocation size for this szone | |
2821 | size = (size_t)num_pages << vm_page_shift; | |
9385eb3d | 2822 | range_to_deallocate.size = 0; |
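| // A large_entry_t packs the page count into its low vm_page_shift bits, so requests |
| // of (1 << vm_page_shift) pages or more cannot be represented there and are tracked |
| // in the separate huge-entry array instead. |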
e9ce8d39 | 2823 | if (num_pages >= (1 << vm_page_shift)) { |
59e0d9fe A |
2824 | addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_HUGE)); |
2825 | if (addr == NULL) | |
2826 | return NULL; | |
2827 | huge_entry.size = size; | |
2828 | huge_entry.address = (vm_address_t)addr; | |
2829 | if (!huge_entry_append(szone, huge_entry)) | |
2830 | return NULL; // we are leaking the allocation here | |
9385eb3d | 2831 | SZONE_LOCK(szone); |
59e0d9fe | 2832 | szone->num_bytes_in_huge_objects += size; |
e9ce8d39 | 2833 | } else { |
59e0d9fe | 2834 | |
9385eb3d A |
2835 | addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE)); |
2836 | #if DEBUG_MALLOC | |
59e0d9fe A |
2837 | if (LOG(szone, addr)) |
2838 | malloc_printf("in szone_malloc true large allocation at %p for %y\n", (void *)addr, size); | |
9385eb3d | 2839 | #endif |
5b2abdfb | 2840 | SZONE_LOCK(szone); |
59e0d9fe | 2841 | if (addr == NULL) { |
5b2abdfb A |
2842 | SZONE_UNLOCK(szone); |
2843 | return NULL; | |
e9ce8d39 | 2844 | } |
e9ce8d39 | 2845 | #if DEBUG_MALLOC |
59e0d9fe A |
2846 | if (large_entry_for_pointer_no_lock(szone, addr)) { |
2847 | malloc_printf("freshly allocated is already in use: %p\n", addr); | |
5b2abdfb | 2848 | large_debug_print(szone); |
9385eb3d | 2849 | szone_sleep(); |
5b2abdfb | 2850 | } |
e9ce8d39 | 2851 | #endif |
9385eb3d A |
2852 | if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) { |
2853 | // density of hash table too high; grow table | |
2854 | // we do that under lock to avoid a race | |
9385eb3d A |
2855 | large_entries_grow_no_lock(szone, &range_to_deallocate); |
2856 | } | |
59e0d9fe | 2857 | large_entry.address_and_num_pages = (uintptr_t)addr | num_pages; |
e9ce8d39 | 2858 | #if DEBUG_MALLOC |
59e0d9fe A |
2859 | if (large_entry_for_pointer_no_lock(szone, addr)) { |
2860 | malloc_printf("entry about to be added already in use: %p\n", addr); | |
9385eb3d A |
2861 | large_debug_print(szone); |
2862 | szone_sleep(); | |
2863 | } | |
e9ce8d39 | 2864 | #endif |
59e0d9fe | 2865 | large_entry_insert_no_lock(szone, large_entry); |
e9ce8d39 | 2866 | #if DEBUG_MALLOC |
9385eb3d | 2867 | if (!large_entry_for_pointer_no_lock(szone, (void *)addr)) { |
59e0d9fe | 2868 | malloc_printf("can't find entry just added\n"); |
9385eb3d A |
2869 | large_debug_print(szone); |
2870 | szone_sleep(); | |
2871 | } | |
e9ce8d39 | 2872 | #endif |
9385eb3d A |
2873 | szone->num_large_objects_in_use ++; |
2874 | szone->num_bytes_in_large_objects += size; | |
e9ce8d39 A |
2875 | } |
2876 | SZONE_UNLOCK(szone); | |
9385eb3d | 2877 | if (range_to_deallocate.size) { |
59e0d9fe | 2878 | deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0); // we deallocate outside the lock |
9385eb3d | 2879 | } |
e9ce8d39 A |
2880 | return (void *)addr; |
2881 | } | |
2882 | ||
9385eb3d | 2883 | static INLINE void |
59e0d9fe A |
2884 | free_large_or_huge(szone_t *szone, void *ptr) |
2885 | { | |
9385eb3d | 2886 | // We have established ptr is page-aligned and not tiny nor small |
e9ce8d39 A |
2887 | large_entry_t *entry; |
2888 | vm_range_t vm_range_to_deallocate; | |
2889 | huge_entry_t *huge; | |
59e0d9fe | 2890 | |
e9ce8d39 A |
2891 | SZONE_LOCK(szone); |
2892 | entry = large_entry_for_pointer_no_lock(szone, ptr); | |
2893 | if (entry) { | |
9385eb3d | 2894 | vm_range_to_deallocate = large_free_no_lock(szone, entry); |
e9ce8d39 | 2895 | #if DEBUG_MALLOC |
9385eb3d | 2896 | if (large_entry_for_pointer_no_lock(szone, ptr)) { |
59e0d9fe | 2897 | malloc_printf("*** just after freeing %p still in use num_large_entries=%d\n", ptr, szone->num_large_entries); |
9385eb3d A |
2898 | large_debug_print(szone); |
2899 | szone_sleep(); | |
2900 | } | |
e9ce8d39 A |
2901 | #endif |
2902 | } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) { | |
9385eb3d A |
2903 | vm_range_to_deallocate = *huge; |
2904 | *huge = szone->huge_entries[--szone->num_huge_entries]; // last entry fills that spot | |
59e0d9fe | 2905 | szone->num_bytes_in_huge_objects -= (size_t)vm_range_to_deallocate.size; |
e9ce8d39 A |
2906 | } else { |
2907 | #if DEBUG_MALLOC | |
9385eb3d | 2908 | large_debug_print(szone); |
e9ce8d39 | 2909 | #endif |
59e0d9fe | 2910 | szone_error(szone, "pointer being freed was not allocated", ptr); |
9385eb3d | 2911 | return; |
e9ce8d39 | 2912 | } |
e9ce8d39 | 2913 | SZONE_UNLOCK(szone); // we release the lock asap |
9385eb3d | 2914 | CHECK(szone, __PRETTY_FUNCTION__); |
e9ce8d39 A |
2915 | // we deallocate_pages, including guard pages |
2916 | if (vm_range_to_deallocate.address) { | |
9385eb3d | 2917 | #if DEBUG_MALLOC |
59e0d9fe A |
2918 | if (large_entry_for_pointer_no_lock(szone, (void *)vm_range_to_deallocate.address)) { |
2919 | malloc_printf("*** invariant broken: %p still in use num_large_entries=%d\n", vm_range_to_deallocate.address, szone->num_large_entries); | |
9385eb3d A |
2920 | large_debug_print(szone); |
2921 | szone_sleep(); | |
2922 | } | |
2923 | #endif | |
59e0d9fe | 2924 | deallocate_pages(szone, (void *)vm_range_to_deallocate.address, (size_t)vm_range_to_deallocate.size, 0); |
9385eb3d A |
2925 | } |
2926 | } | |
2927 | ||
2928 | static INLINE int | |
59e0d9fe A |
2929 | try_realloc_large_or_huge_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) |
2930 | { | |
9385eb3d | 2931 | vm_address_t addr = (vm_address_t)ptr + old_size; |
59e0d9fe A |
2932 | large_entry_t *large_entry, saved_entry; |
2933 | huge_entry_t *huge_entry, huge; | |
9385eb3d | 2934 | kern_return_t err; |
59e0d9fe | 2935 | |
e9ce8d39 | 2936 | #if DEBUG_MALLOC |
9385eb3d A |
2937 | if (old_size != ((old_size >> vm_page_shift) << vm_page_shift)) { |
2938 | malloc_printf("*** old_size is %d\n", old_size); | |
2939 | } | |
e9ce8d39 | 2940 | #endif |
9385eb3d | 2941 | SZONE_LOCK(szone); |
59e0d9fe | 2942 | large_entry = large_entry_for_pointer_no_lock(szone, (void *)addr); |
9385eb3d | 2943 | SZONE_UNLOCK(szone); |
59e0d9fe A |
2944 | if (large_entry) { |
2945 | return 0; // large pointer already exists in table - extension is not going to work | |
9385eb3d A |
2946 | } |
2947 | new_size = round_page(new_size); | |
9385eb3d A |
2948 | /* |
2949 | * Ask for allocation at a specific address, and mark as realloc | |
2950 | * to request coalescing with previous realloc'ed extensions. | |
2951 | */ | |
2952 | err = vm_allocate(mach_task_self(), &addr, new_size - old_size, VM_MAKE_TAG(VM_MEMORY_REALLOC)); | |
2953 | if (err != KERN_SUCCESS) { | |
2954 | return 0; | |
2955 | } | |
2956 | SZONE_LOCK(szone); | |
2957 | /* | |
2958 | * If the new size is still under the large/huge threshold, we can just | |
2959 | * extend the existing large block. | |
2960 | * | |
2961 | * Note: this logic is predicated on the understanding that an allocated | |
2962 | * block can never really shrink, so that the new size will always be | |
2963 | * larger than the old size. | |
59e0d9fe A |
2964 | * |
2965 | * Note: the use of 1 << vm_page_shift here has to do with the subdivision | |
2966 | * of the bits in the large_entry_t, and not the size of a page (directly). | |
9385eb3d A |
2967 | */ |
2968 | if ((new_size >> vm_page_shift) < (1 << vm_page_shift)) { | |
2969 | /* extend existing large entry */ | |
59e0d9fe A |
2970 | large_entry = large_entry_for_pointer_no_lock(szone, ptr); |
2971 | if (!large_entry) { | |
9385eb3d A |
2972 | szone_error(szone, "large entry reallocated is not properly in table", ptr); |
2973 | /* XXX will cause fault on next reference to entry */ | |
2974 | } | |
59e0d9fe | 2975 | large_entry->address_and_num_pages = (uintptr_t)ptr | (new_size >> vm_page_shift); |
9385eb3d A |
2976 | szone->num_bytes_in_large_objects += new_size - old_size; |
2977 | } else if ((old_size >> vm_page_shift) >= (1 << vm_page_shift)) { | |
2978 | /* extend existing huge entry */ | |
59e0d9fe | 2979 | huge_entry = huge_entry_for_pointer_no_lock(szone, ptr); |
9385eb3d A |
2980 | if (!huge_entry) { |
2981 | szone_error(szone, "huge entry reallocated is not properly in table", ptr); | |
2982 | /* XXX will cause fault on next reference to huge_entry */ | |
2983 | } | |
2984 | huge_entry->size = new_size; | |
2985 | szone->num_bytes_in_huge_objects += new_size - old_size; | |
2986 | } else { | |
2987 | /* need to convert large entry to huge entry */ | |
9385eb3d A |
2988 | |
2989 | /* release large entry, note we still have the VM allocation */ | |
59e0d9fe A |
2990 | large_entry = large_entry_for_pointer_no_lock(szone, ptr); |
2991 | saved_entry = *large_entry; // in case we need to put it back | |
2992 | large_free_no_lock(szone, large_entry); | |
9385eb3d A |
2993 | szone->num_bytes_in_large_objects -= old_size; |
2994 | ||
2995 | /* and get a huge entry */ | |
2996 | huge.address = (vm_address_t)ptr; | |
2997 | huge.size = new_size; /* fix up size */ | |
2998 | SZONE_UNLOCK(szone); | |
2999 | if (huge_entry_append(szone, huge)) { | |
3000 | szone->num_bytes_in_huge_objects += new_size; | |
3001 | return 1; // success! | |
3002 | } | |
3003 | SZONE_LOCK(szone); | |
3004 | // we leak memory (the extra space appended) but data structures are correct | |
3005 | large_entry_insert_no_lock(szone, saved_entry); // this will reinsert the large entry | |
e9ce8d39 | 3006 | } |
9385eb3d A |
3007 | SZONE_UNLOCK(szone); // we release the lock asap |
3008 | return 1; | |
e9ce8d39 A |
3009 | } |
3010 | ||
9385eb3d A |
3011 | /********************* Zone call backs ************************/ |
3012 | ||
3013 | static void | |
59e0d9fe A |
3014 | szone_free(szone_t *szone, void *ptr) |
3015 | { | |
3016 | tiny_region_t *tiny_region; | |
3017 | small_region_t *small_region; | |
3018 | ||
e9ce8d39 | 3019 | #if DEBUG_MALLOC |
59e0d9fe A |
3020 | if (LOG(szone, ptr)) |
3021 | malloc_printf("in szone_free with %p\n", ptr); | |
e9ce8d39 | 3022 | #endif |
59e0d9fe A |
3023 | if (!ptr) |
3024 | return; | |
3025 | /* | |
3026 | * Try to free to a tiny region. | |
3027 | */ | |
3028 | if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) { | |
9385eb3d A |
3029 | szone_error(szone, "Non-aligned pointer being freed", ptr); |
3030 | return; | |
3031 | } | |
59e0d9fe | 3032 | if ((tiny_region = tiny_region_for_ptr_no_lock(szone, ptr)) != NULL) { |
9385eb3d A |
3033 | free_tiny(szone, ptr, tiny_region); |
3034 | return; | |
3035 | } | |
59e0d9fe A |
3036 | |
3037 | /* | |
3038 | * Try to free to a small region. | |
3039 | */ | |
3040 | if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) { | |
9385eb3d A |
3041 | szone_error(szone, "Non-aligned pointer being freed (2)", ptr); |
3042 | return; | |
3043 | } | |
59e0d9fe | 3044 | if ((small_region = small_region_for_ptr_no_lock(szone, ptr)) != NULL) { |
9385eb3d A |
3045 | free_small(szone, ptr, small_region); |
3046 | return; | |
3047 | } | |
59e0d9fe A |
3048 | |
3049 | /* check that it's a legal large/huge allocation */ | |
3050 | if ((uintptr_t)ptr & (vm_page_size - 1)) { | |
3051 | szone_error(szone, "non-page-aligned, non-allocated pointer being freed", ptr); | |
9385eb3d A |
3052 | return; |
3053 | } | |
3054 | free_large_or_huge(szone, ptr); | |
3055 | } | |
3056 | ||
3057 | static INLINE void * | |
59e0d9fe A |
3058 | szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested) |
3059 | { | |
9385eb3d | 3060 | void *ptr; |
59e0d9fe A |
3061 | msize_t msize; |
3062 | unsigned num_pages; | |
3063 | ||
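| // Three-tier dispatch: requests up to 31 tiny quanta go to the tiny allocator; |
| // requests below LARGE_THRESHOLD go to the small allocator (unless guard pages with |
| // PROTECT_SMALL are enabled, which forces such sizes onto the large path); everything |
| // else is rounded up to whole pages and handled as a large or huge allocation. |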
9385eb3d A |
3064 | if (size <= 31*TINY_QUANTUM) { |
3065 | // think tiny | |
59e0d9fe A |
3066 | msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); |
3067 | if (!msize) | |
3068 | msize = 1; | |
9385eb3d A |
3069 | ptr = tiny_malloc_should_clear(szone, msize, cleared_requested); |
3070 | } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) { | |
3071 | // think small | |
59e0d9fe | 3072 | msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1); |
9385eb3d A |
3073 | if (! msize) msize = 1; |
3074 | ptr = small_malloc_should_clear(szone, msize, cleared_requested); | |
e9ce8d39 | 3075 | } else { |
59e0d9fe | 3076 | // large or huge |
9385eb3d | 3077 | num_pages = round_page(size) >> vm_page_shift; |
20d7cd4c A |
3078 | if (num_pages == 0) /* Overflowed */ |
3079 | ptr = 0; | |
3080 | else | |
3081 | ptr = large_and_huge_malloc(szone, num_pages); | |
e9ce8d39 | 3082 | } |
9385eb3d | 3083 | #if DEBUG_MALLOC |
59e0d9fe A |
3084 | if (LOG(szone, ptr)) |
3085 | malloc_printf("szone_malloc returned %p\n", ptr); | |
9385eb3d | 3086 | #endif |
59e0d9fe A |
3087 | /* |
3088 | * If requested, scribble on allocated memory. | |
3089 | */ | |
3090 | if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && ptr && !cleared_requested && size) | |
3091 | memset(ptr, 0xaa, size); | |
3092 | ||
e9ce8d39 A |
3093 | return ptr; |
3094 | } | |
3095 | ||
9385eb3d A |
3096 | static void * |
3097 | szone_malloc(szone_t *szone, size_t size) { | |
59e0d9fe | 3098 | return szone_malloc_should_clear(szone, size, 0); |
e9ce8d39 A |
3099 | } |
3100 | ||
9385eb3d | 3101 | static void * |
59e0d9fe A |
3102 | szone_calloc(szone_t *szone, size_t num_items, size_t size) |
3103 | { | |
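| // Note: num_items * size is not checked for overflow here; a product that wraps |
| // around size_t would silently allocate a smaller, zeroed block. |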
3104 | return szone_malloc_should_clear(szone, num_items * size, 1); | |
e9ce8d39 A |
3105 | } |
3106 | ||
9385eb3d | 3107 | static void * |
59e0d9fe A |
3108 | szone_valloc(szone_t *szone, size_t size) |
3109 | { | |
e9ce8d39 A |
3110 | void *ptr; |
3111 | unsigned num_pages; | |
59e0d9fe | 3112 | |
e9ce8d39 | 3113 | num_pages = round_page(size) >> vm_page_shift; |
5b2abdfb | 3114 | ptr = large_and_huge_malloc(szone, num_pages); |
9385eb3d | 3115 | #if DEBUG_MALLOC |
59e0d9fe A |
3116 | if (LOG(szone, ptr)) |
3117 | malloc_printf("szone_valloc returned %p\n", ptr); | |
9385eb3d | 3118 | #endif |
e9ce8d39 A |
3119 | return ptr; |
3120 | } | |
3121 | ||
9385eb3d | 3122 | static size_t |
59e0d9fe A |
3123 | szone_size(szone_t *szone, const void *ptr) |
3124 | { | |
e9ce8d39 | 3125 | size_t size = 0; |
59e0d9fe A |
3126 | boolean_t is_free; |
3127 | msize_t msize, msize_and_free; | |
e9ce8d39 A |
3128 | large_entry_t *entry; |
3129 | huge_entry_t *huge; | |
59e0d9fe A |
3130 | |
3131 | if (!ptr) | |
3132 | return 0; | |
9385eb3d A |
3133 | #if DEBUG_MALLOC |
3134 | if (LOG(szone, ptr)) { | |
59e0d9fe A |
3135 | malloc_printf("in szone_size for %p (szone=%p)\n", ptr, szone); |
3136 | } | |
3137 | #endif | |
3138 | ||
3139 | /* | |
3140 | * Look for it in a tiny region. | |
3141 | */ | |
3142 | if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) | |
3143 | return 0; | |
3144 | if (tiny_region_for_ptr_no_lock(szone, ptr)) { | |
3145 | msize = get_tiny_meta_header(ptr, &is_free); | |
3146 | return (is_free) ? 0 : TINY_BYTES_FOR_MSIZE(msize); | |
3147 | } | |
3148 | ||
3149 | /* | |
3150 | * Look for it in a small region. | |
3151 | */ | |
3152 | if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) | |
9385eb3d | 3153 | return 0; |
59e0d9fe A |
3154 | if (small_region_for_ptr_no_lock(szone, ptr)) { |
3155 | msize_and_free = *SMALL_METADATA_FOR_PTR(ptr); | |
3156 | return (msize_and_free & SMALL_IS_FREE) ? 0 : SMALL_BYTES_FOR_MSIZE(msize_and_free); | |
e9ce8d39 | 3157 | } |
59e0d9fe A |
3158 | |
3159 | /* | |
3160 | * If not page-aligned, it cannot have come from a large or huge allocation. | |
3161 | */ | |
3162 | if ((uintptr_t)ptr & (vm_page_size - 1)) | |
3163 | return(0); | |
3164 | ||
3165 | /* | |
3166 | * Look for it in a large or huge entry. | |
3167 | */ | |
e9ce8d39 A |
3168 | SZONE_LOCK(szone); |
3169 | entry = large_entry_for_pointer_no_lock(szone, ptr); | |
3170 | if (entry) { | |
9385eb3d | 3171 | size = LARGE_ENTRY_SIZE(*entry); |
e9ce8d39 | 3172 | } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) { |
9385eb3d | 3173 | size = huge->size; |
e9ce8d39 A |
3174 | } |
3175 | SZONE_UNLOCK(szone); | |
9385eb3d A |
3176 | #if DEBUG_MALLOC |
3177 | if (LOG(szone, ptr)) { | |
3178 | malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)size); | |
3179 | } | |
3180 | #endif | |
e9ce8d39 A |
3181 | return size; |
3182 | } | |
3183 | ||
9385eb3d | 3184 | static void * |
59e0d9fe A |
3185 | szone_realloc(szone_t *szone, void *ptr, size_t new_size) |
3186 | { | |
3187 | size_t old_size; | |
9385eb3d | 3188 | void *new_ptr; |
59e0d9fe | 3189 | |
e9ce8d39 | 3190 | #if DEBUG_MALLOC |
9385eb3d | 3191 | if (LOG(szone, ptr)) { |
59e0d9fe | 3192 | malloc_printf("in szone_realloc for %p, %d\n", ptr, (unsigned)new_size); |
e9ce8d39 A |
3193 | } |
3194 | #endif | |
9385eb3d A |
3195 | if (!ptr) { |
3196 | ptr = szone_malloc(szone, new_size); | |
9385eb3d A |
3197 | return ptr; |
3198 | } | |
3199 | old_size = szone_size(szone, ptr); | |
3200 | if (!old_size) { | |
59e0d9fe | 3201 | szone_error(szone, "pointer being reallocated was not allocated", ptr); |
9385eb3d | 3202 | return NULL; |
e9ce8d39 | 3203 | } |
9385eb3d | 3204 | /* we never shrink an allocation */ |
59e0d9fe A |
3205 | if (old_size >= new_size) |
3206 | return ptr; | |
3207 | ||
3208 | /* | |
3209 | * If the old and new sizes both suit the tiny allocator, try to reallocate in-place. | |
3210 | */ | |
9385eb3d | 3211 | if ((new_size + TINY_QUANTUM - 1) <= 31 * TINY_QUANTUM) { |
9385eb3d | 3212 | if (try_realloc_tiny_in_place(szone, ptr, old_size, new_size)) { |
9385eb3d A |
3213 | return ptr; |
3214 | } | |
59e0d9fe A |
3215 | |
3216 | /* | |
3217 | * If the old and new sizes both suit the small allocator, and we're not protecting the | |
3218 | * small allocations, try to reallocate in-place. | |
3219 | */ | |
3220 | } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && | |
3221 | ((new_size + SMALL_QUANTUM - 1) < LARGE_THRESHOLD) && | |
3222 | (old_size > 31 * TINY_QUANTUM)) { | |
9385eb3d | 3223 | if (try_realloc_small_in_place(szone, ptr, old_size, new_size)) { |
9385eb3d A |
3224 | return ptr; |
3225 | } | |
59e0d9fe A |
3226 | |
3227 | /* | |
3228 | * If the allocation's a large or huge allocation, try to reallocate in-place there. | |
3229 | */ | |
9385eb3d A |
3230 | } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (old_size > LARGE_THRESHOLD)) { |
3231 | if (try_realloc_large_or_huge_in_place(szone, ptr, old_size, new_size)) { | |
3232 | return ptr; | |
3233 | } | |
3234 | } | |
59e0d9fe A |
3235 | |
3236 | /* | |
3237 | * Can't reallocate in place for whatever reason; allocate a new buffer and copy. | |
3238 | */ | |
9385eb3d | 3239 | new_ptr = szone_malloc(szone, new_size); |
59e0d9fe A |
3240 | if (new_ptr == NULL) |
3241 | return NULL; | |
3242 | ||
3243 | /* | |
3244 | * If the allocation's large enough, try to copy using VM. If that fails, or | |
3245 | * if it's too small, just copy by hand. | |
3246 | */ | |
3247 | if ((old_size < VM_COPY_THRESHOLD) || | |
3248 | vm_copy(mach_task_self(), (vm_address_t)ptr, old_size, (vm_address_t)new_ptr)) | |
9385eb3d | 3249 | memcpy(new_ptr, ptr, old_size); |
9385eb3d | 3250 | szone_free(szone, ptr); |
59e0d9fe | 3251 | |
e9ce8d39 | 3252 | #if DEBUG_MALLOC |
9385eb3d A |
3253 | if (LOG(szone, ptr)) { |
3254 | malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size); | |
e9ce8d39 | 3255 | } |
e9ce8d39 | 3256 | #endif |
9385eb3d | 3257 | return new_ptr; |
e9ce8d39 A |
3258 | } |
3259 | ||
59e0d9fe A |
3260 | // given a size, returns pointers capable of holding that size |
3261 | // returns the number of pointers allocated | |
3262 | // may return 0 - this function makes a best-effort attempt, nothing more |
3263 | static unsigned | |
3264 | szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count) | |
3265 | { | |
3266 | msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); | |
3267 | size_t chunk_size = TINY_BYTES_FOR_MSIZE(msize); | |
3268 | free_list_t **free_list = szone->tiny_free_list + msize - 1; | |
3269 | free_list_t *ptr = *free_list; | |
3270 | unsigned found = 0; | |
3271 | ||
3272 | if (size > 31*TINY_QUANTUM) | |
3273 | return 0; // only bother implementing this for tiny | |
3274 | if (!msize) | |
3275 | msize = 1; | |
9385eb3d A |
3276 | CHECK(szone, __PRETTY_FUNCTION__); |
3277 | SZONE_LOCK(szone); // might as well lock right here to avoid concurrency issues | |
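| // Pop up to `count` blocks off the free list for this exact msize, marking each one |
| // in use as it is handed out; afterwards the surviving list head (if any) becomes the |
| // first element, so its previous link and checksum are reset below. |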
9385eb3d | 3278 | while (found < count) { |
59e0d9fe A |
3279 | if (!ptr) |
3280 | break; | |
3281 | *results++ = ptr; | |
3282 | found++; | |
9385eb3d | 3283 | set_tiny_meta_header_in_use(ptr, msize); |
59e0d9fe | 3284 | ptr = ptr->next; |
9385eb3d A |
3285 | } |
3286 | if (ptr) { | |
59e0d9fe A |
3287 | ptr->previous = NULL; |
3288 | free_list_set_checksum(szone, ptr); | |
9385eb3d | 3289 | } |
59e0d9fe A |
3290 | *free_list = ptr; |
3291 | ||
9385eb3d A |
3292 | // Note that we could allocate from the free lists for larger msize |
3293 | // But that may unnecessarily fragment - so we might as well let the client do that |
3294 | // We could also allocate from szone->tiny_bytes_free_at_end | |
3295 | // But that means we'll "eat-up" the untouched area faster, increasing the working set | |
3296 | // So we just return what we have and just that | |
3297 | szone->num_tiny_objects += found; | |
3298 | szone->num_bytes_in_tiny_objects += chunk_size * found; | |
5b2abdfb | 3299 | SZONE_UNLOCK(szone); |
9385eb3d | 3300 | return found; |
5b2abdfb A |
3301 | } |
3302 | ||
59e0d9fe A |
3303 | static void |
3304 | szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count) | |
3305 | { | |
3306 | unsigned cc = 0; | |
3307 | void *ptr; | |
3308 | tiny_region_t *tiny_region; | |
3309 | boolean_t is_free; | |
3310 | msize_t msize; | |
3311 | ||
9385eb3d A |
3312 | // frees all the pointers in to_be_freed |
3313 | // note that to_be_freed may be overwritten during the process | |
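| // Two passes: under a single lock, pointers that belong to a tiny region are freed |
| // directly and their slots NULLed out; anything left over (non-tiny pointers, or the |
| // tail after a suspected double free aborts the first pass) goes through szone_free(). |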
59e0d9fe A |
3314 | if (!count) |
3315 | return; | |
9385eb3d A |
3316 | CHECK(szone, __PRETTY_FUNCTION__); |
3317 | SZONE_LOCK(szone); | |
3318 | while (cc < count) { | |
59e0d9fe A |
3319 | ptr = to_be_freed[cc]; |
3320 | /* XXX this really slows us down */ | |
3321 | tiny_region = tiny_region_for_ptr_no_lock(szone, ptr); | |
9385eb3d A |
3322 | if (tiny_region) { |
3323 | // this is a tiny pointer | |
59e0d9fe A |
3324 | msize = get_tiny_meta_header(ptr, &is_free); |
3325 | if (is_free) | |
3326 | break; // a double free; let the standard free deal with it | |
9385eb3d A |
3327 | tiny_free_no_lock(szone, tiny_region, ptr, msize); |
3328 | to_be_freed[cc] = NULL; | |
3329 | } | |
3330 | cc++; | |
e9ce8d39 | 3331 | } |
9385eb3d A |
3332 | SZONE_UNLOCK(szone); |
3333 | CHECK(szone, __PRETTY_FUNCTION__); | |
3334 | while (count--) { | |
59e0d9fe A |
3335 | ptr = to_be_freed[count]; |
3336 | if (ptr) | |
3337 | szone_free(szone, ptr); | |
e9ce8d39 | 3338 | } |
e9ce8d39 A |
3339 | } |
3340 | ||
9385eb3d | 3341 | static void |
59e0d9fe A |
3342 | szone_destroy(szone_t *szone) |
3343 | { | |
3344 | unsigned index; | |
9385eb3d | 3345 | small_region_t pended_region = 0; |
59e0d9fe A |
3346 | large_entry_t *large; |
3347 | vm_range_t range_to_deallocate; | |
3348 | huge_entry_t *huge; | |
3349 | tiny_region_t tiny_region; | |
3350 | small_region_t small_region; | |
3351 | ||
3352 | /* destroy large entries */ | |
e9ce8d39 A |
3353 | index = szone->num_large_entries; |
3354 | while (index--) { | |
59e0d9fe A |
3355 | large = szone->large_entries + index; |
3356 | if (!LARGE_ENTRY_IS_EMPTY(*large)) { | |
9385eb3d | 3357 | // we deallocate_pages, including guard pages |
59e0d9fe | 3358 | deallocate_pages(szone, (void *)LARGE_ENTRY_ADDRESS(*large), LARGE_ENTRY_SIZE(*large), szone->debug_flags); |
9385eb3d A |
3359 | } |
3360 | } | |
3361 | if (szone->num_large_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD) { | |
59e0d9fe A |
3362 | // we do not free in the small chunk case |
3363 | large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate); | |
3364 | if (range_to_deallocate.size) | |
3365 | deallocate_pages(szone, (void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0); | |
9385eb3d | 3366 | } |
59e0d9fe A |
3367 | |
3368 | /* destroy huge entries */ | |
e9ce8d39 A |
3369 | index = szone->num_huge_entries; |
3370 | while (index--) { | |
59e0d9fe A |
3371 | huge = szone->huge_entries + index; |
3372 | deallocate_pages(szone, (void *)huge->address, huge->size, szone->debug_flags); | |
9385eb3d | 3373 | } |
59e0d9fe A |
3374 | |
3375 | /* destroy tiny regions */ | |
9385eb3d A |
3376 | index = szone->num_tiny_regions; |
3377 | while (index--) { | |
59e0d9fe A |
3378 | tiny_region = szone->tiny_regions[index]; |
3379 | deallocate_pages(szone, TINY_REGION_ADDRESS(tiny_region), TINY_REGION_SIZE, 0); | |
e9ce8d39 | 3380 | } |
59e0d9fe | 3381 | /* destroy small regions; region 0 must go last as it contains the szone */ |
9385eb3d A |
3382 | index = szone->num_small_regions; |
3383 | while (index--) { | |
59e0d9fe A |
3384 | small_region = szone->small_regions[index]; |
3385 | /* | |
3386 | * If we've allocated more than the basic set of small regions, avoid destroying the | |
3387 | * region that contains the array. | |
3388 | */ | |
3389 | if ((index > 0) && | |
3390 | (SMALL_REGION_FOR_PTR(szone->small_regions) == SMALL_REGION_ADDRESS(small_region))) { | |
3391 | pended_region = small_region; | |
9385eb3d | 3392 | } else { |
59e0d9fe | 3393 | deallocate_pages(szone, (void *)SMALL_REGION_ADDRESS(small_region), SMALL_REGION_SIZE, 0); |
9385eb3d A |
3394 | } |
3395 | } | |
59e0d9fe A |
3396 | if (pended_region) |
3397 | deallocate_pages(NULL, (void *)SMALL_REGION_ADDRESS(pended_region), SMALL_REGION_SIZE, 0); | |
e9ce8d39 A |
3398 | } |
3399 | ||
9385eb3d | 3400 | static size_t |
59e0d9fe A |
3401 | szone_good_size(szone_t *szone, size_t size) |
3402 | { | |
3403 | msize_t msize; | |
3404 | unsigned num_pages; | |
3405 | ||
9385eb3d A |
3406 | if (size <= 31 * TINY_QUANTUM) { |
3407 | // think tiny | |
59e0d9fe | 3408 | msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); |
9385eb3d | 3409 | if (! msize) msize = 1; |
3d9156a7 | 3410 | return TINY_BYTES_FOR_MSIZE(msize); |
9385eb3d | 3411 | } |
e9ce8d39 | 3412 | if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) { |
9385eb3d | 3413 | // think small |
59e0d9fe | 3414 | msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1); |
9385eb3d | 3415 | if (! msize) msize = 1; |
59e0d9fe | 3416 | return SMALL_BYTES_FOR_MSIZE(msize); |
e9ce8d39 | 3417 | } else { |
9385eb3d | 3418 | num_pages = round_page(size) >> vm_page_shift; |
59e0d9fe A |
3419 | if (!num_pages) |
3420 | num_pages = 1; // minimal allocation size for this szone |
9385eb3d | 3421 | return num_pages << vm_page_shift; |
e9ce8d39 A |
3422 | } |
3423 | } | |
3424 | ||
3425 | unsigned szone_check_counter = 0; | |
3426 | unsigned szone_check_start = 0; | |
3427 | unsigned szone_check_modulo = 1; | |
3428 | ||
9385eb3d | 3429 | static boolean_t |
59e0d9fe A |
3430 | szone_check_all(szone_t *szone, const char *function) |
3431 | { | |
3432 | int index; | |
3433 | tiny_region_t *tiny; | |
3434 | small_region_t *small; | |
3435 | ||
e9ce8d39 | 3436 | SZONE_LOCK(szone); |
9385eb3d | 3437 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); |
59e0d9fe A |
3438 | |
3439 | /* check tiny regions - could check region count */ |
3440 | for (index = szone->num_tiny_regions - 1, tiny = szone->tiny_regions; | |
3441 | index >= 0; | |
3442 | index--, tiny++) { | |
3443 | if (!tiny_check_region(szone, tiny)) { | |
9385eb3d A |
3444 | SZONE_UNLOCK(szone); |
3445 | szone->debug_flags &= ~ CHECK_REGIONS; | |
59e0d9fe A |
3446 | malloc_printf("*** tiny region %d incorrect szone_check_all(%s) counter=%d\n", |
3447 | szone->num_tiny_regions - index, function, szone_check_counter); | |
3448 | szone_error(szone, "check: tiny region incorrect", NULL); | |
9385eb3d A |
3449 | return 0; |
3450 | } | |
3451 | } | |
3452 | ||
59e0d9fe A |
3453 | for (index = NUM_TINY_SLOTS - 1; index >= 0; index--) { |
3454 | if (!tiny_free_list_check(szone, index)) { | |
9385eb3d A |
3455 | SZONE_UNLOCK(szone); |
3456 | szone->debug_flags &= ~ CHECK_REGIONS; | |
59e0d9fe A |
3457 | malloc_printf("*** tiny free list incorrect (slot=%d) szone_check_all(%s) counter=%d\n", |
3458 | index, function, szone_check_counter); | |
3459 | szone_error(szone, "check: tiny free list incorrect", NULL); | |
9385eb3d A |
3460 | return 0; |
3461 | } | |
9385eb3d A |
3462 | } |
3463 | ||
59e0d9fe A |
3464 | /* check small regions - could check region count */ |
3465 | for (index = szone->num_small_regions - 1, small = szone->small_regions; | |
3466 | index >= 0; | |
3467 | index--, small++) { | |
3468 | if (!szone_check_small_region(szone, small)) { | |
9385eb3d A |
3469 | SZONE_UNLOCK(szone); |
3470 | szone->debug_flags &= ~ CHECK_REGIONS; | |
59e0d9fe A |
3471 | malloc_printf("*** small region %d incorrect szone_check_all(%s) counter=%d\n", |
3472 | szone->num_small_regions - index, function, szone_check_counter); |
3473 | szone_error(szone, "check: small region incorrect", NULL); | |
9385eb3d A |
3474 | return 0; |
3475 | } | |
e9ce8d39 | 3476 | } |
59e0d9fe A |
3477 | for (index = NUM_SMALL_SLOTS - 1; index >= 0; index--) { |
3478 | if (!small_free_list_check(szone, index)) { | |
9385eb3d A |
3479 | SZONE_UNLOCK(szone); |
3480 | szone->debug_flags &= ~ CHECK_REGIONS; | |
59e0d9fe A |
3481 | malloc_printf("*** small free list incorrect (grain=%d) szone_check_all(%s) counter=%d\n", index, function, szone_check_counter); |
3482 | szone_error(szone, "check: small free list incorrect", NULL); | |
9385eb3d A |
3483 | return 0; |
3484 | } | |
e9ce8d39 A |
3485 | } |
3486 | SZONE_UNLOCK(szone); | |
9385eb3d | 3487 | // szone_print(szone, 1); |
e9ce8d39 A |
3488 | return 1; |
3489 | } | |
3490 | ||
9385eb3d | 3491 | static boolean_t |
59e0d9fe A |
3492 | szone_check(szone_t *szone) |
3493 | { | |
3494 | ||
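| // Sampled consistency check: nothing is verified until the call counter reaches |
| // szone_check_start, and after that only every szone_check_modulo-th call runs the |
| // full szone_check_all() scan (a progress message is printed every 10000 calls). |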
3495 | if ((++szone_check_counter % 10000) == 0) | |
3496 | malloc_printf("at szone_check counter=%d\n", szone_check_counter); | |
3497 | if (szone_check_counter < szone_check_start) | |
3498 | return 1; | |
3499 | if (szone_check_counter % szone_check_modulo) | |
3500 | return 1; | |
e9ce8d39 A |
3501 | return szone_check_all(szone, ""); |
3502 | } | |
3503 | ||
9385eb3d | 3504 | static kern_return_t |
59e0d9fe A |
3505 | szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder) |
3506 | { | |
e9ce8d39 A |
3507 | szone_t *szone; |
3508 | kern_return_t err; | |
59e0d9fe | 3509 | |
e9ce8d39 | 3510 | if (!reader) reader = _szone_default_reader; |
e9ce8d39 A |
3511 | err = reader(task, zone_address, sizeof(szone_t), (void **)&szone); |
3512 | if (err) return err; | |
9385eb3d A |
3513 | err = tiny_in_use_enumerator(task, context, type_mask, |
3514 | (vm_address_t)szone->tiny_regions, szone->num_tiny_regions, szone->tiny_bytes_free_at_end , reader, recorder); | |
e9ce8d39 | 3515 | if (err) return err; |
9385eb3d A |
3516 | err = small_in_use_enumerator(task, context, type_mask, |
3517 | (vm_address_t)szone->small_regions, szone->num_small_regions, szone->small_bytes_free_at_end , reader, recorder); | |
e9ce8d39 | 3518 | if (err) return err; |
9385eb3d A |
3519 | err = large_in_use_enumerator(task, context, type_mask, |
3520 | (vm_address_t)szone->large_entries, szone->num_large_entries, reader, | |
3521 | recorder); | |
3522 | if (err) return err; | |
9385eb3d A |
3523 | err = huge_in_use_enumerator(task, context, type_mask, |
3524 | (vm_address_t)szone->huge_entries, szone->num_huge_entries, reader, | |
3525 | recorder); | |
e9ce8d39 A |
3526 | return err; |
3527 | } | |
3528 | ||
9385eb3d A |
3529 | // Following method is deprecated: use scalable_zone_statistics instead |
3530 | void | |
59e0d9fe A |
3531 | scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count) |
3532 | { | |
9385eb3d A |
3533 | szone_t *szone = (void *)zone; |
3534 | unsigned info[13]; | |
59e0d9fe | 3535 | |
9385eb3d A |
3536 | // We do not lock to facilitate debug |
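| // Layout of info[]: [0]/[1] total blocks/bytes in use, [2] bytes touched, [3] bytes |
| // reserved, [4]-[11] blocks/bytes per subzone (tiny, small, large, huge), [12] the |
| // debug flags; only the first `count` entries are copied back to the caller. |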
3537 | info[4] = szone->num_tiny_objects; | |
3538 | info[5] = szone->num_bytes_in_tiny_objects; | |
3539 | info[6] = szone->num_small_objects; | |
3540 | info[7] = szone->num_bytes_in_small_objects; | |
3541 | info[8] = szone->num_large_objects_in_use; | |
3542 | info[9] = szone->num_bytes_in_large_objects; | |
3543 | info[10] = szone->num_huge_entries; | |
3544 | info[11] = szone->num_bytes_in_huge_objects; | |
3545 | info[12] = szone->debug_flags; | |
3546 | info[0] = info[4] + info[6] + info[8] + info[10]; | |
3547 | info[1] = info[5] + info[7] + info[9] + info[11]; | |
3548 | info[3] = szone->num_tiny_regions * TINY_REGION_SIZE + szone->num_small_regions * SMALL_REGION_SIZE + info[9] + info[11]; | |
3549 | info[2] = info[3] - szone->tiny_bytes_free_at_end - szone->small_bytes_free_at_end; | |
3550 | memcpy(info_to_fill, info, sizeof(unsigned)*count); | |
e9ce8d39 A |
3551 | } |
3552 | ||
9385eb3d | 3553 | static void |
59e0d9fe A |
3554 | szone_print(szone_t *szone, boolean_t verbose) |
3555 | { | |
3556 | unsigned info[13]; | |
3557 | unsigned index = 0; | |
3558 | tiny_region_t *region; | |
3559 | ||
9385eb3d A |
3560 | SZONE_LOCK(szone); |
3561 | scalable_zone_info((void *)szone, info, 13); | |
59e0d9fe A |
3562 | malloc_printf("Scalable zone %p: inUse=%d(%y) touched=%y allocated=%y flags=%d\n", |
3563 | szone, info[0], info[1], info[2], info[3], info[12]); | |
3564 | malloc_printf("\ttiny=%d(%y) small=%d(%y) large=%d(%y) huge=%d(%y)\n", | |
3565 | info[4], info[5], info[6], info[7], info[8], info[9], info[10], info[11]); | |
9385eb3d A |
3566 | // tiny |
3567 | malloc_printf("%d tiny regions: \n", szone->num_tiny_regions); | |
3568 | while (index < szone->num_tiny_regions) { | |
59e0d9fe | 3569 | region = szone->tiny_regions + index; |
9385eb3d A |
3570 | print_tiny_region(verbose, *region, (index == szone->num_tiny_regions - 1) ? szone->tiny_bytes_free_at_end : 0); |
3571 | index++; | |
3572 | } | |
3573 | if (verbose) print_tiny_free_list(szone); | |
3574 | // small | |
3575 | malloc_printf("%d small regions: \n", szone->num_small_regions); | |
3576 | index = 0; | |
3577 | while (index < szone->num_small_regions) { | |
59e0d9fe | 3578 | region = szone->small_regions + index; |
9385eb3d A |
3579 | print_small_region(szone, verbose, region, (index == szone->num_small_regions - 1) ? szone->small_bytes_free_at_end : 0); |
3580 | index++; | |
3581 | } | |
59e0d9fe A |
3582 | if (verbose) |
3583 | print_small_free_list(szone); | |
9385eb3d A |
3584 | SZONE_UNLOCK(szone); |
3585 | } | |
3586 | ||
3587 | static void | |
59e0d9fe A |
3588 | szone_log(malloc_zone_t *zone, void *log_address) |
3589 | { | |
3590 | szone_t *szone = (szone_t *)zone; | |
3591 | ||
e9ce8d39 A |
3592 | szone->log_address = log_address; |
3593 | } | |
3594 | ||
9385eb3d | 3595 | static void |
59e0d9fe A |
3596 | szone_force_lock(szone_t *szone) |
3597 | { | |
e9ce8d39 A |
3598 | SZONE_LOCK(szone); |
3599 | } | |
3600 | ||
9385eb3d | 3601 | static void |
59e0d9fe A |
3602 | szone_force_unlock(szone_t *szone) |
3603 | { | |
e9ce8d39 A |
3604 | SZONE_UNLOCK(szone); |
3605 | } | |
3606 | ||
9385eb3d | 3607 | boolean_t |
59e0d9fe A |
3608 | scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone) |
3609 | { | |
3610 | szone_t *szone = (szone_t *)zone; | |
3611 | ||
9385eb3d A |
3612 | switch (subzone) { |
3613 | case 0: | |
3614 | stats->blocks_in_use = szone->num_tiny_objects; | |
3615 | stats->size_in_use = szone->num_bytes_in_tiny_objects; | |
3616 | stats->size_allocated = szone->num_tiny_regions * TINY_REGION_SIZE; | |
3617 | stats->max_size_in_use = stats->size_allocated - szone->tiny_bytes_free_at_end; | |
3618 | return 1; | |
3619 | case 1: | |
3620 | stats->blocks_in_use = szone->num_small_objects; | |
3621 | stats->size_in_use = szone->num_bytes_in_small_objects; | |
3622 | stats->size_allocated = szone->num_small_regions * SMALL_REGION_SIZE; | |
3623 | stats->max_size_in_use = stats->size_allocated - szone->small_bytes_free_at_end; | |
3624 | return 1; | |
3625 | case 2: | |
3626 | stats->blocks_in_use = szone->num_large_objects_in_use; | |
3627 | stats->size_in_use = szone->num_bytes_in_large_objects; | |
3628 | stats->max_size_in_use = stats->size_allocated = stats->size_in_use; | |
3629 | return 1; | |
3630 | case 3: | |
3631 | stats->blocks_in_use = szone->num_huge_entries; | |
3632 | stats->size_in_use = szone->num_bytes_in_huge_objects; | |
3633 | stats->max_size_in_use = stats->size_allocated = stats->size_in_use; | |
3634 | return 1; | |
3635 | } | |
3636 | return 0; | |
3637 | } | |
3638 | ||
3639 | static void | |
59e0d9fe A |
3640 | szone_statistics(szone_t *szone, malloc_statistics_t *stats) |
3641 | { | |
3642 | size_t big_and_huge; | |
3643 | ||
3644 | stats->blocks_in_use = | |
3645 | szone->num_tiny_objects + | |
3646 | szone->num_small_objects + | |
3647 | szone->num_large_objects_in_use + | |
3648 | szone->num_huge_entries; | |
3649 | big_and_huge = szone->num_bytes_in_large_objects + szone->num_bytes_in_huge_objects; | |
9385eb3d | 3650 | stats->size_in_use = szone->num_bytes_in_tiny_objects + szone->num_bytes_in_small_objects + big_and_huge; |
59e0d9fe A |
3651 | stats->max_size_in_use = stats->size_allocated = |
3652 | szone->num_tiny_regions * TINY_REGION_SIZE + | |
3653 | szone->num_small_regions * SMALL_REGION_SIZE + | |
3654 | big_and_huge ; | |
3655 | ||
9385eb3d A |
3656 | // Now we account for the untouched areas |
3657 | stats->max_size_in_use -= szone->tiny_bytes_free_at_end; | |
3658 | stats->max_size_in_use -= szone->small_bytes_free_at_end; | |
3659 | } | |
e9ce8d39 | 3660 | |
9385eb3d A |
3661 | static const struct malloc_introspection_t szone_introspect = { |
3662 | (void *)szone_ptr_in_use_enumerator, | |
3663 | (void *)szone_good_size, | |
3664 | (void *)szone_check, | |
3665 | (void *)szone_print, | |
3666 | szone_log, | |
3667 | (void *)szone_force_lock, | |
3668 | (void *)szone_force_unlock, | |
3669 | (void *)szone_statistics | |
3670 | }; // marked as const to spare the DATA section | |
3671 | ||
3672 | malloc_zone_t * | |
59e0d9fe A |
3673 | create_scalable_zone(size_t initial_size, unsigned debug_flags) |
3674 | { | |
e9ce8d39 | 3675 | szone_t *szone; |
e9ce8d39 | 3676 | size_t msize; |
59e0d9fe A |
3677 | size_t msize_used; |
3678 | msize_t free_msize; | |
3679 | ||
3680 | /* | |
3681 | * Sanity-check our build-time assumptions about the size of a page. | |
3682 | * Since we have sized various things assuming the default page size, | |
3683 | * attempting to determine it dynamically is not useful. | |
3684 | */ | |
3685 | if ((vm_page_size != _vm_page_size) || (vm_page_shift != _vm_page_shift)) { | |
3686 | malloc_printf("*** FATAL ERROR - machine page size does not match our assumptions.\n"); | |
9385eb3d A |
3687 | exit(-1); |
3688 | } | |
59e0d9fe A |
3689 | |
3690 | /* get memory for the zone */ | |
3691 | szone = allocate_pages(NULL, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC)); | |
3692 | if (!szone) | |
3693 | return NULL; | |
3694 | /* set up the szone structure */ | |
9385eb3d A |
3695 | szone->tiny_regions = szone->initial_tiny_regions; |
3696 | szone->small_regions = szone->initial_small_regions; | |
59e0d9fe | 3697 | szone->num_small_objects++; /* the szone structure itself is counted below as the region's first small block */ |
9385eb3d | 3698 | szone->basic_zone.version = 3; |
e9ce8d39 A |
3699 | szone->basic_zone.size = (void *)szone_size; |
3700 | szone->basic_zone.malloc = (void *)szone_malloc; | |
3701 | szone->basic_zone.calloc = (void *)szone_calloc; | |
3702 | szone->basic_zone.valloc = (void *)szone_valloc; | |
3703 | szone->basic_zone.free = (void *)szone_free; | |
3704 | szone->basic_zone.realloc = (void *)szone_realloc; | |
3705 | szone->basic_zone.destroy = (void *)szone_destroy; | |
9385eb3d A |
3706 | szone->basic_zone.batch_malloc = (void *)szone_batch_malloc; |
3707 | szone->basic_zone.batch_free = (void *)szone_batch_free; | |
3708 | szone->basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect; | |
59e0d9fe A |
3709 | szone->debug_flags = debug_flags; |
3710 | ||
3711 | /* as the szone is allocated out of the first small region, reflect that allocation */ |
3712 | szone->small_regions[0] = szone; | |
3713 | szone->num_small_regions = 1; | |
3714 | msize = SMALL_MSIZE_FOR_BYTES(sizeof(szone_t) + SMALL_QUANTUM - 1); | |
3715 | free_msize = NUM_SMALL_BLOCKS - msize; | |
3716 | *SMALL_METADATA_FOR_PTR(szone) = msize; | |
3717 | *(SMALL_METADATA_FOR_PTR(szone) + msize) = free_msize; | |
3718 | szone->small_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(free_msize); | |
3719 | ||
e9ce8d39 | 3720 | LOCK_INIT(szone->lock); |
9385eb3d A |
3721 | #if 0 |
3722 | #warning CHECK_REGIONS enabled | |
3723 | debug_flags |= CHECK_REGIONS; | |
3724 | #endif | |
3725 | #if 0 | |
3726 | #warning LOG enabled | |
3727 | szone->log_address = ~0; | |
3728 | #endif | |
e9ce8d39 A |
3729 | CHECK(szone, __PRETTY_FUNCTION__); |
3730 | return (malloc_zone_t *)szone; | |
3731 | } | |
3732 | ||
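[Editorial usage note, not part of the original source: the pointer returned above is an ordinary malloc_zone_t, so it can be driven through the public malloc_zone_* entry points from malloc/malloc.h. The sketch below is hedged and minimal; the helper name private_zone_example is invented, and 0 is passed for initial_size because this version of the function does not consult it.]

#include <malloc/malloc.h>
#include "scalable_malloc.h"

/* Illustrative only: create a private scalable zone, allocate from it through
 * the standard malloc_zone_* calls, then tear it down. */
static int
private_zone_example(void)
{
    malloc_zone_t *zone;
    void *p;

    zone = create_scalable_zone(0, 0);     /* initial_size ignored; no debug flags */
    if (zone == NULL)
        return -1;
    malloc_zone_register(zone);            /* make the zone visible to zone enumeration */

    p = malloc_zone_malloc(zone, 128);
    if (p != NULL)
        malloc_zone_free(zone, p);

    malloc_destroy_zone(zone);             /* unregisters the zone and releases its memory */
    return 0;
}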
5b2abdfb A |
3733 | /********* Support code for emacs unexec ************/ |
3734 | ||
3735 | /* History of freezedry version numbers: | |
3736 | * | |
3737 | * 1) Old malloc (before the scalable malloc implementation in this file | |
3738 | * existed). | |
3739 | * 2) Original freezedrying code for scalable malloc. This code was apparently | |
3740 | * based on the old freezedrying code and was fundamentally flawed in its | |
3741 | * assumption that tracking allocated memory regions was adequate to fake | |
3742 | * operations on freezedried memory. This doesn't work, since scalable | |
3743 | * malloc does not store flags in front of large page-aligned allocations. | |
3744 | * 3) Original szone-based freezedrying code. | |
9385eb3d | 3745 | * 4) Fresher malloc with tiny zone |
59e0d9fe | 3746 | * 5) 32/64-bit compatible malloc |
5b2abdfb A |
3747 | * |
3748 | * No version backward compatibility is provided, but the version number does | |
3749 | * make it possible for malloc_jumpstart() to return an error if the application | |
3750 | * was freezedried with an older version of malloc. | |
3751 | */ | |
59e0d9fe | 3752 | #define MALLOC_FREEZEDRY_VERSION 5 |
5b2abdfb A |
3753 | |
3754 | typedef struct { | |
3755 | unsigned version; | |
3756 | unsigned nszones; | |
3757 | szone_t *szones; | |
3758 | } malloc_frozen; | |
3759 | ||
9385eb3d | 3760 | static void * |
59e0d9fe A |
3761 | frozen_malloc(szone_t *zone, size_t new_size) |
3762 | { | |
5b2abdfb A |
3763 | return malloc(new_size); |
3764 | } | |
3765 | ||
9385eb3d | 3766 | static void * |
59e0d9fe A |
3767 | frozen_calloc(szone_t *zone, size_t num_items, size_t size) |
3768 | { | |
5b2abdfb A |
3769 | return calloc(num_items, size); |
3770 | } | |
3771 | ||
9385eb3d | 3772 | static void * |
59e0d9fe A |
3773 | frozen_valloc(szone_t *zone, size_t new_size) |
3774 | { | |
5b2abdfb A |
3775 | return valloc(new_size); |
3776 | } | |
3777 | ||
9385eb3d | 3778 | static void * |
59e0d9fe A |
3779 | frozen_realloc(szone_t *zone, void *ptr, size_t new_size) |
3780 | { | |
5b2abdfb A |
3781 | size_t old_size = szone_size(zone, ptr); |
3782 | void *new_ptr; | |
59e0d9fe | 3783 | |
5b2abdfb A |
3784 | if (new_size <= old_size) { |
3785 | return ptr; | |
3786 | } | |
5b2abdfb | 3787 | new_ptr = malloc(new_size); if (new_ptr == NULL) return NULL; /* leave the original block intact on failure */ |
5b2abdfb A |
3788 | if (old_size > 0) { |
3789 | memcpy(new_ptr, ptr, old_size); | |
3790 | } | |
5b2abdfb A |
3791 | return new_ptr; |
3792 | } | |
3793 | ||
9385eb3d | 3794 | static void |
59e0d9fe A |
3795 | frozen_free(szone_t *zone, void *ptr) |
3796 | { | |
5b2abdfb A |
3797 | } |
3798 | ||
9385eb3d | 3799 | static void |
59e0d9fe A |
3800 | frozen_destroy(szone_t *zone) |
3801 | { | |
5b2abdfb A |
3802 | } |
3803 | ||
3804 | /********* Pseudo-private API for emacs unexec ************/ | |
3805 | ||
3806 | /* | |
3807 | * malloc_freezedry() records all of the szones in use, so that they can be | |
3808 | * partially reconstituted by malloc_jumpstart(). Due to the differences | |
3809 | * between reconstituted memory regions and those created by the szone code, | |
3810 | * care is taken not to reallocate from the freezedried memory, except in the | |
3811 | * case of a non-growing realloc(). | |
3812 | * | |
3813 | * Due to the flexibility provided by the zone registration mechanism, it is | |
3814 | * impossible to implement generic freezedrying for any zone type. This code | |
3815 | * only handles applications that use the szone allocator, so malloc_freezedry() | |
3816 | * returns 0 (error) if any non-szone zones are encountered. | |
3817 | */ | |
3818 | ||
59e0d9fe A |
3819 | uintptr_t |
3820 | malloc_freezedry(void) | |
3821 | { | |
5b2abdfb A |
3822 | extern unsigned malloc_num_zones; |
3823 | extern malloc_zone_t **malloc_zones; | |
3824 | malloc_frozen *data; | |
3825 | unsigned i; | |
3826 | ||
3827 | /* Allocate space in which to store the freezedry state. */ | |
3828 | data = (malloc_frozen *) malloc(sizeof(malloc_frozen)); if (data == NULL) return 0; |
3829 | ||
9385eb3d | 3830 | /* Set freezedry version number so that malloc_jumpstart() can check for compatibility. */ |
5b2abdfb A |
3831 | data->version = MALLOC_FREEZEDRY_VERSION; |
3832 | ||
3833 | /* Allocate the array of szone pointers. */ | |
3834 | data->nszones = malloc_num_zones; | |
3835 | data->szones = (szone_t *) calloc(malloc_num_zones, sizeof(szone_t)); if (data->szones == NULL) { free(data); return 0; } |
3836 | ||
59e0d9fe A |
3837 | /* |
3838 | * Fill in the array of szone structures. They are copied rather than | |
5b2abdfb | 3839 | * referenced, since the originals are likely to be clobbered during malloc |
59e0d9fe A |
3840 | * initialization. |
3841 | */ | |
5b2abdfb A |
3842 | for (i = 0; i < malloc_num_zones; i++) { |
3843 | if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) { | |
3844 | /* Unknown zone type. */ | |
3845 | free(data->szones); | |
3846 | free(data); | |
3847 | return 0; | |
3848 | } | |
3849 | memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t)); | |
3850 | } | |
3851 | ||
59e0d9fe | 3852 | return((uintptr_t)data); |
5b2abdfb A |
3853 | } |
3854 | ||
9385eb3d | 3855 | int |
59e0d9fe A |
3856 | malloc_jumpstart(uintptr_t cookie) |
3857 | { | |
3858 | malloc_frozen *data = (malloc_frozen *)cookie; | |
5b2abdfb A |
3859 | unsigned i; |
3860 | ||
3861 | if (data->version != MALLOC_FREEZEDRY_VERSION) { | |
3862 | /* Unsupported freezedry version. */ | |
3863 | return 1; | |
3864 | } | |
3865 | ||
3866 | for (i = 0; i < data->nszones; i++) { | |
3867 | /* Set function pointers. Even the functions that stay the same must be | |
3868 | * set, since there are no guarantees that they will be mapped to the | |
3869 | * same addresses. */ | |
3870 | data->szones[i].basic_zone.size = (void *) szone_size; | |
3871 | data->szones[i].basic_zone.malloc = (void *) frozen_malloc; | |
3872 | data->szones[i].basic_zone.calloc = (void *) frozen_calloc; | |
3873 | data->szones[i].basic_zone.valloc = (void *) frozen_valloc; | |
3874 | data->szones[i].basic_zone.free = (void *) frozen_free; | |
3875 | data->szones[i].basic_zone.realloc = (void *) frozen_realloc; | |
3876 | data->szones[i].basic_zone.destroy = (void *) frozen_destroy; | |
9385eb3d | 3877 | data->szones[i].basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect; |
5b2abdfb A |
3878 | |
3879 | /* Register the freezedried zone. */ | |
3880 | malloc_zone_register(&data->szones[i].basic_zone); | |
3881 | } | |
3882 | ||
3883 | return 0; | |
3884 | } | |
59e0d9fe | 3885 |
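[Editorial usage note, not part of the original source: to make the intended calling sequence concrete, here is a hedged sketch of how an unexec-style dumper might drive this pseudo-private API. malloc_freezedry() is called just before the image is written out, and the cookie is kept in static storage so it survives into the dumped image; malloc_jumpstart() is then called with that cookie early in the restarted executable, before any freezedried pointer is handed back to the allocator. The hook names before_dump/after_restart and the variable saved_malloc_cookie are illustrative, not part of the API.]

#include <stdint.h>
#include <stdio.h>

extern uintptr_t malloc_freezedry(void);
extern int malloc_jumpstart(uintptr_t cookie);

/* Kept in the data segment so the value is preserved in the dumped image. */
static uintptr_t saved_malloc_cookie;

/* Hypothetical hook run just before the image is dumped. */
static int
before_dump(void)
{
    saved_malloc_cookie = malloc_freezedry();
    return (saved_malloc_cookie != 0) ? 0 : -1;   /* 0 means a non-szone zone was found */
}

/* Hypothetical hook run early in the dumped executable, before any freezedried
 * pointer is passed back to free()/realloc(). */
static int
after_restart(void)
{
    if (saved_malloc_cookie != 0 && malloc_jumpstart(saved_malloc_cookie) != 0) {
        fprintf(stderr, "malloc_jumpstart: incompatible freezedry version\n");
        return -1;
    }
    return 0;
}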