/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/* Author: Bertrand Serlet, August 1999 */

#include "scalable_malloc.h"

#include <pthread_internals.h>

#include <unistd.h>
#include <libc.h>
#include <mach/vm_statistics.h>
#include <mach/mach_init.h>

/********************* DEFINITIONS ************************/

#define DEBUG_MALLOC 0 // set to one to debug malloc itself

#define DEBUG_CLIENT 0 // set to one to help debug a nasty memory smasher

#if DEBUG_MALLOC
#warning DEBUG_MALLOC ENABLED
# define INLINE
# define CHECK_LOCKED(szone, fun) \
    do { \
        if (__is_threaded && TRY_LOCK(szone->lock)) { \
            malloc_printf("*** lock was not set %p in %s\n", szone->lock, fun); \
        } \
    } while (0)
#else
# define INLINE __inline__
# define CHECK_LOCKED(szone, fun) {}
#endif

/*
 * Access to global variables is slow, so optimise our handling of vm_page_size
 * and vm_page_shift.
 */
#define _vm_page_size vm_page_size /* to get to the originals */
#define _vm_page_shift vm_page_shift
#define vm_page_size 4096 /* our normal working sizes */
#define vm_page_shift 12

typedef unsigned short msize_t; // a size in multiples of SHIFT_SMALL_QUANTUM or SHIFT_TINY_QUANTUM

/*
 * Note that in the LP64 case, this is 24 bytes, necessitating the 32-byte tiny grain size.
 */
typedef struct {
    uintptr_t checksum;
    void *previous;
    void *next;
} free_list_t;

typedef struct {
    uintptr_t address_and_num_pages;
    // this type represents both an address and a number of pages
    // the low bits are the number of pages; the high bits are the address
    // note that the exact number of bits used for each depends on the page size
    // also, this cannot represent pointers larger than 1 << (vm_page_shift * 2)
} compact_range_t;
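/*
 * Illustrative sketch (not part of the original code): with the 4096-byte page
 * size and 12-bit vm_page_shift assumed above, an entry covering 3 pages at
 * address 0x00200000 would be packed and unpacked roughly as follows; the
 * names below are hypothetical and exist only for this example.
 *
 *     uintptr_t packed = (uintptr_t)0x00200000 | 3;                      // address | num_pages
 *     uintptr_t addr   = (packed >> vm_page_shift) << vm_page_shift;     // 0x00200000
 *     uintptr_t pages  = packed & (vm_page_size - 1);                    // 3
 *
 * The same unpacking is what the LARGE_ENTRY_ADDRESS and LARGE_ENTRY_NUM_PAGES
 * macros further below perform.
 */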

typedef unsigned char grain_t;

#define CHECK_REGIONS (1 << 31)

#ifdef __LP64__
# define CHECKSUM_MAGIC 0xdeadbeef0badc0de
#else
# define CHECKSUM_MAGIC 0x357B
#endif

#define MAX_RECORDER_BUFFER 256

/********************* DEFINITIONS for tiny ************************/

/*
 * Memory in the Tiny range is allocated from regions (heaps) pointed to by the szone's tiny_regions
 * pointer.
 *
 * Each region is laid out as a heap (1MB in 32-bit mode, 2MB in 64-bit mode), followed by a header
 * block.  The header block is arranged:
 *
 * 0x0
 *     header bits
 * 0x2000
 *     0xffffffff pad word
 * 0x2004
 *     in-use bits
 * 0x4004
 *     pad word (not written)
 *
 * Each bitfield comprises NUM_TINY_BLOCKS bits, and refers to the corresponding TINY_QUANTUM block
 * within the heap.
 *
 * The bitfields are used to encode the state of memory within the heap.  The header bit indicates
 * that the corresponding quantum is the first quantum in a block (either in use or free).  The
 * in-use bit is set for the header if the block has been handed out (allocated).  If the header
 * bit is not set, the in-use bit is invalid.
 *
 * The szone maintains an array of 32 freelists, each of which is used to hold free objects
 * of the corresponding quantum size.
 *
 * A free block is laid out as:
 *
 * Offset (32-bit mode)   (64-bit mode)
 * 0x0                    0x0
 *     checksum
 * 0x4                    0x08
 *     previous
 * 0x8                    0x10
 *     next
 * 0xc                    0x18
 *     size (in quantum counts)
 * end - 2                end - 2
 *     size (in quantum counts)
 * end                    end
 *
 * All fields are pointer-sized, except for the size which is an unsigned short.
 *
 */

#ifdef __LP64__
# define SHIFT_TINY_QUANTUM 5 // Required to fit free_list_t
#else
# define SHIFT_TINY_QUANTUM 4 // Required for AltiVec
#endif
#define TINY_QUANTUM (1 << SHIFT_TINY_QUANTUM)

#define FOLLOWING_TINY_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_TINY_QUANTUM))

#define NUM_TINY_SLOTS 32 // number of slots for free-lists

#define SHIFT_NUM_TINY_BLOCKS 16
#define NUM_TINY_BLOCKS (1 << SHIFT_NUM_TINY_BLOCKS)
#define TINY_BLOCKS_ALIGN (SHIFT_NUM_TINY_BLOCKS + SHIFT_TINY_QUANTUM)

/*
 * Enough room for the data, followed by the bit arrays (2-bits per block) plus 2 words of padding
 * as our bitmap operators overflow, plus rounding to the nearest page.
 */
#define TINY_REGION_SIZE ((NUM_TINY_BLOCKS * TINY_QUANTUM + (NUM_TINY_BLOCKS >> 2) + 8 + vm_page_size - 1) & ~ (vm_page_size - 1))
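/*
 * Worked example (illustrative only, 32-bit case): NUM_TINY_BLOCKS = 65536 and
 * TINY_QUANTUM = 16, so the heap itself is 0x100000 bytes (1MB).  The two
 * bitfields take 2 bits per block, i.e. NUM_TINY_BLOCKS >> 2 = 0x4000 bytes,
 * plus 8 bytes of pad words, giving 0x104008; rounding up to the 4096-byte
 * page size yields a region of 0x105000 bytes.
 */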

/*
 * Obtain the size of a free tiny block (in msize_t units).
 */
#ifdef __LP64__
# define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[14])
#else
# define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[6])
#endif
/*
 * The size is also kept at the very end of a free block.
 */
#define TINY_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]

/*
 * Beginning and end pointers for a region's heap.
 */
#define TINY_REGION_ADDRESS(region) ((void *)(region))
#define TINY_REGION_END(region) (TINY_REGION_ADDRESS(region) + (1 << TINY_BLOCKS_ALIGN))

/*
 * Locate the heap base for a pointer known to be within a tiny region.
 */
#define TINY_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << TINY_BLOCKS_ALIGN) - 1)))

/*
 * Convert between byte and msize units.
 */
#define TINY_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_TINY_QUANTUM)
#define TINY_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_TINY_QUANTUM)

/*
 * Locate the block header for a pointer known to be within a tiny region.
 */
#define TINY_BLOCK_HEADER_FOR_PTR(_p) ((void *)(((((uintptr_t)(_p)) >> TINY_BLOCKS_ALIGN) + 1) << TINY_BLOCKS_ALIGN))

/*
 * Locate the inuse map for a given block header pointer.
 */
#define TINY_INUSE_FOR_HEADER(_h) ((void *)((uintptr_t)(_h) + (NUM_TINY_BLOCKS >> 3) + 4))

/*
 * Compute the bitmap index for a pointer known to be within a tiny region.
 */
#define TINY_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_BLOCKS - 1))
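/*
 * Illustrative sketch (not from the original source): for a pointer p inside a
 * tiny region, the metadata for p is reached as follows.  TINY_REGION_FOR_PTR
 * masks p down to the aligned region start, TINY_BLOCK_HEADER_FOR_PTR rounds p
 * up to the header block that follows the heap, and TINY_INDEX_FOR_PTR yields
 * which of the NUM_TINY_BLOCKS quanta p names.
 *
 *     void *region         = TINY_REGION_FOR_PTR(p);        // heap base
 *     unsigned char *hdr   = TINY_BLOCK_HEADER_FOR_PTR(p);  // header bitfield
 *     unsigned char *inuse = TINY_INUSE_FOR_HEADER(hdr);    // in-use bitfield
 *     msize_t idx          = TINY_INDEX_FOR_PTR(p);         // bit index for p
 *
 * get_tiny_meta_header() below combines exactly these steps.
 */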

typedef void *tiny_region_t;

#define INITIAL_NUM_TINY_REGIONS 24 // must be even for szone to be aligned

#define TINY_CACHE 1 // This governs a last-free cache of 1 that bypasses the free-list

#if ! TINY_CACHE
#warning TINY_CACHE turned off
#endif

/********************* DEFINITIONS for small ************************/

/*
 * Memory in the Small range is allocated from regions (heaps) pointed to by the szone's small_regions
 * pointer.
 *
 * Each region is laid out as a heap (8MB in 32-bit and 64-bit mode), followed by the metadata array.
 * The array is arranged as an array of shorts, one for each SMALL_QUANTUM in the heap.
 *
 * The MSB of each short is set for the first quantum in a free block.  The low 15 bits encode the
 * block size (in SMALL_QUANTUM units), or are zero if the quantum is not the first in a block.
 *
 * The szone maintains an array of 32 freelists, each of which is used to hold free objects
 * of the corresponding quantum size.
 *
 * A free block is laid out as:
 *
 * Offset (32-bit mode)   (64-bit mode)
 * 0x0                    0x0
 *     checksum
 * 0x4                    0x08
 *     previous
 * 0x8                    0x10
 *     next
 * 0xc                    0x18
 *     size (in quantum counts)
 * end - 2                end - 2
 *     size (in quantum counts)
 * end                    end
 *
 * All fields are pointer-sized, except for the size which is an unsigned short.
 *
 */
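/*
 * Illustrative example (not from the original source): with SMALL_QUANTUM = 512,
 * a free block of 3KB starting at some quantum q is described by the metadata
 * short for q holding (SMALL_IS_FREE | 6), i.e. 0x8006, while the shorts for
 * the five quanta that follow are 0.  For an allocated block of the same size
 * the word is simply 6 (0x0006).  SMALL_PTR_IS_FREE and SMALL_PTR_SIZE below
 * extract the flag and the size from that word.
 */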

#define SMALL_IS_FREE (1 << 15)

#define SHIFT_SMALL_QUANTUM (SHIFT_TINY_QUANTUM + 5) // 9
#define SMALL_QUANTUM (1 << SHIFT_SMALL_QUANTUM) // 512 bytes

#define FOLLOWING_SMALL_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_SMALL_QUANTUM))

#define NUM_SMALL_SLOTS 32 // number of slots for free-lists

/*
 * We can only represent up to 1<<15 for msize; but we choose to stay even below that to avoid the
 * convention msize=0 => msize = (1<<15)
 */
#define SHIFT_NUM_SMALL_BLOCKS 14
#define NUM_SMALL_BLOCKS (1 << SHIFT_NUM_SMALL_BLOCKS)
#define SMALL_BLOCKS_ALIGN (SHIFT_NUM_SMALL_BLOCKS + SHIFT_SMALL_QUANTUM) // 23
#define SMALL_REGION_SIZE (NUM_SMALL_BLOCKS * SMALL_QUANTUM + NUM_SMALL_BLOCKS * 2) // data + meta data

#define SMALL_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]

/*
 * Convert between byte and msize units.
 */
#define SMALL_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_SMALL_QUANTUM)
#define SMALL_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_SMALL_QUANTUM)


#define SMALL_REGION_ADDRESS(region) ((unsigned char *)region)
#define SMALL_REGION_END(region) (SMALL_REGION_ADDRESS(region) + (1 << SMALL_BLOCKS_ALIGN))

/*
 * Locate the heap base for a pointer known to be within a small region.
 */
#define SMALL_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << SMALL_BLOCKS_ALIGN) - 1)))

/*
 * Locate the metadata base for a pointer known to be within a small region.
 */
#define SMALL_META_HEADER_FOR_PTR(_p) ((msize_t *)(((((uintptr_t)(_p)) >> SMALL_BLOCKS_ALIGN) + 1) << SMALL_BLOCKS_ALIGN))

/*
 * Compute the metadata index for a pointer known to be within a small region.
 */
#define SMALL_META_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_SMALL_QUANTUM) & (NUM_SMALL_BLOCKS - 1))

/*
 * Find the metadata word for a pointer known to be within a small region.
 */
#define SMALL_METADATA_FOR_PTR(_p) (SMALL_META_HEADER_FOR_PTR(_p) + SMALL_META_INDEX_FOR_PTR(_p))

/*
 * Determine whether a pointer known to be within a small region points to memory which is free.
 */
#define SMALL_PTR_IS_FREE(_p) (*SMALL_METADATA_FOR_PTR(_p) & SMALL_IS_FREE)

/*
 * Extract the msize value for a pointer known to be within a small region.
 */
#define SMALL_PTR_SIZE(_p) (*SMALL_METADATA_FOR_PTR(_p) & ~SMALL_IS_FREE)

typedef void * small_region_t;

#define INITIAL_NUM_SMALL_REGIONS 6 // must be even for szone to be aligned

#define PROTECT_SMALL 0 // Should be 0: 1 is too slow for normal use

#define SMALL_CACHE 1
#if !SMALL_CACHE
#warning SMALL_CACHE turned off
#endif

/********************* DEFINITIONS for large ************************/

#define LARGE_THRESHOLD (15 * 1024) // at or above this use "large"

#if (LARGE_THRESHOLD > NUM_SMALL_SLOTS * SMALL_QUANTUM)
#error LARGE_THRESHOLD should always be less than NUM_SMALL_SLOTS * SMALL_QUANTUM
#endif

#define VM_COPY_THRESHOLD (40 * 1024)
// When all memory is touched after a copy, vm_copy() is always a lose
// But if the memory is only read, vm_copy() wins over memmove() at 3 or 4 pages (on a G3/300MHz)
// This must be larger than LARGE_THRESHOLD

/*
 * Given a large_entry, return the address of the allocated block.
 */
#define LARGE_ENTRY_ADDRESS(entry) \
    (void *)(((entry).address_and_num_pages >> vm_page_shift) << vm_page_shift)

/*
 * Given a large entry, return the number of pages or bytes in the allocated block.
 */
#define LARGE_ENTRY_NUM_PAGES(entry) \
    ((entry).address_and_num_pages & (vm_page_size - 1))
#define LARGE_ENTRY_SIZE(entry) \
    (LARGE_ENTRY_NUM_PAGES(entry) << vm_page_shift)

/*
 * Compare a pointer with a large entry.
 */
#define LARGE_ENTRY_MATCHES(entry,ptr) \
    ((((entry).address_and_num_pages - (uintptr_t)(ptr)) >> vm_page_shift) == 0)

#define LARGE_ENTRY_IS_EMPTY(entry) (((entry).address_and_num_pages) == 0)
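/*
 * Illustrative note (not from the original source): large blocks are page
 * aligned, so for a page-aligned candidate ptr the subtraction in
 * LARGE_ENTRY_MATCHES leaves just the page count (a value below vm_page_size)
 * exactly when ptr equals the entry's address; the shift by vm_page_shift then
 * yields 0.  E.g. with an entry packed as 0x00200003 (address 0x00200000,
 * 3 pages), ptr == 0x00200000 gives 3 >> 12 == 0 (a match), while
 * ptr == 0x00201000 leaves a huge unsigned difference (no match).
 */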

typedef compact_range_t large_entry_t;

/********************* DEFINITIONS for huge ************************/

typedef vm_range_t huge_entry_t;

/********************* zone itself ************************/

typedef struct {
    malloc_zone_t basic_zone;
    pthread_lock_t lock;
    unsigned debug_flags;
    void *log_address;

    /* Regions for tiny objects */
    unsigned num_tiny_regions;
    tiny_region_t *tiny_regions;
    void *last_tiny_free; // low SHIFT_TINY_QUANTUM bits indicate the msize
    unsigned tiny_bitmap; // cache of the 32 free lists
    free_list_t *tiny_free_list[NUM_TINY_SLOTS]; // 31 free lists for 1*TINY_QUANTUM to 31*TINY_QUANTUM plus 1 for larger than 32*SMALL_QUANTUM
    size_t tiny_bytes_free_at_end; // the last free region in the last block is treated as a big block in use that is not accounted for
    unsigned num_tiny_objects;
    size_t num_bytes_in_tiny_objects;

    /* Regions for small objects */
    unsigned num_small_regions;
    small_region_t *small_regions;
    void *last_small_free; // low SHIFT_SMALL_QUANTUM bits indicate the msize
    unsigned small_bitmap; // cache of the free list
    free_list_t *small_free_list[NUM_SMALL_SLOTS];
    size_t small_bytes_free_at_end; // the last free region in the last block is treated as a big block in use that is not accounted for
    unsigned num_small_objects;
    size_t num_bytes_in_small_objects;

    /* large objects: vm_page_shift <= log2(size) < 2 * vm_page_shift */
    unsigned num_large_objects_in_use;
    unsigned num_large_entries;
    large_entry_t *large_entries; // hashed by location; null entries don't count
    size_t num_bytes_in_large_objects;

    /* huge objects: log2(size) >= 2 * vm_page_shift */
    unsigned num_huge_entries;
    huge_entry_t *huge_entries;
    size_t num_bytes_in_huge_objects;

    /* Initial region list */
    tiny_region_t initial_tiny_regions[INITIAL_NUM_TINY_REGIONS];
    small_region_t initial_small_regions[INITIAL_NUM_SMALL_REGIONS];
} szone_t;

#if DEBUG_MALLOC || DEBUG_CLIENT
static void szone_sleep(void);
#endif
static void szone_error(szone_t *szone, const char *msg, const void *ptr);
static void protect(szone_t *szone, void *address, size_t size, unsigned protection, unsigned debug_flags);
static void *allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label);
static void deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags);
static kern_return_t _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr);

static INLINE void free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg);
static INLINE void free_list_set_checksum(szone_t *szone, free_list_t *ptr);
static unsigned free_list_count(const free_list_t *ptr);

static INLINE msize_t get_tiny_meta_header(const void *ptr, boolean_t *is_free);
static INLINE void set_tiny_meta_header_in_use(const void *ptr, msize_t msize);
static INLINE void set_tiny_meta_header_middle(const void *ptr);
static INLINE void set_tiny_meta_header_free(const void *ptr, msize_t msize);
static INLINE boolean_t tiny_meta_header_is_free(const void *ptr);
static INLINE void *tiny_previous_preceding_free(void *ptr, msize_t *prev_msize);
static INLINE void tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize);
static INLINE void tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize);
static INLINE tiny_region_t *tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr);
static INLINE void tiny_free_no_lock(szone_t *szone, tiny_region_t *region, void *ptr, msize_t msize);
static void *tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
static INLINE boolean_t try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size);
static boolean_t tiny_check_region(szone_t *szone, tiny_region_t *region);
static kern_return_t tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t tiny_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder);
static INLINE void *tiny_malloc_from_free_list(szone_t *szone, msize_t msize);
static INLINE void *tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested);
static INLINE void free_tiny(szone_t *szone, void *ptr, tiny_region_t *tiny_region);
static void print_tiny_free_list(szone_t *szone);
static void print_tiny_region(boolean_t verbose, tiny_region_t region, size_t bytes_at_end);
static boolean_t tiny_free_list_check(szone_t *szone, grain_t slot);

static INLINE void small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize);
static INLINE void small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize);
static INLINE void small_meta_header_set_middle(msize_t *meta_headers, msize_t index);
static void small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize);
static void small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize);
static INLINE small_region_t *small_region_for_ptr_no_lock(szone_t *szone, const void *ptr);
static INLINE void small_free_no_lock(szone_t *szone, small_region_t *region, void *ptr, msize_t msize);
static void *small_malloc_from_region_no_lock(szone_t *szone, msize_t msize);
static INLINE boolean_t try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size);
static boolean_t szone_check_small_region(szone_t *szone, small_region_t *region);
static kern_return_t small_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t small_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder);
static INLINE void *small_malloc_from_free_list(szone_t *szone, msize_t msize);
static INLINE void *small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested);
static INLINE void *small_malloc_cleared_no_lock(szone_t *szone, msize_t msize);
static INLINE void free_small(szone_t *szone, void *ptr, small_region_t *small_region);
static void print_small_free_list(szone_t *szone);
static void print_small_region(szone_t *szone, boolean_t verbose, small_region_t *region, size_t bytes_at_end);
static boolean_t small_free_list_check(szone_t *szone, grain_t grain);

#if DEBUG_MALLOC
static void large_debug_print(szone_t *szone);
#endif
static large_entry_t *large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
static void large_entry_insert_no_lock(szone_t *szone, large_entry_t range);
static INLINE void large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry);
static INLINE large_entry_t *large_entries_alloc_no_lock(szone_t *szone, unsigned num);
static void large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate);
static void large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate);
static vm_range_t large_free_no_lock(szone_t *szone, large_entry_t *entry);
static kern_return_t large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder);
static huge_entry_t *huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
static boolean_t huge_entry_append(szone_t *szone, huge_entry_t huge);
static kern_return_t huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder);
static void *large_and_huge_malloc(szone_t *szone, unsigned num_pages);
static INLINE void free_large_or_huge(szone_t *szone, void *ptr);
static INLINE int try_realloc_large_or_huge_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size);

static void szone_free(szone_t *szone, void *ptr);
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested);
static void *szone_malloc(szone_t *szone, size_t size);
static void *szone_calloc(szone_t *szone, size_t num_items, size_t size);
static void *szone_valloc(szone_t *szone, size_t size);
static size_t szone_size(szone_t *szone, const void *ptr);
static void *szone_realloc(szone_t *szone, void *ptr, size_t new_size);
static unsigned szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count);
static void szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count);
static void szone_destroy(szone_t *szone);
static size_t szone_good_size(szone_t *szone, size_t size);

static boolean_t szone_check_all(szone_t *szone, const char *function);
static boolean_t szone_check(szone_t *szone);
static kern_return_t szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder);
static void szone_print(szone_t *szone, boolean_t verbose);
static void szone_log(malloc_zone_t *zone, void *log_address);
static void szone_force_lock(szone_t *szone);
static void szone_force_unlock(szone_t *szone);

static void szone_statistics(szone_t *szone, malloc_statistics_t *stats);

static void *frozen_malloc(szone_t *zone, size_t new_size);
static void *frozen_calloc(szone_t *zone, size_t num_items, size_t size);
static void *frozen_valloc(szone_t *zone, size_t new_size);
static void *frozen_realloc(szone_t *zone, void *ptr, size_t new_size);
static void frozen_free(szone_t *zone, void *ptr);
static void frozen_destroy(szone_t *zone);

#if DEBUG_MALLOC
# define LOG(szone,ptr) \
    (szone->log_address && (((uintptr_t)szone->log_address == -1) || (szone->log_address == (void *)(ptr))))
#else
# define LOG(szone,ptr) 0
#endif

#define SZONE_LOCK(szone) \
    do { \
        LOCK(szone->lock); \
    } while (0)

#define SZONE_UNLOCK(szone) \
    do { \
        UNLOCK(szone->lock); \
    } while (0)

#define LOCK_AND_NOTE_LOCKED(szone,locked) \
    do { \
        CHECK(szone, __PRETTY_FUNCTION__); \
        locked = 1; SZONE_LOCK(szone); \
    } while (0)

#if DEBUG_MALLOC || DEBUG_CLIENT
# define CHECK(szone,fun) \
    if ((szone)->debug_flags & CHECK_REGIONS) szone_check_all(szone, fun)
#else
# define CHECK(szone,fun) do {} while (0)
#endif

/********************* VERY LOW LEVEL UTILITIES ************************/

#if DEBUG_MALLOC || DEBUG_CLIENT
static void
szone_sleep(void)
{

    if (getenv("MallocErrorSleep")) {
        malloc_printf("*** sleeping to help debug\n");
        sleep(3600); // to help debug
    }
}
#endif

static void
szone_error(szone_t *szone, const char *msg, const void *ptr)
{

    if (szone) SZONE_UNLOCK(szone);
    if (ptr) {
        malloc_printf("*** error for object %p: %s\n", ptr, msg);
    } else {
        malloc_printf("*** error: %s\n", msg);
    }
#if DEBUG_MALLOC
    szone_print(szone, 1);
    szone_sleep();
#endif
#if DEBUG_CLIENT
    szone_sleep();
#endif
}

static void
protect(szone_t *szone, void *address, size_t size, unsigned protection, unsigned debug_flags)
{
    kern_return_t err;

    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) {
        err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address - vm_page_size, vm_page_size, 0, protection);
        if (err) {
            malloc_printf("*** can't protect(%p) region for prelude guard page at %p\n",
                protection, address - (1 << vm_page_shift));
        }
    }
    if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) {
        err = vm_protect(mach_task_self(), (vm_address_t)(uintptr_t)address + size, vm_page_size, 0, protection);
        if (err) {
            malloc_printf("*** can't protect(%p) region for postlude guard page at %p\n",
                protection, address + size);
        }
    }
}

static void *
allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label)
{
    // align specifies a desired alignment (as a log) or 0 if no alignment requested
    kern_return_t err;
    vm_address_t vm_addr;
    uintptr_t addr, aligned_address;
    boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
    size_t allocation_size = round_page(size);
    size_t delta;

    if (align) add_guard_pages = 0; // too cumbersome to deal with that
    if (!allocation_size) allocation_size = 1 << vm_page_shift;
    if (add_guard_pages) allocation_size += 2 * (1 << vm_page_shift);
    if (align) allocation_size += (size_t)1 << align;
    err = vm_allocate(mach_task_self(), &vm_addr, allocation_size, vm_page_label | 1);
    if (err) {
        malloc_printf("*** vm_allocate(size=%lld) failed (error code=%d)\n", (long long)size, err);
        szone_error(szone, "can't allocate region", NULL);
        return NULL;
    }
    addr = (uintptr_t)vm_addr;
    if (align) {
        aligned_address = (addr + ((uintptr_t)1 << align) - 1) & ~ (((uintptr_t)1 << align) - 1);
        if (aligned_address != addr) {
            delta = aligned_address - addr;
            err = vm_deallocate(mach_task_self(), (vm_address_t)addr, delta);
            if (err)
                malloc_printf("*** freeing unaligned header failed with %d\n", err);
            addr = aligned_address;
            allocation_size -= delta;
        }
        if (allocation_size > size) {
            err = vm_deallocate(mach_task_self(), (vm_address_t)addr + size, allocation_size - size);
            if (err)
                malloc_printf("*** freeing unaligned footer failed with %d\n", err);
        }
    }
    if (add_guard_pages) {
        addr += (uintptr_t)1 << vm_page_shift;
        protect(szone, (void *)addr, size, 0, debug_flags);
    }
    return (void *)addr;
}

static void
deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags)
{
    kern_return_t err;
    boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;

    if (add_guard_pages) {
        addr -= 1 << vm_page_shift;
        size += 2 * (1 << vm_page_shift);
    }
    err = vm_deallocate(mach_task_self(), (vm_address_t)addr, size);
    if (err && szone)
        szone_error(szone, "Can't deallocate_pages region", addr);
}

static kern_return_t
_szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr)
{
    *ptr = (void *)address;
    return 0;
}

static INLINE void
free_list_checksum(szone_t *szone, free_list_t *ptr, const char *msg)
{
    // We always checksum, as testing whether to do it (based on szone->debug_flags) is as fast as
    // doing it
    // XXX not necessarily true for LP64 case
    if (ptr->checksum != (((uintptr_t)ptr->previous) ^ ((uintptr_t)ptr->next) ^ CHECKSUM_MAGIC)) {
#if DEBUG_MALLOC
        malloc_printf("*** incorrect checksum: %s\n", msg);
#endif
        szone_error(szone, "incorrect checksum for freed object "
            "- object was probably modified after being freed, break at szone_error to debug", ptr);
    }
}

static INLINE void
free_list_set_checksum(szone_t *szone, free_list_t *ptr)
{
    // We always set checksum, as testing whether to do it (based on
    // szone->debug_flags) is slower than just doing it
    // XXX not necessarily true for LP64 case
    ptr->checksum = ((uintptr_t)ptr->previous) ^ ((uintptr_t)ptr->next) ^ CHECKSUM_MAGIC;
}
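/*
 * Illustrative note (not from the original source): the checksum field of a
 * free_list_t is simply previous ^ next ^ CHECKSUM_MAGIC, so a stale write
 * through a dangling pointer into a freed block almost certainly breaks the
 * relation that free_list_checksum() re-verifies.  For example, for a free
 * block whose links are previous == 0x1000 and next == 0x2000 (32-bit case,
 * CHECKSUM_MAGIC == 0x357B), the stored checksum is 0x1000 ^ 0x2000 ^ 0x357B
 * == 0x257B; overwriting either link without updating the checksum trips the
 * "incorrect checksum for freed object" error above.
 */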

static unsigned
free_list_count(const free_list_t *ptr)
{
    unsigned count = 0;

    while (ptr) {
        count++;
        ptr = ptr->next;
    }
    return count;
}

/* XXX inconsistent use of BITMAP32 and BITARRAY operations could be cleaned up */

#define BITMAP32_SET(bitmap,bit) (bitmap |= 1 << (bit))
#define BITMAP32_CLR(bitmap,bit) (bitmap &= ~ (1 << (bit)))
#define BITMAP32_BIT(bitmap,bit) ((bitmap >> (bit)) & 1)

#define BITMAP32_FFS(bitmap) (ffs(bitmap))
// returns bit # of first bit that's one, starting at 1 (returns 0 if !bitmap)
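/*
 * Illustrative note (not from the original source): the szone keeps a 32-bit
 * bitmap with one bit per free-list slot (tiny_bitmap / small_bitmap).  To
 * find the smallest slot that can satisfy a request, the allocator masks off
 * the slots that are too small and applies BITMAP32_FFS.  For example, if
 * bitmap == 0x00000028 (slots 3 and 5 non-empty) and the request needs slot 4
 * or larger, then BITMAP32_FFS(bitmap & ~0xF) == 6, i.e. slot 5 (ffs counts
 * from 1).
 */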

/********************* TINY FREE LIST UTILITIES ************************/

// We encode the meta-headers as follows:
// Each quantum has an associated set of 2 bits:
// block_header when 1 says this block is the beginning of a block
// in_use when 1 says this block is in use
// so a block in use of size 3 is 1-1 0-X 0-X
// for a free block TINY_FREE_SIZE(ptr) carries the size and the bits are 1-0 X-X X-X
// for a block middle the bits are 0-0

// Attention double evaluation for these
#define BITARRAY_SET(bits,index) (bits[index>>3] |= (1 << (index & 7)))
#define BITARRAY_CLR(bits,index) (bits[index>>3] &= ~(1 << (index & 7)))
#define BITARRAY_BIT(bits,index) (((bits[index>>3]) >> (index & 7)) & 1)

// Following is for start<8 and end<=start+32
#define BITARRAY_MCLR_LESS_32(bits,start,end) \
    do { \
        unsigned char *_bits = (bits); \
        unsigned _end = (end); \
        switch (_end >> 3) { \
        case 4: _bits[4] &= ~ ((1 << (_end - 32)) - 1); _end = 32; \
        case 3: _bits[3] &= ~ ((1 << (_end - 24)) - 1); _end = 24; \
        case 2: _bits[2] &= ~ ((1 << (_end - 16)) - 1); _end = 16; \
        case 1: _bits[1] &= ~ ((1 << (_end - 8)) - 1); _end = 8; \
        case 0: _bits[0] &= ~ ((1 << _end) - (1 << (start))); \
        } \
    } while (0)

#if 0 // Simple but slow version
#warning Slow version in effect
#define BITARRAY_MCLR(bits,index,num) \
    do { \
        unsigned _ctr = (num); \
        unsigned _cur = (index); \
        \
        while (_ctr--) { BITARRAY_CLR(bits,_cur); _cur++; } \
    } while (0)
#else

// Following is for num <= 32
#define BITARRAY_MCLR(bits,index,num) \
    do { \
        unsigned _index = (index); \
        unsigned char *_rebased = (bits) + (_index >> 3); \
        \
        _index &= 7; \
        BITARRAY_MCLR_LESS_32(_rebased, _index, _index + (num)); \
    } while (0)
#endif

static INLINE msize_t
get_tiny_meta_header(const void *ptr, boolean_t *is_free)
{
    // returns msize and is_free
    // may return 0 for the msize component (meaning 65536)
    unsigned char *block_header;
    unsigned char *in_use;
    msize_t index;
    unsigned byte_index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    index = TINY_INDEX_FOR_PTR(ptr);
    byte_index = index >> 3;

    block_header += byte_index;
    index &= 7;
    *is_free = 0;
    if (!BITMAP32_BIT(*block_header, index))
        return 0;
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    if (!BITMAP32_BIT(*in_use, index)) {
        *is_free = 1;
        return TINY_FREE_SIZE(ptr);
    }
    uint32_t *addr = (uint32_t *)((uintptr_t)block_header & ~3);
    uint32_t word0 = OSReadLittleInt32(addr, 0) >> index;
    uint32_t word1 = OSReadLittleInt32(addr, 4) << (8 - index);
    uint32_t bits = (((uintptr_t)block_header & 3) * 8); // precision loss on LP64 OK here
    uint32_t word = (word0 >> bits) | (word1 << (24 - bits));
    uint32_t result = ffs(word >> 1);
    return result;
}

static INLINE void
set_tiny_meta_header_in_use(const void *ptr, msize_t msize)
{
    unsigned char *block_header;
    unsigned char *in_use;
    msize_t index;
    unsigned byte_index;
    msize_t clr_msize;
    unsigned end_bit;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    index = TINY_INDEX_FOR_PTR(ptr);
    byte_index = index >> 3;

#if DEBUG_MALLOC
    if (msize >= 32)
        malloc_printf("set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize);
    if ((unsigned)index + (unsigned)msize > 0x10000)
        malloc_printf("set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize);
#endif
    block_header += byte_index;
    index &= 7;
    BITMAP32_SET(*block_header, index);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    BITMAP32_SET(*in_use, index);
    index++;
    clr_msize = msize-1;
    if (clr_msize) {
        byte_index = index >> 3;
        block_header += byte_index; in_use += byte_index;
        index &= 7;
        end_bit = index + clr_msize;
        BITARRAY_MCLR_LESS_32(block_header, index, end_bit);
        BITARRAY_MCLR_LESS_32(in_use, index, end_bit);
    }
    BITARRAY_SET(block_header, index+clr_msize); // we set the block_header bit for the following block to reaffirm next block is a block
#if DEBUG_MALLOC
    {
        boolean_t ff;
        msize_t mf;

        mf = get_tiny_meta_header(ptr, &ff);
        if (msize != mf) {
            malloc_printf("setting header for tiny in_use %p : %d\n", ptr, msize);
            malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
        }
    }
#endif
}

static INLINE void
set_tiny_meta_header_middle(const void *ptr)
{
    // indicates this block is in the middle of an in use block
    unsigned char *block_header;
    unsigned char *in_use;
    msize_t index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

    BITARRAY_CLR(block_header, index);
    BITARRAY_CLR(in_use, index);
    TINY_FREE_SIZE(ptr) = 0;
}

static INLINE void
set_tiny_meta_header_free(const void *ptr, msize_t msize)
{
    // !msize is acceptable and means 65536
    unsigned char *block_header;
    unsigned char *in_use;
    msize_t index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

#if DEBUG_MALLOC
    if ((unsigned)index + (unsigned)msize > 0x10000) {
        malloc_printf("setting header for tiny free %p msize too large: %d\n", ptr, msize);
    }
#endif
    BITARRAY_SET(block_header, index); BITARRAY_CLR(in_use, index);
    TINY_FREE_SIZE(ptr) = msize;
    // mark the end of this block
    if (msize) { // msize==0 => the whole region is free
        void *follower = FOLLOWING_TINY_PTR(ptr, msize);
        TINY_PREVIOUS_MSIZE(follower) = msize;
    }
#if DEBUG_MALLOC
    boolean_t ff;
    msize_t mf = get_tiny_meta_header(ptr, &ff);
    if ((msize != mf) || !ff) {
        malloc_printf("setting header for tiny free %p : %d\n", ptr, msize);
        malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
    }
#endif
}
static INLINE boolean_t
tiny_meta_header_is_free(const void *ptr)
{
    // returns msize and is_free shifted by 16
    // may return 0 for the msize component (meaning 65536)
    unsigned char *block_header;
    unsigned char *in_use;
    msize_t index;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);
    if (!BITARRAY_BIT(block_header, index))
        return 0;
    return !BITARRAY_BIT(in_use, index);
}

static INLINE void *
tiny_previous_preceding_free(void *ptr, msize_t *prev_msize)
{
    // returns the previous block, assuming and verifying it's free
    unsigned char *block_header;
    unsigned char *in_use;
    msize_t index;
    msize_t previous_msize;
    msize_t previous_index;
    void *previous_ptr;

    block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    in_use = TINY_INUSE_FOR_HEADER(block_header);
    index = TINY_INDEX_FOR_PTR(ptr);

    if (!index)
        return NULL;
    if ((previous_msize = TINY_PREVIOUS_MSIZE(ptr)) > index)
        return NULL;

    previous_index = index - previous_msize;
    previous_ptr = (void *)(TINY_REGION_FOR_PTR(ptr) + TINY_BYTES_FOR_MSIZE(previous_index));
    if (TINY_FREE_SIZE(previous_ptr) != previous_msize)
        return NULL;

    if (!BITARRAY_BIT(block_header, previous_index))
        return NULL;
    if (BITARRAY_BIT(in_use, previous_index))
        return NULL;

    // conservative check did match true check
    *prev_msize = previous_msize;
    return previous_ptr;
}
static INLINE void
tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize)
{
    // Adds an item to the proper free list
    // Also marks the meta-header of the block properly
    // Assumes szone has been locked
    grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *free_head = szone->tiny_free_list[slot];

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in tiny_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (((unsigned)ptr) & (TINY_QUANTUM - 1)) {
        szone_error(szone, "tiny_free_list_add_ptr: Unaligned ptr", ptr);
    }
#endif
    set_tiny_meta_header_free(ptr, msize);
    if (free_head) {
        free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
#if DEBUG_MALLOC
        if (free_head->previous) {
            malloc_printf("ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, free_head, free_head->previous);
            szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr);
        }
        if (! tiny_meta_header_is_free(free_head)) {
            malloc_printf("ptr=%p slot=%d free_head=%p\n", ptr, slot, free_head);
            szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr);
        }
#endif
        free_head->previous = free_ptr;
        free_list_set_checksum(szone, free_head);
    } else {
        BITMAP32_SET(szone->tiny_bitmap, slot);
    }
    free_ptr->previous = NULL;
    free_ptr->next = free_head;
    free_list_set_checksum(szone, free_ptr);
    szone->tiny_free_list[slot] = free_ptr;
}

static INLINE void
tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize)
{
    // Removes item in the proper free list
    // msize could be read, but all callers have it so we pass it in
    // Assumes szone has been locked
    grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
    free_list_t *free_ptr = ptr;
    free_list_t *next = free_ptr->next;
    free_list_t *previous = free_ptr->previous;

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("In tiny_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize);
    }
#endif
    free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
    if (!previous) {
        // The block to remove is the head of the free list
#if DEBUG_MALLOC
        if (szone->tiny_free_list[slot] != ptr) {
            malloc_printf("ptr=%p slot=%d msize=%d szone->tiny_free_list[slot]=%p\n",
                ptr, slot, msize, szone->tiny_free_list[slot]);
            szone_error(szone, "tiny_free_list_remove_ptr: Internal invariant broken (szone->tiny_free_list[slot])", ptr);
            return;
        }
#endif
        szone->tiny_free_list[slot] = next;
        if (!next) BITMAP32_CLR(szone->tiny_bitmap, slot);
    } else {
        previous->next = next;
        free_list_set_checksum(szone, previous);
    }
    if (next) {
        next->previous = previous;
        free_list_set_checksum(szone, next);
    }
}

/*
 * Find the tiny region containing (ptr) (if any).
 *
 * We take advantage of the knowledge that tiny regions are always (1 << TINY_BLOCKS_ALIGN) aligned.
 */
static INLINE tiny_region_t *
tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
{
    tiny_region_t *region;
    tiny_region_t rbase;
    int i;

    /* mask off irrelevant lower bits */
    rbase = TINY_REGION_FOR_PTR(ptr);
    /* iterate over allocated regions - XXX not terribly efficient for large number of regions */
    for (i = szone->num_tiny_regions, region = szone->tiny_regions; i > 0; i--, region++)
        if (rbase == *region)
            return(region);
    return(NULL);
}

static INLINE void
tiny_free_no_lock(szone_t *szone, tiny_region_t *region, void *ptr, msize_t msize)
{
    size_t original_size = TINY_BYTES_FOR_MSIZE(msize);
    void *next_block = ((char *)ptr + original_size);
    msize_t previous_msize;
    void *previous;
    msize_t next_msize;
    free_list_t *big_free_block;
    free_list_t *after_next_block;
    free_list_t *before_next_block;

#if DEBUG_MALLOC
    if (LOG(szone,ptr)) {
        malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
    }
    if (! msize) {
        malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
        szone_error(szone, "trying to free tiny block that is too small", ptr);
    }
#endif
    // We try to coalesce this block with the preceding one
    previous = tiny_previous_preceding_free(ptr, &previous_msize);
    if (previous) {
#if DEBUG_MALLOC
        if (LOG(szone, ptr) || LOG(szone,previous)) {
            malloc_printf("in tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
        }
#endif
        tiny_free_list_remove_ptr(szone, previous, previous_msize);
        ptr = previous;
        msize += previous_msize;
    }
    // We try to coalesce with the next block
    if ((next_block < TINY_REGION_END(*region)) && tiny_meta_header_is_free(next_block)) {
        // The next block is free, we coalesce
        next_msize = TINY_FREE_SIZE(next_block);
#if DEBUG_MALLOC
        if (LOG(szone, ptr) || LOG(szone, next_block)) {
            malloc_printf("in tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n",
                ptr, msize, next_block, next_msize);
        }
#endif
        if (next_msize >= NUM_TINY_SLOTS) {
            // we take a short cut here to avoid removing next_block from the slot 31 freelist and then adding ptr back to slot 31
            msize += next_msize;
            big_free_block = (free_list_t *)next_block;
            after_next_block = big_free_block->next;
            before_next_block = big_free_block->previous;
            free_list_checksum(szone, big_free_block, __PRETTY_FUNCTION__);
            if (!before_next_block) {
                szone->tiny_free_list[NUM_TINY_SLOTS-1] = ptr;
            } else {
                before_next_block->next = ptr;
                free_list_set_checksum(szone, before_next_block);
            }
            if (after_next_block) {
                after_next_block->previous = ptr;
                free_list_set_checksum(szone, after_next_block);
            }
            ((free_list_t *)ptr)->previous = before_next_block;
            ((free_list_t *)ptr)->next = after_next_block;
            free_list_set_checksum(szone, ptr);
            set_tiny_meta_header_free(ptr, msize);
            set_tiny_meta_header_middle(big_free_block); // clear the meta_header to enable coalescing backwards
            goto tiny_free_ending;
        }
        tiny_free_list_remove_ptr(szone, next_block, next_msize);
        set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
        msize += next_msize;
    }
    if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize) {
        memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize));
    }
    tiny_free_list_add_ptr(szone, ptr, msize);
  tiny_free_ending:
    // When in proper debug mode we write on the memory to help debug memory smashers
    szone->num_tiny_objects--;
    szone->num_bytes_in_tiny_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
}

1127 | static void * | |
59e0d9fe A |
1128 | tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize) |
1129 | { | |
1130 | tiny_region_t last_region, *new_regions; | |
1131 | void *last_block, *ptr, *aligned_address; | |
1132 | ||
9385eb3d A |
1133 | // Allocates from the last region or a freshly allocated region |
1134 | // Before anything we transform the tiny_bytes_free_at_end - if any - to a regular free block | |
1135 | if (szone->tiny_bytes_free_at_end) { | |
59e0d9fe A |
1136 | last_region = szone->tiny_regions[szone->num_tiny_regions-1]; |
1137 | last_block = TINY_REGION_END(last_region) - szone->tiny_bytes_free_at_end; | |
1138 | tiny_free_list_add_ptr(szone, last_block, TINY_MSIZE_FOR_BYTES(szone->tiny_bytes_free_at_end)); | |
9385eb3d A |
1139 | szone->tiny_bytes_free_at_end = 0; |
1140 | } | |
9385eb3d | 1141 | // time to create a new region |
59e0d9fe A |
1142 | aligned_address = allocate_pages(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_TINY)); |
1143 | if (!aligned_address) { | |
9385eb3d A |
1144 | // out of memory! |
1145 | return NULL; | |
1146 | } | |
9385eb3d | 1147 | // We set the padding after block_header to be all 1 |
59e0d9fe | 1148 | ((uint32_t *)(aligned_address + (1 << TINY_BLOCKS_ALIGN) + (NUM_TINY_BLOCKS >> 3)))[0] = ~0; |
9385eb3d | 1149 | if (szone->num_tiny_regions == INITIAL_NUM_TINY_REGIONS) { |
59e0d9fe A |
1150 | // XXX logic here fails after initial reallocation of tiny regions is exhausted (approx 4GB of |
1151 | // tiny allocations) | |
9385eb3d A |
1152 | new_regions = small_malloc_from_region_no_lock(szone, 16); // 16 * 512 bytes is plenty of tiny regions (more than 4,000) |
1153 | if (!new_regions) return NULL; | |
1154 | memcpy(new_regions, szone->tiny_regions, INITIAL_NUM_TINY_REGIONS * sizeof(tiny_region_t)); | |
1155 | szone->tiny_regions = new_regions; // we set the pointer after it's all ready to enable enumeration from another thread without locking | |
1156 | } | |
59e0d9fe | 1157 | szone->tiny_regions[szone->num_tiny_regions] = aligned_address; |
9385eb3d | 1158 | szone->num_tiny_regions ++; // we set the number after the pointer is all ready to enable enumeration from another thread without taking the lock |
59e0d9fe | 1159 | ptr = aligned_address; |
9385eb3d A |
1160 | set_tiny_meta_header_in_use(ptr, msize); |
1161 | szone->num_tiny_objects++; | |
59e0d9fe | 1162 | szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(msize); |
9385eb3d | 1163 | // We put a header on the last block so that it appears in use (for coalescing, etc...) |
59e0d9fe A |
1164 | set_tiny_meta_header_in_use(ptr + TINY_BYTES_FOR_MSIZE(msize), 1); |
1165 | szone->tiny_bytes_free_at_end = TINY_BYTES_FOR_MSIZE(NUM_TINY_BLOCKS - msize); | |
9385eb3d A |
1166 | #if DEBUG_MALLOC |
1167 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1168 | malloc_printf("in tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize); |
9385eb3d A |
1169 | } |
1170 | #endif | |
1171 | return ptr; | |
1172 | } | |
1173 | ||
1174 | static INLINE boolean_t | |
59e0d9fe A |
1175 | try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) |
1176 | { | |
9385eb3d | 1177 | // returns 1 on success |
59e0d9fe A |
1178 | msize_t index; |
1179 | msize_t old_msize; | |
1180 | unsigned next_index; | |
1181 | void *next_block; | |
1182 | boolean_t is_free; | |
1183 | msize_t next_msize, coalesced_msize, leftover_msize; | |
1184 | void *leftover; | |
1185 | ||
1186 | index = TINY_INDEX_FOR_PTR(ptr); | |
1187 | old_msize = TINY_MSIZE_FOR_BYTES(old_size); | |
1188 | next_index = index + old_msize; | |
1189 | ||
9385eb3d | 1190 | if (next_index >= NUM_TINY_BLOCKS) { |
9385eb3d A |
1191 | return 0; |
1192 | } | |
59e0d9fe | 1193 | next_block = (char *)ptr + old_size; |
9385eb3d | 1194 | SZONE_LOCK(szone); |
59e0d9fe | 1195 | is_free = tiny_meta_header_is_free(next_block); |
9385eb3d A |
1196 | if (!is_free) { |
1197 | SZONE_UNLOCK(szone); | |
1198 | return 0; // next_block is in use; | |
1199 | } | |
59e0d9fe A |
1200 | next_msize = TINY_FREE_SIZE(next_block); |
1201 | if (old_size + TINY_BYTES_FOR_MSIZE(next_msize) < new_size) { // next_msize is a quantum count, so compare in bytes
9385eb3d A |
1202 | SZONE_UNLOCK(szone); |
1203 | return 0; // even with next block, not enough | |
1204 | } | |
1205 | tiny_free_list_remove_ptr(szone, next_block, next_msize); | |
1206 | set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards | |
59e0d9fe A |
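// take just enough of the next block: the extra bytes requested, rounded up to a whole TINY_QUANTUM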
1207 | coalesced_msize = TINY_MSIZE_FOR_BYTES(new_size - old_size + TINY_QUANTUM - 1); |
1208 | leftover_msize = next_msize - coalesced_msize; | |
9385eb3d | 1209 | if (leftover_msize) { |
59e0d9fe | 1210 | leftover = next_block + TINY_BYTES_FOR_MSIZE(coalesced_msize); |
9385eb3d A |
1211 | tiny_free_list_add_ptr(szone, leftover, leftover_msize); |
1212 | } | |
1213 | set_tiny_meta_header_in_use(ptr, old_msize + coalesced_msize); | |
1214 | #if DEBUG_MALLOC | |
1215 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1216 | malloc_printf("in try_realloc_tiny_in_place(), ptr=%p, msize=%d\n", ptr, old_msize + coalesced_msize); |
9385eb3d A |
1217 | } |
1218 | #endif | |
59e0d9fe | 1219 | szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(coalesced_msize); |
9385eb3d A |
1220 | SZONE_UNLOCK(szone); |
1221 | CHECK(szone, __PRETTY_FUNCTION__); | |
9385eb3d A |
1222 | return 1; |
1223 | } | |
1224 | ||
1225 | static boolean_t | |
59e0d9fe A |
1226 | tiny_check_region(szone_t *szone, tiny_region_t *region) |
1227 | { | |
1228 | uintptr_t start, ptr, region_end, follower; | |
9385eb3d | 1229 | boolean_t prev_free = 0; |
59e0d9fe A |
1230 | boolean_t is_free; |
1231 | msize_t msize; | |
1232 | free_list_t *free_head; | |
1233 | ||
1234 | /* establish region limits */ | |
1235 | start = (uintptr_t)TINY_REGION_ADDRESS(*region); | |
1236 | ptr = start; | |
1237 | region_end = (uintptr_t)TINY_REGION_END(*region); | |
1238 | ||
1239 | /* | |
1240 | * The last region may have a trailing chunk which has not been converted into inuse/freelist | |
1241 | * blocks yet. | |
1242 | */ | |
1243 | if (region == szone->tiny_regions + szone->num_tiny_regions - 1) | |
1244 | region_end -= szone->tiny_bytes_free_at_end; | |
1245 | ||
1246 | ||
1247 | /* | |
1248 | * Scan blocks within the region. | |
1249 | */ | |
1250 | while (ptr < region_end) { | |
1251 | /* | |
1252 | * If the first block is free, and its size is 65536 (msize = 0) then the entire region is | |
1253 | * free. | |
1254 | */ | |
1255 | msize = get_tiny_meta_header((void *)ptr, &is_free); | |
1256 | if (is_free && !msize && (ptr == start)) { | |
9385eb3d A |
1257 | return 1; |
1258 | } | |
59e0d9fe A |
1259 | |
1260 | /* | |
1261 | * If the block's size is 65536 (msize = 0) then since we're not the first entry the size is | |
1262 | * corrupt. | |
1263 | */ | |
1264 | if (!msize) { | |
1265 | malloc_printf("*** invariant broken for tiny block %p this msize=%d - size is too small\n", | |
1266 | ptr, msize); | |
9385eb3d A |
1267 | return 0; |
1268 | } | |
59e0d9fe | 1269 | |
9385eb3d | 1270 | if (!is_free) { |
59e0d9fe A |
1271 | /* |
1272 | * In use blocks cannot be more than 31 quanta large. | |
1273 | */ | |
9385eb3d | 1274 | prev_free = 0; |
59e0d9fe A |
1275 | if (msize > 31) { // msize is a count of TINY_QUANTUM units; in-use tiny blocks are at most 31 quanta
1276 | malloc_printf("*** invariant broken for %p this tiny msize=%d[%p] - size is too large\n", | |
1277 | ptr, msize, msize); | |
9385eb3d A |
1278 | return 0; |
1279 | } | |
59e0d9fe A |
1280 | /* move to next block */ |
1281 | ptr += TINY_BYTES_FOR_MSIZE(msize); | |
9385eb3d | 1282 | } else { |
59e0d9fe A |
1283 | /* |
1284 | * Free blocks must have been coalesced, we cannot have a free block following another | |
1285 | * free block. | |
1286 | */ | |
9385eb3d | 1287 | if (prev_free) { |
59e0d9fe A |
1288 | malloc_printf("*** invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n", |
1289 | ptr, msize); | |
9385eb3d A |
1290 | return 0; |
1291 | } | |
1292 | prev_free = 1; | |
59e0d9fe A |
1293 | /* |
1294 | * Check the integrity of this block's entry in its freelist. | |
1295 | */ | |
1296 | free_head = (free_list_t *)ptr; | |
9385eb3d A |
1297 | free_list_checksum(szone, free_head, __PRETTY_FUNCTION__); |
1298 | if (free_head->previous && !tiny_meta_header_is_free(free_head->previous)) { | |
59e0d9fe A |
1299 | malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n", |
1300 | ptr, free_head->previous); | |
9385eb3d A |
1301 | return 0; |
1302 | } | |
1303 | if (free_head->next && !tiny_meta_header_is_free(free_head->next)) { | |
59e0d9fe A |
1304 | malloc_printf("*** invariant broken for %p (next in free list %p is not a free pointer)\n", |
1305 | ptr, free_head->next); | |
9385eb3d A |
1306 | return 0; |
1307 | } | |
59e0d9fe A |
1308 | /* |
1309 | * Check the free block's trailing size value. | |
1310 | */ | |
1311 | follower = (uintptr_t)FOLLOWING_TINY_PTR(ptr, msize); | |
1312 | if ((follower != region_end) && (TINY_PREVIOUS_MSIZE(follower) != msize)) { | |
1313 | malloc_printf("*** invariant broken for tiny free %p followed by %p in region [%p-%p] " | |
1314 | "(end marker incorrect) should be %d; in fact %d\n", | |
1315 | ptr, follower, TINY_REGION_ADDRESS(*region), region_end, msize, TINY_PREVIOUS_MSIZE(follower)); | |
9385eb3d A |
1316 | return 0; |
1317 | } | |
59e0d9fe | 1318 | /* move to next block */ |
9385eb3d A |
1319 | ptr = follower; |
1320 | } | |
1321 | } | |
59e0d9fe A |
1322 | /* |
1323 | * Ensure that we scanned the entire region | |
1324 | */ | |
1325 | if (ptr != region_end) { | |
1326 | malloc_printf("*** invariant broken for region end %p - %p\n", ptr, region_end); | |
9385eb3d A |
1327 | return 0; |
1328 | } | |
59e0d9fe A |
1329 | /* |
1330 | * Check the trailing block's integrity. | |
1331 | */ | |
9385eb3d A |
1332 | if (region == szone->tiny_regions + szone->num_tiny_regions - 1) { |
1333 | if (szone->tiny_bytes_free_at_end) { | |
59e0d9fe | 1334 | msize = get_tiny_meta_header((void *)ptr, &is_free); |
9385eb3d | 1335 | if (is_free || (msize != 1)) { |
59e0d9fe | 1336 | malloc_printf("*** invariant broken for blocker block %p - %d %d\n", ptr, msize, is_free); |
9385eb3d A |
1337 | } |
1338 | } | |
1339 | } | |
1340 | return 1; | |
1341 | } | |
1342 | ||
1343 | static kern_return_t | |
59e0d9fe A |
1344 | tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t tiny_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder) |
1345 | { | |
9385eb3d A |
1346 | tiny_region_t *regions; |
1347 | unsigned index = 0; | |
1348 | vm_range_t buffer[MAX_RECORDER_BUFFER]; | |
1349 | unsigned count = 0; | |
1350 | kern_return_t err; | |
59e0d9fe A |
1351 | tiny_region_t region; |
1352 | vm_range_t range; | |
1353 | vm_range_t admin_range; | |
1354 | vm_range_t ptr_range; | |
1355 | unsigned char *mapped_region; | |
1356 | unsigned char *block_header; | |
1357 | unsigned char *in_use; | |
1358 | unsigned block_index; | |
1359 | unsigned block_limit; | |
1360 | boolean_t is_free; | |
1361 | msize_t msize; | |
1362 | void *mapped_ptr; | |
1363 | unsigned bit; | |
1364 | ||
9385eb3d A |
1365 | err = reader(task, region_address, sizeof(tiny_region_t) * num_regions, (void **)®ions); |
1366 | if (err) return err; | |
1367 | while (index < num_regions) { | |
1368 | // unsigned num_in_use = 0; | |
1369 | // unsigned num_free = 0; | |
59e0d9fe A |
1370 | region = regions[index]; |
1371 | range.address = (vm_address_t)TINY_REGION_ADDRESS(region); | |
1372 | range.size = (vm_size_t)TINY_REGION_SIZE; | |
9385eb3d | 1373 | if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) { |
59e0d9fe A |
1374 | admin_range.address = range.address + (1 << TINY_BLOCKS_ALIGN); |
1375 | admin_range.size = range.size - (1 << TINY_BLOCKS_ALIGN); | |
9385eb3d A |
1376 | recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1); |
1377 | } | |
1378 | if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) { | |
59e0d9fe A |
1379 | ptr_range.address = range.address; |
1380 | ptr_range.size = 1 << TINY_BLOCKS_ALIGN; | |
9385eb3d A |
1381 | recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1); |
1382 | } | |
1383 | if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { | |
9385eb3d | 1384 | err = reader(task, range.address, range.size, (void **)&mapped_region); |
59e0d9fe A |
1385 | if (err) |
1386 | return err; | |
1387 | block_header = (unsigned char *)(mapped_region + (1 << TINY_BLOCKS_ALIGN)); | |
1388 | in_use = block_header + (NUM_TINY_BLOCKS >> 3) + 4; | |
1389 | block_index = 0; | |
1390 | block_limit = NUM_TINY_BLOCKS; | |
9385eb3d | 1391 | if (index == num_regions - 1) |
59e0d9fe | 1392 | block_limit -= TINY_MSIZE_FOR_BYTES(tiny_bytes_free_at_end); |
9385eb3d | 1393 | while (block_index < block_limit) { |
59e0d9fe | 1394 | is_free = !BITARRAY_BIT(in_use, block_index); |
9385eb3d | 1395 | if (is_free) { |
59e0d9fe | 1396 | mapped_ptr = mapped_region + TINY_BYTES_FOR_MSIZE(block_index); |
9385eb3d | 1397 | msize = TINY_FREE_SIZE(mapped_ptr); |
59e0d9fe A |
1398 | if (!msize) |
1399 | break; | |
9385eb3d A |
1400 | } else { |
1401 | msize = 1; | |
59e0d9fe A |
1402 | bit = block_index + 1; |
1403 | while (! BITARRAY_BIT(block_header, bit)) { | |
1404 | bit++; | |
1405 | msize ++; | |
1406 | } | |
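// an in-use block carries no explicit size here: its length is recovered by scanning
// block_header forward from block_index+1 until the next "block start" bit is found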
1407 | buffer[count].address = range.address + TINY_BYTES_FOR_MSIZE(block_index); | |
1408 | buffer[count].size = TINY_BYTES_FOR_MSIZE(msize); | |
9385eb3d A |
1409 | count++; |
1410 | if (count >= MAX_RECORDER_BUFFER) { | |
1411 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); | |
1412 | count = 0; | |
1413 | } | |
1414 | } | |
1415 | block_index += msize; | |
1416 | } | |
1417 | } | |
9385eb3d A |
1418 | index++; |
1419 | } | |
1420 | if (count) { | |
1421 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); | |
1422 | } | |
1423 | return 0; | |
1424 | } | |
1425 | ||
1426 | static INLINE void * | |
59e0d9fe A |
1427 | tiny_malloc_from_free_list(szone_t *szone, msize_t msize) |
1428 | { | |
9385eb3d | 1429 | // Assumes we've locked the region |
59e0d9fe A |
1430 | free_list_t *ptr; |
1431 | msize_t this_msize; | |
1432 | grain_t slot = msize - 1; | |
1433 | free_list_t **free_list = szone->tiny_free_list; | |
1434 | free_list_t **the_slot = free_list + slot; | |
1435 | free_list_t *next; | |
1436 | free_list_t **limit; | |
1437 | unsigned bitmap; | |
1438 | msize_t leftover_msize; | |
1439 | free_list_t *leftover_ptr; | |
1440 | ||
1441 | /* | |
1442 | * Look for an exact match by checking the freelist for this msize. | |
1443 | */ | |
9385eb3d A |
1444 | ptr = *the_slot; |
1445 | if (ptr) { | |
59e0d9fe | 1446 | next = ptr->next; |
9385eb3d A |
1447 | if (next) { |
1448 | next->previous = NULL; | |
1449 | free_list_set_checksum(szone, next); | |
1450 | } | |
1451 | *the_slot = next; | |
1452 | this_msize = msize; | |
1453 | #if DEBUG_MALLOC | |
59e0d9fe A |
1454 | if (LOG(szone, ptr)) { |
1455 | malloc_printf("in tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize); | |
9385eb3d A |
1456 | } |
1457 | #endif | |
1458 | goto return_tiny_alloc; | |
1459 | } | |
59e0d9fe A |
1460 | |
1461 | /* | |
1462 | * Iterate over freelists for larger blocks looking for the next-largest block. | |
1463 | */ | |
1464 | bitmap = szone->tiny_bitmap & ~ ((1 << slot) - 1); | |
1465 | if (!bitmap) | |
1466 | goto try_tiny_malloc_from_end; | |
9385eb3d | 1467 | slot = BITMAP32_FFS(bitmap) - 1; |
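// szone->tiny_bitmap has one bit per free-list slot; masking off slots smaller than the
// request and taking find-first-set yields the smallest slot that still has free blocks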
59e0d9fe | 1468 | limit = free_list + NUM_TINY_SLOTS - 1; |
9385eb3d A |
1469 | free_list += slot; |
1470 | while (free_list < limit) { | |
1471 | // try bigger grains | |
1472 | ptr = *free_list; | |
1473 | if (ptr) { | |
59e0d9fe | 1474 | next = ptr->next; |
9385eb3d A |
1475 | if (next) { |
1476 | next->previous = NULL; | |
1477 | free_list_set_checksum(szone, next); | |
1478 | } | |
1479 | *free_list = next; | |
1480 | this_msize = TINY_FREE_SIZE(ptr); | |
1481 | #if DEBUG_MALLOC | |
59e0d9fe A |
1482 | if (LOG(szone, ptr)) { |
1483 | malloc_printf("in tiny_malloc_from_free_list(), bigger grain ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize); | |
9385eb3d A |
1484 | } |
1485 | #endif | |
1486 | goto add_leftover_and_proceed; | |
1487 | } | |
1488 | free_list++; | |
1489 | } | |
1490 | // we are now looking at the last slot (31) | |
1491 | ptr = *limit; | |
1492 | if (ptr) { | |
9385eb3d | 1493 | this_msize = TINY_FREE_SIZE(ptr); |
59e0d9fe | 1494 | next = ptr->next; |
9385eb3d A |
1495 | if (this_msize - msize >= NUM_TINY_SLOTS) { |
1496 | // the leftover will go back to the free list, so we optimize by modifying the free list rather than removing the head and then adding back | |
59e0d9fe A |
1497 | leftover_msize = this_msize - msize; |
1498 | leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize)); | |
9385eb3d A |
1499 | *limit = leftover_ptr; |
1500 | if (next) { | |
1501 | next->previous = leftover_ptr; | |
1502 | free_list_set_checksum(szone, next); | |
1503 | } | |
59e0d9fe A |
1504 | leftover_ptr->next = next; |
1505 | leftover_ptr->previous = NULL; | |
9385eb3d A |
1506 | free_list_set_checksum(szone, leftover_ptr); |
1507 | set_tiny_meta_header_free(leftover_ptr, leftover_msize); | |
1508 | #if DEBUG_MALLOC | |
1509 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1510 | malloc_printf("in tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize); |
9385eb3d A |
1511 | } |
1512 | #endif | |
1513 | this_msize = msize; | |
1514 | goto return_tiny_alloc; | |
1515 | } | |
1516 | *limit = next; | |
1517 | if (next) { | |
1518 | next->previous = NULL; | |
1519 | free_list_set_checksum(szone, next); | |
1520 | } | |
1521 | goto add_leftover_and_proceed; | |
1522 | } | |
1523 | try_tiny_malloc_from_end: | |
1524 | // Let's see if we can use szone->tiny_bytes_free_at_end | |
59e0d9fe A |
1525 | if (szone->tiny_bytes_free_at_end >= TINY_BYTES_FOR_MSIZE(msize)) { |
1526 | ptr = (free_list_t *)(TINY_REGION_END(szone->tiny_regions[szone->num_tiny_regions-1]) - szone->tiny_bytes_free_at_end); | |
1527 | szone->tiny_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(msize); | |
9385eb3d A |
1528 | if (szone->tiny_bytes_free_at_end) { |
1529 | // let's add an in use block after ptr to serve as boundary | |
59e0d9fe | 1530 | set_tiny_meta_header_in_use((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize), 1); |
9385eb3d A |
1531 | } |
1532 | this_msize = msize; | |
1533 | #if DEBUG_MALLOC | |
59e0d9fe A |
1534 | if (LOG(szone, ptr)) { |
1535 | malloc_printf("in tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize); | |
9385eb3d A |
1536 | } |
1537 | #endif | |
1538 | goto return_tiny_alloc; | |
1539 | } | |
1540 | return NULL; | |
1541 | add_leftover_and_proceed: | |
9385eb3d | 1542 | if (!this_msize || (this_msize > msize)) { |
59e0d9fe A |
1543 | leftover_msize = this_msize - msize; |
1544 | leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize)); | |
9385eb3d A |
1545 | #if DEBUG_MALLOC |
1546 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1547 | malloc_printf("in tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize); |
9385eb3d A |
1548 | } |
1549 | #endif | |
1550 | tiny_free_list_add_ptr(szone, leftover_ptr, leftover_msize); | |
1551 | this_msize = msize; | |
1552 | } | |
1553 | return_tiny_alloc: | |
1554 | szone->num_tiny_objects++; | |
59e0d9fe | 1555 | szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(this_msize); |
9385eb3d A |
1556 | #if DEBUG_MALLOC |
1557 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1558 | malloc_printf("in tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize); |
9385eb3d A |
1559 | } |
1560 | #endif | |
1561 | set_tiny_meta_header_in_use(ptr, this_msize); | |
1562 | return ptr; | |
1563 | } | |
1564 | ||
1565 | static INLINE void * | |
59e0d9fe A |
1566 | tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) |
1567 | { | |
9385eb3d A |
1568 | boolean_t locked = 0; |
1569 | void *ptr; | |
59e0d9fe | 1570 | |
9385eb3d | 1571 | #if DEBUG_MALLOC |
59e0d9fe A |
1572 | if (!msize) { |
1573 | szone_error(szone, "invariant broken (!msize) in allocation (region)", NULL); | |
1574 | return(NULL); | |
9385eb3d A |
1575 | } |
1576 | #endif | |
1577 | #if TINY_CACHE | |
59e0d9fe A |
1578 | ptr = szone->last_tiny_free; |
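// last_tiny_free packs a freed block and its msize into a single word: tiny blocks are
// TINY_QUANTUM (16-byte) aligned and only blocks with msize < TINY_QUANTUM are cached,
// so the low 4 bits of the pointer are free to carry the msize, e.g.
//     cached = (void *)((uintptr_t)block | msize);                /* store (see free_tiny)  */
//     msize  = (uintptr_t)cached & (TINY_QUANTUM - 1);            /* recover the size       */
//     block  = (void *)((uintptr_t)cached & ~(TINY_QUANTUM - 1)); /* recover the pointer    */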
1579 | if ((((uintptr_t)ptr) & (TINY_QUANTUM - 1)) == msize) { | |
9385eb3d A |
1580 | // we have a candidate - let's lock to make sure |
1581 | LOCK_AND_NOTE_LOCKED(szone, locked); | |
59e0d9fe | 1582 | if (ptr == szone->last_tiny_free) { |
9385eb3d | 1583 | szone->last_tiny_free = NULL; |
9385eb3d A |
1584 | SZONE_UNLOCK(szone); |
1585 | CHECK(szone, __PRETTY_FUNCTION__); | |
59e0d9fe | 1586 | ptr = (void *)((uintptr_t)ptr & ~ (TINY_QUANTUM - 1)); |
9385eb3d | 1587 | if (cleared_requested) { |
59e0d9fe | 1588 | memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize)); |
9385eb3d A |
1589 | } |
1590 | #if DEBUG_MALLOC | |
1591 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1592 | malloc_printf("in tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize); |
9385eb3d A |
1593 | } |
1594 | #endif | |
1595 | return ptr; | |
1596 | } | |
9385eb3d A |
1597 | } |
1598 | #endif | |
1599 | // Except on rare occasions where we need to add a new region, we are going to end up locking, so we might as well lock right away to avoid doing unnecessary optimistic probes
1600 | if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked); | |
1601 | ptr = tiny_malloc_from_free_list(szone, msize); | |
9385eb3d A |
1602 | if (ptr) { |
1603 | SZONE_UNLOCK(szone); | |
1604 | CHECK(szone, __PRETTY_FUNCTION__); | |
1605 | if (cleared_requested) { | |
59e0d9fe | 1606 | memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize)); |
9385eb3d A |
1607 | } |
1608 | return ptr; | |
1609 | } | |
1610 | ptr = tiny_malloc_from_region_no_lock(szone, msize); | |
9385eb3d A |
1611 | // we don't clear because this freshly allocated space is pristine |
1612 | SZONE_UNLOCK(szone); | |
1613 | CHECK(szone, __PRETTY_FUNCTION__); | |
1614 | return ptr; | |
1615 | } | |
1616 | ||
1617 | static INLINE void | |
59e0d9fe A |
1618 | free_tiny(szone_t *szone, void *ptr, tiny_region_t *tiny_region) |
1619 | { | |
1620 | msize_t msize; | |
1621 | boolean_t is_free; | |
1622 | #if TINY_CACHE | |
1623 | void *ptr2; | |
1624 | #endif | |
1625 | ||
9385eb3d A |
1626 | // ptr is known to be in tiny_region |
1627 | SZONE_LOCK(szone); | |
1628 | #if TINY_CACHE | |
59e0d9fe A |
1629 | ptr2 = szone->last_tiny_free; |
1630 | /* check that we don't already have this pointer in the cache */ | |
1631 | if (ptr == (void *)((uintptr_t)ptr2 & ~ (TINY_QUANTUM - 1))) { | |
1632 | szone_error(szone, "double free", ptr); | |
9385eb3d A |
1633 | return; |
1634 | } | |
1635 | #endif /* TINY_CACHE */ | |
59e0d9fe | 1636 | msize = get_tiny_meta_header(ptr, &is_free); |
9385eb3d | 1637 | if (is_free) { |
59e0d9fe | 1638 | szone_error(szone, "double free", ptr); |
9385eb3d A |
1639 | return; |
1640 | } | |
9385eb3d A |
1641 | #if DEBUG_MALLOC |
1642 | if (!msize) { | |
1643 | malloc_printf("*** szone_free() block in use is too large: %p\n", ptr); | |
59e0d9fe | 1644 | return; |
9385eb3d A |
1645 | } |
1646 | #endif | |
1647 | #if TINY_CACHE | |
1648 | if (msize < TINY_QUANTUM) { // check that the msize fits in the pointer's low 4 bits
59e0d9fe | 1649 | szone->last_tiny_free = (void *)(((uintptr_t)ptr) | msize); |
9385eb3d | 1650 | if (!ptr2) { |
9385eb3d A |
1651 | SZONE_UNLOCK(szone); |
1652 | CHECK(szone, __PRETTY_FUNCTION__); | |
1653 | return; | |
1654 | } | |
59e0d9fe A |
1655 | msize = (uintptr_t)ptr2 & (TINY_QUANTUM - 1); |
1656 | ptr = (void *)(((uintptr_t)ptr2) & ~(TINY_QUANTUM - 1)); | |
9385eb3d A |
1657 | tiny_region = tiny_region_for_ptr_no_lock(szone, ptr); |
1658 | if (!tiny_region) { | |
59e0d9fe | 1659 | szone_error(szone, "double free (tiny cache)", ptr); |
9385eb3d A |
1660 | } |
1661 | } | |
1662 | #endif | |
1663 | tiny_free_no_lock(szone, tiny_region, ptr, msize); | |
1664 | SZONE_UNLOCK(szone); | |
1665 | CHECK(szone, __PRETTY_FUNCTION__); | |
1666 | } | |
1667 | ||
1668 | static void | |
59e0d9fe A |
1669 | print_tiny_free_list(szone_t *szone) |
1670 | { | |
1671 | grain_t slot = 0; | |
1672 | free_list_t *ptr; | |
1673 | ||
1674 | malloc_printf("tiny free sizes: "); | |
9385eb3d | 1675 | while (slot < NUM_TINY_SLOTS) { |
59e0d9fe | 1676 | ptr = szone->tiny_free_list[slot]; |
9385eb3d A |
1677 | if (ptr) { |
1678 | malloc_printf("%s%y[%d]; ", (slot == NUM_TINY_SLOTS-1) ? ">=" : "", (slot+1)*TINY_QUANTUM, free_list_count(ptr)); | |
1679 | } | |
1680 | slot++; | |
1681 | } | |
9385eb3d A |
1682 | } |
1683 | ||
1684 | static void | |
59e0d9fe A |
1685 | print_tiny_region(boolean_t verbose, tiny_region_t region, size_t bytes_at_end) |
1686 | { | |
9385eb3d A |
1687 | unsigned counts[1024]; |
1688 | unsigned in_use = 0; | |
59e0d9fe A |
1689 | uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(region); |
1690 | uintptr_t current = start; | |
1691 | uintptr_t limit = (uintptr_t)TINY_REGION_END(region) - bytes_at_end; | |
1692 | boolean_t is_free; | |
1693 | msize_t msize; | |
1694 | unsigned ci; | |
1695 | ||
9385eb3d A |
1696 | memset(counts, 0, 1024 * sizeof(unsigned)); |
1697 | while (current < limit) { | |
59e0d9fe | 1698 | msize = get_tiny_meta_header((void *)current, &is_free); |
9385eb3d A |
1699 | if (is_free && !msize && (current == start)) {
1700 | // first block is all free | |
1701 | break; | |
1702 | } | |
1703 | if (!msize) { | |
59e0d9fe | 1704 | malloc_printf("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize); |
9385eb3d A |
1705 | break; |
1706 | } | |
59e0d9fe | 1707 | if (!is_free) { |
9385eb3d | 1708 | // block in use |
59e0d9fe A |
1709 | if (msize > 32) |
1710 | malloc_printf("*** error at %p msize for in_use is %d\n", (void *)current, msize); | |
1711 | if (msize < 1024) | |
1712 | counts[msize]++; | |
9385eb3d A |
1713 | in_use++; |
1714 | } | |
59e0d9fe | 1715 | current += TINY_BYTES_FOR_MSIZE(msize); |
9385eb3d | 1716 | } |
59e0d9fe | 1717 | malloc_printf("Tiny region [%p-%p, %y]\t", (void *)start, TINY_REGION_END(region), (int)TINY_REGION_SIZE); |
9385eb3d | 1718 | malloc_printf("In_use=%d ", in_use); |
59e0d9fe | 1719 | if (bytes_at_end) malloc_printf("untouched=%y ", bytes_at_end); |
9385eb3d | 1720 | if (verbose && in_use) { |
59e0d9fe A |
1721 | malloc_printf("\tSizes in use: "); |
1722 | for (ci = 0; ci < 1024; ci++) | |
1723 | if (counts[ci]) | |
1724 | malloc_printf("%d[%d]", TINY_BYTES_FOR_MSIZE(ci), counts[ci]); | |
9385eb3d A |
1725 | } |
1726 | malloc_printf("\n"); | |
1727 | } | |
1728 | ||
1729 | static boolean_t | |
59e0d9fe A |
1730 | tiny_free_list_check(szone_t *szone, grain_t slot) |
1731 | { | |
e9ce8d39 | 1732 | unsigned count = 0; |
9385eb3d | 1733 | free_list_t *ptr = szone->tiny_free_list[slot]; |
e9ce8d39 | 1734 | free_list_t *previous = NULL; |
59e0d9fe A |
1735 | boolean_t is_free; |
1736 | ||
1737 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
e9ce8d39 | 1738 | while (ptr) { |
9385eb3d | 1739 | free_list_checksum(szone, ptr, __PRETTY_FUNCTION__); |
59e0d9fe | 1740 | is_free = tiny_meta_header_is_free(ptr); |
9385eb3d | 1741 | if (! is_free) { |
59e0d9fe | 1742 | malloc_printf("*** in-use ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr); |
9385eb3d A |
1743 | return 0; |
1744 | } | |
59e0d9fe A |
1745 | if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) { |
1746 | malloc_printf("*** unaligned ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
9385eb3d A |
1747 | return 0; |
1748 | } | |
1749 | if (!tiny_region_for_ptr_no_lock(szone, ptr)) { | |
59e0d9fe | 1750 | malloc_printf("*** ptr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr);
9385eb3d A |
1751 | return 0; |
1752 | } | |
1753 | if (ptr->previous != previous) { | |
59e0d9fe | 1754 | malloc_printf("*** previous incorrectly set slot=%d count=%d ptr=%p\n", slot, count, ptr);
9385eb3d A |
1755 | return 0; |
1756 | } | |
1757 | previous = ptr; | |
1758 | ptr = ptr->next; | |
1759 | count++; | |
e9ce8d39 A |
1760 | } |
1761 | return 1; | |
1762 | } | |
1763 | ||
9385eb3d A |
1764 | /********************* SMALL FREE LIST UTILITIES ************************/ |
1765 | ||
59e0d9fe A |
1766 | /* |
1767 | * Mark a block as free. Only the first quantum of a block is marked thusly, | |
1768 | * the remainder are marked "middle". | |
1769 | */ | |
9385eb3d | 1770 | static INLINE void |
59e0d9fe A |
1771 | small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize) |
1772 | { | |
1773 | ||
9385eb3d A |
1774 | meta_headers[index] = msize | SMALL_IS_FREE; |
1775 | } | |
1776 | ||
59e0d9fe A |
1777 | /* |
1778 | * Mark a block as in use. Only the first quantum of a block is marked thusly, | |
1779 | * the remainder are marked "middle". | |
1780 | */ | |
9385eb3d | 1781 | static INLINE void |
59e0d9fe A |
1782 | small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize) |
1783 | { | |
1784 | ||
9385eb3d A |
1785 | meta_headers[index] = msize; |
1786 | } | |
1787 | ||
59e0d9fe A |
1788 | /* |
1789 | * Mark a quantum as being the second or later in a block. | |
1790 | */ | |
9385eb3d | 1791 | static INLINE void |
59e0d9fe A |
1792 | small_meta_header_set_middle(msize_t *meta_headers, msize_t index) |
1793 | { | |
1794 | ||
9385eb3d A |
1795 | meta_headers[index] = 0; |
1796 | } | |
e9ce8d39 | 1797 | |
59e0d9fe A |
1798 | // Adds an item to the proper free list |
1799 | // Also marks the header of the block properly | |
1800 | // Assumes szone has been locked | |
9385eb3d | 1801 | static void |
59e0d9fe A |
1802 | small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize) |
1803 | { | |
9385eb3d A |
1804 | grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1; |
1805 | free_list_t *free_ptr = ptr; | |
1806 | free_list_t *free_head = szone->small_free_list[grain]; | |
59e0d9fe A |
1807 | void *follower; |
1808 | ||
1809 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
9385eb3d A |
1810 | #if DEBUG_MALLOC |
1811 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1812 | malloc_printf("in small_free_list_add_ptr(), ptr=%p, msize=%d\n", ptr, msize); |
9385eb3d | 1813 | } |
59e0d9fe | 1814 | if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) { |
9385eb3d A |
1815 | szone_error(szone, "small_free_list_add_ptr: Unaligned ptr", ptr); |
1816 | } | |
1817 | #endif | |
1818 | if (free_head) { | |
1819 | free_list_checksum(szone, free_head, __PRETTY_FUNCTION__); | |
1820 | #if DEBUG_MALLOC | |
1821 | if (free_head->previous) { | |
59e0d9fe A |
1822 | malloc_printf("ptr=%p grain=%d free_head=%p previous=%p\n", |
1823 | ptr, grain, free_head, free_head->previous); | |
9385eb3d A |
1824 | szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr); |
1825 | } | |
59e0d9fe | 1826 | if (!SMALL_PTR_IS_FREE(free_head)) { |
9385eb3d A |
1827 | malloc_printf("ptr=%p grain=%d free_head=%p\n", ptr, grain, free_head); |
1828 | szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr); | |
1829 | } | |
1830 | #endif | |
1831 | free_head->previous = free_ptr; | |
1832 | free_list_set_checksum(szone, free_head); | |
e9ce8d39 | 1833 | } else { |
9385eb3d A |
1834 | BITMAP32_SET(szone->small_bitmap, grain); |
1835 | } | |
1836 | free_ptr->previous = NULL; | |
1837 | free_ptr->next = free_head; | |
1838 | free_list_set_checksum(szone, free_ptr); | |
1839 | szone->small_free_list[grain] = free_ptr; | |
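// also record this free block's msize in the word just past its end (SMALL_PREVIOUS_MSIZE
// of the following block) so that a later free of the next block can coalesce backwards
// without scanning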
59e0d9fe | 1840 | follower = ptr + SMALL_BYTES_FOR_MSIZE(msize); |
9385eb3d A |
1841 | SMALL_PREVIOUS_MSIZE(follower) = msize; |
1842 | } | |
1843 | ||
59e0d9fe A |
1844 | // Removes item in the proper free list |
1845 | // msize could be read, but all callers have it so we pass it in | |
1846 | // Assumes szone has been locked | |
9385eb3d | 1847 | static void |
59e0d9fe A |
1848 | small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize) |
1849 | { | |
9385eb3d A |
1850 | grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1; |
1851 | free_list_t *free_ptr = ptr; | |
1852 | free_list_t *next = free_ptr->next; | |
1853 | free_list_t *previous = free_ptr->previous; | |
59e0d9fe A |
1854 | |
1855 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
e9ce8d39 | 1856 | #if DEBUG_MALLOC |
9385eb3d | 1857 | if (LOG(szone,ptr)) { |
59e0d9fe | 1858 | malloc_printf("in small_free_list_remove_ptr(), ptr=%p, msize=%d\n", ptr, msize); |
9385eb3d A |
1859 | } |
1860 | #endif | |
1861 | free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__); | |
1862 | if (!previous) { | |
e9ce8d39 | 1863 | #if DEBUG_MALLOC |
9385eb3d | 1864 | if (szone->small_free_list[grain] != ptr) { |
59e0d9fe A |
1865 | malloc_printf("ptr=%p grain=%d msize=%d szone->small_free_list[grain]=%p\n", |
1866 | ptr, grain, msize, szone->small_free_list[grain]); | |
9385eb3d A |
1867 | szone_error(szone, "small_free_list_remove_ptr: Internal invariant broken (szone->small_free_list[grain])", ptr); |
1868 | return; | |
1869 | } | |
1870 | #endif | |
1871 | szone->small_free_list[grain] = next; | |
1872 | if (!next) BITMAP32_CLR(szone->small_bitmap, grain); | |
1873 | } else { | |
1874 | previous->next = next; | |
1875 | free_list_set_checksum(szone, previous); | |
1876 | } | |
1877 | if (next) { | |
1878 | next->previous = previous; | |
1879 | free_list_set_checksum(szone, next); | |
1880 | } | |
1881 | } | |
1882 | ||
1883 | static INLINE small_region_t * | |
59e0d9fe A |
1884 | small_region_for_ptr_no_lock(szone_t *szone, const void *ptr) |
1885 | { | |
1886 | small_region_t *region; | |
1887 | small_region_t rbase; | |
1888 | int i; | |
1889 | ||
1890 | /* find assumed heap/region base */ | |
1891 | rbase = SMALL_REGION_FOR_PTR(ptr); | |
1892 | ||
1893 | /* scan existing regions for a match */ | |
1894 | for (i = szone->num_small_regions, region = szone->small_regions; i > 0; i--, region++) | |
1895 | if (rbase == *region) | |
1896 | return(region); | |
1897 | return(NULL); | |
9385eb3d A |
1898 | } |
1899 | ||
1900 | static INLINE void | |
59e0d9fe A |
1901 | small_free_no_lock(szone_t *szone, small_region_t *region, void *ptr, msize_t msize) |
1902 | { | |
1903 | msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); | |
1904 | unsigned index = SMALL_META_INDEX_FOR_PTR(ptr); | |
1905 | size_t original_size = SMALL_BYTES_FOR_MSIZE(msize); | |
1906 | unsigned char *next_block = ((unsigned char *)ptr + original_size); | |
1907 | msize_t next_index = index + msize; | |
1908 | msize_t previous_msize, next_msize; | |
1909 | void *previous; | |
1910 | ||
9385eb3d A |
1911 | // Assumes locked |
1912 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
9385eb3d A |
1913 | #if DEBUG_MALLOC |
1914 | if (LOG(szone,ptr)) { | |
59e0d9fe | 1915 | malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize); |
9385eb3d A |
1916 | } |
1917 | if (! msize) { | |
59e0d9fe A |
1918 | malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize); |
1919 | szone_error(szone, "trying to free small block that is too small", ptr); | |
9385eb3d | 1920 | } |
9385eb3d A |
1921 | #endif |
1922 | // We try to coalesce this block with the preceding one
1923 | if (index && (SMALL_PREVIOUS_MSIZE(ptr) <= index)) { | |
59e0d9fe | 1924 | previous_msize = SMALL_PREVIOUS_MSIZE(ptr); |
9385eb3d | 1925 | if (meta_headers[index - previous_msize] == (previous_msize | SMALL_IS_FREE)) { |
59e0d9fe | 1926 | previous = ptr - SMALL_BYTES_FOR_MSIZE(previous_msize); |
9385eb3d | 1927 | // previous is really to be coalesced |
e9ce8d39 | 1928 | #if DEBUG_MALLOC |
9385eb3d | 1929 | if (LOG(szone, ptr) || LOG(szone,previous)) { |
59e0d9fe | 1930 | malloc_printf("in small_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous); |
9385eb3d A |
1931 | } |
1932 | #endif | |
9385eb3d A |
1933 | small_free_list_remove_ptr(szone, previous, previous_msize); |
1934 | small_meta_header_set_middle(meta_headers, index); | |
1935 | ptr = previous; | |
1936 | msize += previous_msize; | |
1937 | index -= previous_msize; | |
e9ce8d39 | 1938 | } |
9385eb3d A |
1939 | } |
1940 | // We try to coalesce with the next block | |
59e0d9fe | 1941 | if ((next_block < SMALL_REGION_END(*region)) && (meta_headers[next_index] & SMALL_IS_FREE)) { |
9385eb3d | 1942 | // next block is free, we coalesce |
59e0d9fe | 1943 | next_msize = meta_headers[next_index] & ~ SMALL_IS_FREE; |
9385eb3d A |
1944 | #if DEBUG_MALLOC |
1945 | if (LOG(szone,ptr)) malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", ptr, msize, next_block, next_msize); | |
1946 | #endif | |
9385eb3d A |
1947 | small_free_list_remove_ptr(szone, next_block, next_msize); |
1948 | small_meta_header_set_middle(meta_headers, next_index); | |
1949 | msize += next_msize; | |
e9ce8d39 A |
1950 | } |
1951 | if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) { | |
1952 | if (!msize) { | |
59e0d9fe | 1953 | szone_error(szone, "incorrect size information - block header was damaged", ptr); |
e9ce8d39 | 1954 | } else { |
59e0d9fe | 1955 | memset(ptr, 0x55, SMALL_BYTES_FOR_MSIZE(msize)); |
e9ce8d39 A |
1956 | } |
1957 | } | |
9385eb3d A |
1958 | small_free_list_add_ptr(szone, ptr, msize); |
1959 | small_meta_header_set_is_free(meta_headers, index, msize); | |
e9ce8d39 A |
1960 | szone->num_small_objects--; |
1961 | szone->num_bytes_in_small_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks | |
1962 | } | |
1963 | ||
9385eb3d | 1964 | static void * |
59e0d9fe A |
1965 | small_malloc_from_region_no_lock(szone_t *szone, msize_t msize) |
1966 | { | |
1967 | small_region_t last_region; | |
1968 | void *last_block; | |
1969 | void *ptr; | |
1970 | void *new_address; | |
1971 | msize_t *meta_headers; | |
1972 | msize_t index ; | |
1973 | size_t region_capacity; | |
1974 | msize_t new_msize; | |
1975 | small_region_t *new_regions; | |
1976 | msize_t msize_left; | |
1977 | ||
e9ce8d39 | 1978 | // Allocates from the last region or a freshly allocated region |
9385eb3d A |
1979 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); |
1980 | // Before anything we transform the small_bytes_free_at_end - if any - to a regular free block | |
1981 | if (szone->small_bytes_free_at_end) { | |
59e0d9fe A |
1982 | last_region = szone->small_regions[szone->num_small_regions - 1]; |
1983 | last_block = (void *)(SMALL_REGION_END(last_region) - szone->small_bytes_free_at_end); | |
1984 | small_free_list_add_ptr(szone, last_block, SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end)); | |
1985 | *SMALL_METADATA_FOR_PTR(last_block) = SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end) | SMALL_IS_FREE; | |
9385eb3d | 1986 | szone->small_bytes_free_at_end = 0; |
e9ce8d39 A |
1987 | } |
1988 | // time to create a new region | |
59e0d9fe | 1989 | new_address = allocate_pages(szone, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_SMALL)); |
e9ce8d39 | 1990 | if (!new_address) { |
9385eb3d A |
1991 | // out of memory! |
1992 | return NULL; | |
1993 | } | |
59e0d9fe A |
1994 | ptr = new_address; |
1995 | meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); | |
1996 | index = 0; | |
9385eb3d A |
1997 | if (szone->num_small_regions == INITIAL_NUM_SMALL_REGIONS) { |
1998 | // time to grow the number of regions | |
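// the bigger regions array is carved out of the front of the brand-new region as an
// ordinary in-use small block, then the existing entries are copied into it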
59e0d9fe A |
1999 | region_capacity = (1 << (32 - SMALL_BLOCKS_ALIGN)) - 20; // that is for sure the maximum number of small regions we can have |
2000 | new_msize = (region_capacity * sizeof(small_region_t) + SMALL_QUANTUM - 1) / SMALL_QUANTUM; | |
2001 | new_regions = ptr; | |
9385eb3d A |
2002 | small_meta_header_set_in_use(meta_headers, index, new_msize); |
2003 | szone->num_small_objects++; | |
59e0d9fe | 2004 | szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(new_msize); |
9385eb3d | 2005 | memcpy(new_regions, szone->small_regions, INITIAL_NUM_SMALL_REGIONS * sizeof(small_region_t)); |
59e0d9fe A |
2006 | // We intentionally leak the previous regions pointer to avoid multi-threading crashes if |
2007 | // another thread was reading it (unlocked) while we are changing it. | |
9385eb3d | 2008 | szone->small_regions = new_regions; // note we set this pointer after it's all set |
59e0d9fe | 2009 | ptr += SMALL_BYTES_FOR_MSIZE(new_msize); |
9385eb3d | 2010 | index = new_msize; |
9385eb3d | 2011 | } |
59e0d9fe A |
2012 | szone->small_regions[szone->num_small_regions] = new_address; |
2013 | // we bump the number of regions AFTER we have changed the regions pointer to enable finding a
2014 | // small region without taking the lock | |
2015 | // XXX naive assumption assumes memory ordering coherence between this and other CPUs | |
2016 | szone->num_small_regions++; | |
2017 | small_meta_header_set_in_use(meta_headers, index, msize); | |
2018 | msize_left = NUM_SMALL_BLOCKS - index; | |
2019 | szone->num_small_objects++; | |
2020 | szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(msize); | |
2021 | // add a big free block | |
2022 | index += msize; msize_left -= msize; | |
2023 | meta_headers[index] = msize_left; | |
2024 | szone->small_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(msize_left); | |
2025 | return ptr; | |
2026 | } | |
2027 | ||
2028 | static INLINE boolean_t | |
2029 | try_realloc_small_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) | |
2030 | { | |
2031 | // returns 1 on success | |
2032 | msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); | |
2033 | unsigned index = SMALL_META_INDEX_FOR_PTR(ptr); | |
2034 | msize_t old_msize = SMALL_MSIZE_FOR_BYTES(old_size); | |
2035 | msize_t new_msize = SMALL_MSIZE_FOR_BYTES(new_size + SMALL_QUANTUM - 1); | |
2036 | void *next_block = (char *)ptr + old_size; | |
2037 | unsigned next_index = index + old_msize; | |
2038 | msize_t next_msize_and_free; | |
2039 | msize_t next_msize; | |
2040 | msize_t leftover_msize; | |
2041 | void *leftover; | |
2042 | unsigned leftover_index; | |
2043 | ||
2044 | if (next_index >= NUM_SMALL_BLOCKS) { | |
2045 | return 0; | |
2046 | } | |
2047 | #if DEBUG_MALLOC | |
2048 | if ((uintptr_t)next_block & (SMALL_QUANTUM - 1)) { | |
2049 | szone_error(szone, "internal invariant broken in realloc(next_block)", next_block); | |
2050 | } | |
2051 | if (meta_headers[index] != old_msize) | |
2052 | malloc_printf("*** try_realloc_small_in_place incorrect old %d %d\n", | |
2053 | meta_headers[index], old_msize); | |
2054 | #endif | |
2055 | SZONE_LOCK(szone); | |
2056 | /* | |
2057 | * Look for a free block immediately afterwards. If it's large enough, we can consume (part of) | |
2058 | * it. | |
2059 | */ | |
2060 | next_msize_and_free = meta_headers[next_index]; | |
2061 | next_msize = next_msize_and_free & ~ SMALL_IS_FREE; | |
2062 | if (!(next_msize_and_free & SMALL_IS_FREE) || (old_msize + next_msize < new_msize)) { | |
2063 | SZONE_UNLOCK(szone); | |
2064 | return 0; | |
2065 | } | |
2066 | /* | |
2067 | * The following block is big enough; pull it from its freelist and chop off enough to satisfy | |
2068 | * our needs. | |
2069 | */ | |
2070 | small_free_list_remove_ptr(szone, next_block, next_msize); | |
2071 | small_meta_header_set_middle(meta_headers, next_index); | |
2072 | leftover_msize = old_msize + next_msize - new_msize; | |
2073 | if (leftover_msize) { | |
2074 | /* there's some left, so put the remainder back */ | |
2075 | leftover = (unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(new_msize); | |
2076 | small_free_list_add_ptr(szone, leftover, leftover_msize); | |
2077 | leftover_index = index + new_msize; | |
2078 | small_meta_header_set_is_free(meta_headers, leftover_index, leftover_msize); | |
2079 | } | |
2080 | #if DEBUG_MALLOC | |
2081 | if (SMALL_BYTES_FOR_MSIZE(new_msize) >= LARGE_THRESHOLD) { | |
2082 | malloc_printf("*** realloc in place for %p exceeded msize=%d\n", ptr, new_msize);
2083 | } | |
2084 | #endif | |
2085 | small_meta_header_set_in_use(meta_headers, index, new_msize); | |
2086 | #if DEBUG_MALLOC | |
2087 | if (LOG(szone,ptr)) { | |
2088 | malloc_printf("in szone_realloc(), ptr=%p, msize=%d\n", ptr, *SMALL_METADATA_FOR_PTR(ptr)); | |
2089 | } | |
2090 | #endif | |
2091 | szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(new_msize - old_msize); | |
2092 | SZONE_UNLOCK(szone); | |
2093 | CHECK(szone, __PRETTY_FUNCTION__); | |
2094 | return 1; | |
e9ce8d39 A |
2095 | } |
2096 | ||
9385eb3d | 2097 | static boolean_t |
59e0d9fe A |
2098 | szone_check_small_region(szone_t *szone, small_region_t *region) |
2099 | { | |
2100 | unsigned char *ptr = SMALL_REGION_ADDRESS(*region); | |
2101 | msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); | |
2102 | unsigned char *region_end = SMALL_REGION_END(*region); | |
e9ce8d39 | 2103 | msize_t prev_free = 0; |
59e0d9fe A |
2104 | unsigned index; |
2105 | msize_t msize_and_free; | |
2106 | msize_t msize; | |
2107 | free_list_t *free_head; | |
2108 | msize_t *follower; | |
2109 | ||
2110 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
9385eb3d | 2111 | if (region == szone->small_regions + szone->num_small_regions - 1) region_end -= szone->small_bytes_free_at_end; |
59e0d9fe A |
2112 | while (ptr < region_end) { |
2113 | index = SMALL_META_INDEX_FOR_PTR(ptr); | |
2114 | msize_and_free = meta_headers[index]; | |
2115 | if (!(msize_and_free & SMALL_IS_FREE)) { | |
9385eb3d | 2116 | // block is in use |
59e0d9fe | 2117 | msize = msize_and_free; |
9385eb3d | 2118 | if (!msize) { |
59e0d9fe A |
2119 | malloc_printf("*** invariant broken: null msize ptr=%p region#=%d num_small_regions=%d end=%p\n", |
2120 | ptr, region - szone->small_regions, szone->num_small_regions, (void *)region_end); | |
9385eb3d A |
2121 | return 0; |
2122 | } | |
2123 | if (msize > (LARGE_THRESHOLD / SMALL_QUANTUM)) { | |
59e0d9fe A |
2124 | malloc_printf("*** invariant broken for %p this small msize=%d - size is too large\n", |
2125 | ptr, msize_and_free); | |
9385eb3d A |
2126 | return 0; |
2127 | } | |
59e0d9fe | 2128 | ptr += SMALL_BYTES_FOR_MSIZE(msize); |
9385eb3d A |
2129 | prev_free = 0; |
2130 | } else { | |
2131 | // free pointer | |
59e0d9fe A |
2132 | msize = msize_and_free & ~ SMALL_IS_FREE; |
2133 | free_head = (free_list_t *)ptr; | |
2134 | follower = (msize_t *)FOLLOWING_SMALL_PTR(ptr, msize); | |
2135 | if (!msize) { | |
2136 | malloc_printf("*** invariant broken for free block %p this msize=%d\n", ptr, msize); | |
9385eb3d A |
2137 | return 0; |
2138 | } | |
2139 | if (prev_free) { | |
59e0d9fe | 2140 | malloc_printf("*** invariant broken for %p (2 free in a row)\n", ptr); |
9385eb3d A |
2141 | return 0; |
2142 | } | |
2143 | free_list_checksum(szone, free_head, __PRETTY_FUNCTION__); | |
59e0d9fe A |
2144 | if (free_head->previous && !SMALL_PTR_IS_FREE(free_head->previous)) { |
2145 | malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n", | |
2146 | ptr, free_head->previous); | |
9385eb3d A |
2147 | return 0; |
2148 | } | |
59e0d9fe A |
2149 | if (free_head->next && !SMALL_PTR_IS_FREE(free_head->next)) { |
2150 | malloc_printf("*** invariant broken for %p (next is not a free pointer)\n", ptr); | |
9385eb3d | 2151 | return 0; |
e9ce8d39 | 2152 | } |
9385eb3d | 2153 | if (SMALL_PREVIOUS_MSIZE(follower) != msize) { |
59e0d9fe A |
2154 | malloc_printf("*** invariant broken for small free %p followed by %p in region [%p-%p] " |
2155 | "(end marker incorrect) should be %d; in fact %d\n", | |
2156 | ptr, follower, SMALL_REGION_ADDRESS(*region), region_end, msize, SMALL_PREVIOUS_MSIZE(follower)); | |
9385eb3d | 2157 | return 0; |
e9ce8d39 | 2158 | } |
59e0d9fe | 2159 | ptr = (unsigned char *)follower; |
9385eb3d A |
2160 | prev_free = SMALL_IS_FREE; |
2161 | } | |
e9ce8d39 A |
2162 | } |
2163 | return 1; | |
2164 | } | |
2165 | ||
9385eb3d | 2166 | static kern_return_t |
59e0d9fe A |
2167 | small_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t region_address, unsigned short num_regions, size_t small_bytes_free_at_end, memory_reader_t reader, vm_range_recorder_t recorder) |
2168 | { | |
9385eb3d | 2169 | small_region_t *regions; |
e9ce8d39 A |
2170 | unsigned index = 0; |
2171 | vm_range_t buffer[MAX_RECORDER_BUFFER]; | |
2172 | unsigned count = 0; | |
2173 | kern_return_t err; | |
59e0d9fe A |
2174 | small_region_t region; |
2175 | vm_range_t range; | |
2176 | vm_range_t admin_range; | |
2177 | vm_range_t ptr_range; | |
2178 | unsigned char *mapped_region; | |
2179 | msize_t *block_header; | |
2180 | unsigned block_index; | |
2181 | unsigned block_limit; | |
2182 | msize_t msize_and_free; | |
2183 | msize_t msize; | |
2184 | ||
9385eb3d | 2185 | err = reader(task, region_address, sizeof(small_region_t) * num_regions, (void **)®ions); |
e9ce8d39 A |
2186 | if (err) return err; |
2187 | while (index < num_regions) { | |
59e0d9fe A |
2188 | region = regions[index]; |
2189 | range.address = (vm_address_t)SMALL_REGION_ADDRESS(region); | |
2190 | range.size = SMALL_REGION_SIZE; | |
9385eb3d | 2191 | if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) { |
59e0d9fe A |
2192 | admin_range.address = range.address + (1 << SMALL_BLOCKS_ALIGN); |
2193 | admin_range.size = range.size - (1 << SMALL_BLOCKS_ALIGN); | |
9385eb3d A |
2194 | recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1); |
2195 | } | |
2196 | if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) { | |
59e0d9fe A |
2197 | ptr_range.address = range.address; |
2198 | ptr_range.size = 1 << SMALL_BLOCKS_ALIGN; | |
9385eb3d A |
2199 | recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1); |
2200 | } | |
2201 | if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { | |
9385eb3d A |
2202 | err = reader(task, range.address, range.size, (void **)&mapped_region); |
2203 | if (err) return err; | |
59e0d9fe A |
2204 | block_header = (msize_t *)(mapped_region + (1 << SMALL_BLOCKS_ALIGN)); |
2205 | block_index = 0; | |
2206 | block_limit = NUM_SMALL_BLOCKS; | |
9385eb3d | 2207 | if (index == num_regions - 1) |
59e0d9fe | 2208 | block_limit -= SMALL_MSIZE_FOR_BYTES(small_bytes_free_at_end); |
9385eb3d | 2209 | while (block_index < block_limit) { |
59e0d9fe A |
2210 | msize_and_free = block_header[block_index]; |
2211 | msize = msize_and_free & ~ SMALL_IS_FREE; | |
9385eb3d A |
2212 | if (! (msize_and_free & SMALL_IS_FREE)) { |
2213 | // Block in use | |
59e0d9fe A |
2214 | buffer[count].address = range.address + SMALL_BYTES_FOR_MSIZE(block_index); |
2215 | buffer[count].size = SMALL_BYTES_FOR_MSIZE(msize); | |
9385eb3d A |
2216 | count++; |
2217 | if (count >= MAX_RECORDER_BUFFER) { | |
2218 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); | |
2219 | count = 0; | |
2220 | } | |
2221 | } | |
2222 | block_index += msize; | |
2223 | } | |
9385eb3d A |
2224 | } |
2225 | index++; | |
2226 | } | |
2227 | if (count) { | |
2228 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); | |
2229 | } | |
e9ce8d39 A |
2230 | return 0; |
2231 | } | |
2232 | ||
9385eb3d | 2233 | static INLINE void * |
59e0d9fe A |
2234 | small_malloc_from_free_list(szone_t *szone, msize_t msize) |
2235 | { | |
9385eb3d A |
2236 | grain_t grain = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1; |
2237 | unsigned bitmap = szone->small_bitmap & ~ ((1 << grain) - 1); | |
e9ce8d39 A |
2238 | void *ptr; |
2239 | msize_t this_msize; | |
59e0d9fe A |
2240 | free_list_t **free_list; |
2241 | free_list_t **limit; | |
2242 | free_list_t *next; | |
2243 | msize_t leftover_msize; | |
2244 | void *leftover_ptr; | |
2245 | msize_t *meta_headers; | |
2246 | unsigned leftover_index; | |
2247 | ||
2248 | // Assumes locked | |
2249 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
2250 | ||
9385eb3d A |
2251 | if (!bitmap) goto try_small_from_end; |
2252 | grain = BITMAP32_FFS(bitmap) - 1; | |
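// same bitmap trick as the tiny allocator: find-first-set locates the smallest grain with a non-empty free list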
e9ce8d39 | 2253 | // first try the small grains |
59e0d9fe | 2254 | limit = szone->small_free_list + NUM_SMALL_SLOTS - 1; |
9385eb3d | 2255 | free_list = szone->small_free_list + grain; |
e9ce8d39 | 2256 | while (free_list < limit) { |
9385eb3d A |
2257 | // try bigger grains |
2258 | ptr = *free_list; | |
2259 | if (ptr) { | |
9385eb3d A |
2260 | next = ((free_list_t *)ptr)->next; |
2261 | if (next) { | |
2262 | next->previous = NULL; | |
2263 | free_list_set_checksum(szone, next); | |
2264 | } | |
2265 | *free_list = next; | |
59e0d9fe | 2266 | this_msize = SMALL_PTR_SIZE(ptr); |
9385eb3d A |
2267 | goto add_leftover_and_proceed; |
2268 | } | |
2269 | free_list++; | |
e9ce8d39 A |
2270 | } |
2271 | // We now check the large grains for one that is big enough | |
e9ce8d39 A |
2272 | ptr = *free_list; |
2273 | while (ptr) { | |
59e0d9fe | 2274 | this_msize = SMALL_PTR_SIZE(ptr); |
9385eb3d | 2275 | if (this_msize >= msize) { |
9385eb3d A |
2276 | small_free_list_remove_ptr(szone, ptr, this_msize); |
2277 | goto add_leftover_and_proceed; | |
2278 | } | |
2279 | ptr = ((free_list_t *)ptr)->next; | |
2280 | } | |
2281 | try_small_from_end: | |
2282 | // Let's see if we can use szone->small_bytes_free_at_end | |
59e0d9fe | 2283 | if (szone->small_bytes_free_at_end >= SMALL_BYTES_FOR_MSIZE(msize)) { |
9385eb3d | 2284 | ptr = (void *)(SMALL_REGION_END(szone->small_regions[szone->num_small_regions-1]) - szone->small_bytes_free_at_end); |
59e0d9fe | 2285 | szone->small_bytes_free_at_end -= SMALL_BYTES_FOR_MSIZE(msize); |
9385eb3d A |
2286 | if (szone->small_bytes_free_at_end) { |
2287 | // let's mark this block as in use to serve as boundary | |
59e0d9fe | 2288 | *SMALL_METADATA_FOR_PTR(ptr + SMALL_BYTES_FOR_MSIZE(msize)) = SMALL_MSIZE_FOR_BYTES(szone->small_bytes_free_at_end); |
9385eb3d A |
2289 | } |
2290 | this_msize = msize; | |
2291 | goto return_small_alloc; | |
e9ce8d39 A |
2292 | } |
2293 | return NULL; | |
2294 | add_leftover_and_proceed: | |
9385eb3d | 2295 | if (this_msize > msize) { |
59e0d9fe A |
2296 | leftover_msize = this_msize - msize; |
2297 | leftover_ptr = ptr + SMALL_BYTES_FOR_MSIZE(msize); | |
9385eb3d A |
2298 | #if DEBUG_MALLOC |
2299 | if (LOG(szone,ptr)) { | |
59e0d9fe | 2300 | malloc_printf("in small_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize); |
9385eb3d A |
2301 | } |
2302 | #endif | |
2303 | small_free_list_add_ptr(szone, leftover_ptr, leftover_msize); | |
59e0d9fe A |
2304 | meta_headers = SMALL_META_HEADER_FOR_PTR(leftover_ptr); |
2305 | leftover_index = SMALL_META_INDEX_FOR_PTR(leftover_ptr); | |
9385eb3d A |
2306 | small_meta_header_set_is_free(meta_headers, leftover_index, leftover_msize); |
2307 | this_msize = msize; | |
e9ce8d39 | 2308 | } |
9385eb3d | 2309 | return_small_alloc: |
e9ce8d39 | 2310 | szone->num_small_objects++; |
59e0d9fe | 2311 | szone->num_bytes_in_small_objects += SMALL_BYTES_FOR_MSIZE(this_msize); |
e9ce8d39 | 2312 | #if DEBUG_MALLOC |
9385eb3d | 2313 | if (LOG(szone,ptr)) { |
59e0d9fe | 2314 | malloc_printf("in small_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize); |
9385eb3d | 2315 | } |
e9ce8d39 | 2316 | #endif |
59e0d9fe | 2317 | *SMALL_METADATA_FOR_PTR(ptr) = this_msize; |
e9ce8d39 A |
2318 | return ptr; |
2319 | } | |
2320 | ||
9385eb3d | 2321 | static INLINE void * |
59e0d9fe A |
2322 | small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) |
2323 | { | |
e9ce8d39 | 2324 | boolean_t locked = 0; |
59e0d9fe | 2325 | #if SMALL_CACHE |
e9ce8d39 | 2326 | void *ptr; |
59e0d9fe A |
2327 | #endif |
2328 | ||
9385eb3d A |
2329 | #if SMALL_CACHE |
2330 | ptr = (void *)szone->last_small_free; | |
59e0d9fe | 2331 | if ((((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) == msize) { |
9385eb3d A |
2332 | // we have a candidate - let's lock to make sure |
2333 | LOCK_AND_NOTE_LOCKED(szone, locked); | |
2334 | if (ptr == (void *)szone->last_small_free) { | |
2335 | szone->last_small_free = NULL; | |
9385eb3d A |
2336 | SZONE_UNLOCK(szone); |
2337 | CHECK(szone, __PRETTY_FUNCTION__); | |
59e0d9fe | 2338 | ptr = (void *)((uintptr_t)ptr & ~ (SMALL_QUANTUM - 1)); |
9385eb3d | 2339 | if (cleared_requested) { |
59e0d9fe | 2340 | memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); |
9385eb3d A |
2341 | } |
2342 | return ptr; | |
2343 | } | |
e9ce8d39 A |
2344 | } |
2345 | #endif | |
59e0d9fe A |
2346 | // Except on rare occasions where we need to add a new region, we are going to end up locking,
2347 | // so we might as well lock right away to avoid doing unnecessary optimistic probes | |
9385eb3d A |
2348 | if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked); |
2349 | ptr = small_malloc_from_free_list(szone, msize); | |
e9ce8d39 | 2350 | if (ptr) { |
9385eb3d A |
2351 | SZONE_UNLOCK(szone); |
2352 | CHECK(szone, __PRETTY_FUNCTION__); | |
2353 | if (cleared_requested) { | |
59e0d9fe | 2354 | memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); |
9385eb3d A |
2355 | } |
2356 | return ptr; | |
e9ce8d39 | 2357 | } |
9385eb3d A |
2358 | ptr = small_malloc_from_region_no_lock(szone, msize); |
2359 | // we don't clear because this freshly allocated space is pristine | |
2360 | SZONE_UNLOCK(szone); | |
2361 | CHECK(szone, __PRETTY_FUNCTION__); | |
e9ce8d39 A |
2362 | return ptr; |
2363 | } | |
2364 | ||
59e0d9fe | 2365 | // tries to allocate a small, cleared block |
9385eb3d | 2366 | static INLINE void * |
59e0d9fe A |
2367 | small_malloc_cleared_no_lock(szone_t *szone, msize_t msize) |
2368 | { | |
2369 | void *ptr; | |
2370 | ||
9385eb3d A |
2371 | // Assumes already locked |
2372 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
9385eb3d | 2373 | ptr = small_malloc_from_free_list(szone, msize); |
e9ce8d39 | 2374 | if (ptr) { |
59e0d9fe | 2375 | memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); |
9385eb3d | 2376 | return ptr; |
e9ce8d39 | 2377 | } else { |
9385eb3d A |
2378 | ptr = small_malloc_from_region_no_lock(szone, msize); |
2379 | // we don't clear because this freshly allocated space is pristine | |
e9ce8d39 A |
2380 | } |
2381 | return ptr; | |
2382 | } | |
2383 | ||
9385eb3d | 2384 | static INLINE void |
59e0d9fe A |
2385 | free_small(szone_t *szone, void *ptr, small_region_t *small_region) |
2386 | { | |
9385eb3d | 2387 | msize_t msize_and_free; |
59e0d9fe A |
2388 | #if SMALL_CACHE |
2389 | void *ptr2; | |
2390 | #endif | |
2391 | ||
2392 | // ptr is known to be in small_region | |
2393 | msize_and_free = *SMALL_METADATA_FOR_PTR(ptr); | |
9385eb3d A |
2394 | if (msize_and_free & SMALL_IS_FREE) { |
2395 | szone_error(szone, "Object already freed being freed", ptr); | |
2396 | return; | |
2397 | } | |
2398 | CHECK(szone, __PRETTY_FUNCTION__); | |
9385eb3d A |
2399 | SZONE_LOCK(szone); |
2400 | #if SMALL_CACHE | |
59e0d9fe A |
2401 | ptr2 = szone->last_small_free; |
2402 | szone->last_small_free = (void *)(((uintptr_t)ptr) | msize_and_free); | |
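// same packing trick as the tiny cache: SMALL_QUANTUM alignment leaves the pointer's low
// bits free to carry msize_and_free, recovered below with (ptr2 & (SMALL_QUANTUM - 1))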
9385eb3d | 2403 | if (!ptr2) { |
9385eb3d A |
2404 | SZONE_UNLOCK(szone); |
2405 | CHECK(szone, __PRETTY_FUNCTION__); | |
2406 | return; | |
2407 | } | |
59e0d9fe A |
2408 | msize_and_free = (uintptr_t)ptr2 & (SMALL_QUANTUM - 1); |
2409 | ptr = (void *)(((uintptr_t)ptr2) & ~ (SMALL_QUANTUM - 1)); | |
9385eb3d A |
2410 | small_region = small_region_for_ptr_no_lock(szone, ptr); |
2411 | if (!small_region) { | |
59e0d9fe A |
2412 | szone_error(szone, "double free (small cache)", ptr); |
2413 | return; | |
9385eb3d A |
2414 | } |
2415 | #endif | |
2416 | small_free_no_lock(szone, small_region, ptr, msize_and_free); | |
2417 | SZONE_UNLOCK(szone); | |
2418 | CHECK(szone, __PRETTY_FUNCTION__); | |
2419 | } | |
2420 | ||
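/*
 * A minimal, self-contained sketch of the pointer/metadata packing used by the
 * last_small_free cache above: every small block address is SMALL_QUANTUM-aligned,
 * so its low bits are always zero and can carry the msize_and_free word.
 * EXAMPLE_QUANTUM and the example_* names below are illustrative stand-ins,
 * not part of this allocator.
 */
#define EXAMPLE_QUANTUM 512			/* assumed power-of-two alignment */

static uintptr_t
example_pack(void *aligned_ptr, unsigned meta)
{
	/* caller guarantees: ptr aligned to EXAMPLE_QUANTUM, 0 <= meta < EXAMPLE_QUANTUM */
	return (uintptr_t)aligned_ptr | meta;
}

static void *
example_unpack_ptr(uintptr_t packed)
{
	return (void *)(packed & ~((uintptr_t)EXAMPLE_QUANTUM - 1));
}

static unsigned
example_unpack_meta(uintptr_t packed)
{
	return (unsigned)(packed & (EXAMPLE_QUANTUM - 1));
}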
2421 | static void | |
59e0d9fe A |
2422 | print_small_free_list(szone_t *szone) |
2423 | { | |
9385eb3d | 2424 | grain_t grain = 0; |
59e0d9fe A |
2425 | free_list_t *ptr; |
2426 | ||
2427 | malloc_printf("small free sizes: "); | |
9385eb3d | 2428 | while (grain < NUM_SMALL_SLOTS) { |
59e0d9fe | 2429 | ptr = szone->small_free_list[grain]; |
9385eb3d A |
2430 | if (ptr) { |
2431 | malloc_printf("%s%y[%d]; ", (grain == NUM_SMALL_SLOTS-1) ? ">=" : "", (grain + 1) * SMALL_QUANTUM, free_list_count(ptr)); | |
2432 | } | |
2433 | grain++; | |
2434 | } | |
2435 | malloc_printf("\n"); | |
2436 | } | |
2437 | ||
2438 | static void | |
59e0d9fe A |
2439 | print_small_region(szone_t *szone, boolean_t verbose, small_region_t *region, size_t bytes_at_end) |
2440 | { | |
2441 | unsigned counts[1024]; | |
2442 | unsigned in_use = 0; | |
2443 | void *start = SMALL_REGION_ADDRESS(*region); | |
2444 | void *limit = SMALL_REGION_END(*region) - bytes_at_end; | |
2445 | msize_t msize_and_free; | |
2446 | msize_t msize; | |
2447 | unsigned ci; | |
2448 | ||
9385eb3d A |
2449 | memset(counts, 0, 1024 * sizeof(unsigned)); |
2450 | while (start < limit) { | |
59e0d9fe A |
2451 | msize_and_free = *SMALL_METADATA_FOR_PTR(start); |
2452 | msize = msize_and_free & ~ SMALL_IS_FREE; | |
9385eb3d A |
2453 | if (!(msize_and_free & SMALL_IS_FREE)) { |
2454 | // block in use | |
59e0d9fe A |
2455 | if (msize < 1024) |
2456 | counts[msize]++; | |
9385eb3d A |
2457 | in_use++; |
2458 | } | |
59e0d9fe | 2459 | start += SMALL_BYTES_FOR_MSIZE(msize); |
9385eb3d | 2460 | } |
59e0d9fe A |
2461 | malloc_printf("Small region [%p-%p, %y]\tIn_use=%d ", |
2462 | SMALL_REGION_ADDRESS(*region), SMALL_REGION_END(*region), (int)SMALL_REGION_SIZE, in_use); | |
2463 | if (bytes_at_end) | |
2464 | malloc_printf("Untouched=%y ", bytes_at_end); | |
9385eb3d | 2465 | if (verbose && in_use) { |
9385eb3d | 2466 | malloc_printf("\n\tSizes in use: "); |
59e0d9fe A |
2467 | for (ci = 0; ci < 1024; ci++) |
2468 | if (counts[ci]) | |
2469 | malloc_printf("%d[%d] ", SMALL_BYTES_FOR_MSIZE(ci), counts[ci]); | |
9385eb3d A |
2470 | } |
2471 | malloc_printf("\n"); | |
2472 | } | |
2473 | ||
2474 | static boolean_t | |
59e0d9fe A |
2475 | small_free_list_check(szone_t *szone, grain_t grain) |
2476 | { | |
9385eb3d A |
2477 | unsigned count = 0; |
2478 | free_list_t *ptr = szone->small_free_list[grain]; | |
2479 | free_list_t *previous = NULL; | |
59e0d9fe A |
2480 | msize_t msize_and_free; |
2481 | ||
2482 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); | |
9385eb3d | 2483 | while (ptr) { |
59e0d9fe | 2484 | msize_and_free = *SMALL_METADATA_FOR_PTR(ptr); |
9385eb3d A |
2485 | count++; |
2486 | if (!(msize_and_free & SMALL_IS_FREE)) { | |
59e0d9fe | 2487 | malloc_printf("*** in-use ptr in free list grain=%d count=%d ptr=%p\n", grain, count, ptr); |
9385eb3d A |
2488 | return 0; |
2489 | } | |
59e0d9fe A |
2490 | if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) { |
2491 | malloc_printf("*** unaligned ptr in free list grain=%d count=%d ptr=%p\n", grain, count, ptr); | |
9385eb3d A |
2492 | return 0; |
2493 | } | |
2494 | if (!small_region_for_ptr_no_lock(szone, ptr)) { | |
59e0d9fe | 2495 | malloc_printf("*** ptr not in szone grain=%d count=%d ptr=%p\n", grain, count, ptr); |
9385eb3d A |
2496 | return 0; |
2497 | } | |
2498 | free_list_checksum(szone, ptr, __PRETTY_FUNCTION__); | |
2499 | if (ptr->previous != previous) { | |
59e0d9fe | 2500 | malloc_printf("*** previous incorrectly set grain=%d count=%d ptr=%p\n", grain, count, ptr); |
9385eb3d A |
2501 | return 0; |
2502 | } | |
2503 | previous = ptr; | |
2504 | ptr = ptr->next; | |
2505 | } | |
2506 | return 1; | |
2507 | } | |
2508 | ||
e9ce8d39 A |
2509 | /********************* LARGE ENTRY UTILITIES ************************/ |
2510 | ||
2511 | #if DEBUG_MALLOC | |
2512 | ||
9385eb3d | 2513 | static void |
59e0d9fe A |
2514 | large_debug_print(szone_t *szone) |
2515 | { | |
2516 | unsigned num_large_entries = szone->num_large_entries; | |
2517 | unsigned index = num_large_entries; | |
2518 | large_entry_t *range; | |
2519 | ||
2520 | for (index = 0, range = szone->large_entries; index < szone->num_large_entries; index++, range++) | |
2521 | if (!LARGE_ENTRY_IS_EMPTY(*range)) | |
2522 | malloc_printf("%d: %p(%y); ", index, LARGE_ENTRY_ADDRESS(*range), LARGE_ENTRY_SIZE(*range)); | |
2523 | ||
e9ce8d39 A |
2524 | malloc_printf("\n"); |
2525 | } | |
2526 | #endif | |
2527 | ||
59e0d9fe A |
2528 | /* |
2529 | * Scan the hash ring looking for an entry for the given pointer. | |
2530 | */ | |
9385eb3d | 2531 | static large_entry_t * |
59e0d9fe A |
2532 | large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) |
2533 | { | |
2534 | // result only valid with lock held | |
2535 | unsigned num_large_entries = szone->num_large_entries; | |
2536 | unsigned hash_index; | |
2537 | unsigned index; | |
2538 | large_entry_t *range; | |
2539 | ||
2540 | if (!num_large_entries) | |
2541 | return NULL; | |
2542 | hash_index = ((uintptr_t)ptr >> vm_page_shift) % num_large_entries; | |
e9ce8d39 A |
2543 | index = hash_index; |
2544 | do { | |
59e0d9fe A |
2545 | range = szone->large_entries + index; |
2546 | if (LARGE_ENTRY_MATCHES(*range, ptr)) | |
2547 | return range; | |
2548 | if (LARGE_ENTRY_IS_EMPTY(*range)) | |
2549 | return NULL; // end of chain | |
2550 | index++; | |
2551 | if (index == num_large_entries) | |
2552 | index = 0; | |
e9ce8d39 A |
2553 | } while (index != hash_index); |
2554 | return NULL; | |
2555 | } | |
2556 | ||
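/*
 * Sketch of the open-addressed "hash ring" probe described above: start at the
 * hashed slot and walk forward, wrapping at the end of the table, until either
 * the key or an empty slot is found.  This toy table stores bare keys and uses
 * 0 to mean "empty"; the real table stores large_entry_t values and tests
 * LARGE_ENTRY_MATCHES / LARGE_ENTRY_IS_EMPTY instead.
 */
static int
example_ring_lookup(const uintptr_t *table, unsigned num_slots, uintptr_t key)
{
	unsigned hash_index = (unsigned)(key % num_slots);
	unsigned index = hash_index;

	do {
		if (table[index] == key)
			return (int)index;	/* found the entry */
		if (table[index] == 0)
			return -1;		/* an empty slot ends the probe chain */
		index++;
		if (index == num_slots)
			index = 0;		/* wrap around the ring */
	} while (index != hash_index);
	return -1;				/* went all the way around: not present */
}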
9385eb3d | 2557 | static void |
59e0d9fe A |
2558 | large_entry_insert_no_lock(szone_t *szone, large_entry_t range) |
2559 | { | |
2560 | unsigned num_large_entries = szone->num_large_entries; | |
2561 | unsigned hash_index = (range.address_and_num_pages >> vm_page_shift) % num_large_entries; | |
2562 | unsigned index = hash_index; | |
2563 | large_entry_t *entry; | |
2564 | ||
e9ce8d39 | 2565 | do { |
59e0d9fe | 2566 | entry = szone->large_entries + index; |
9385eb3d A |
2567 | if (LARGE_ENTRY_IS_EMPTY(*entry)) { |
2568 | *entry = range; | |
2569 | return; // end of chain | |
2570 | } | |
59e0d9fe A |
2571 | index++; |
2572 | if (index == num_large_entries) | |
2573 | index = 0; | |
e9ce8d39 A |
2574 | } while (index != hash_index); |
2575 | } | |
2576 | ||
9385eb3d | 2577 | static INLINE void |
59e0d9fe A |
2578 | large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) |
2579 | { | |
2580 | unsigned num_large_entries = szone->num_large_entries; | |
2581 | unsigned hash_index = entry - szone->large_entries; | |
2582 | unsigned index = hash_index; | |
2583 | large_entry_t range; | |
2584 | ||
e9ce8d39 | 2585 | do { |
59e0d9fe A |
2586 | index++; |
2587 | if (index == num_large_entries) | |
2588 | index = 0; | |
9385eb3d | 2589 | range = szone->large_entries[index]; |
59e0d9fe A |
2590 | if (LARGE_ENTRY_IS_EMPTY(range)) |
2591 | return; | |
9385eb3d A |
2592 | szone->large_entries[index].address_and_num_pages = 0; |
2593 | large_entry_insert_no_lock(szone, range); // this will reinsert in the | |
2594 | // proper place | |
e9ce8d39 A |
2595 | } while (index != hash_index); |
2596 | } | |
2597 | ||
9385eb3d | 2598 | static INLINE large_entry_t * |
59e0d9fe A |
2599 | large_entries_alloc_no_lock(szone_t *szone, unsigned num) |
2600 | { | |
e9ce8d39 A |
2601 | size_t size = num * sizeof(large_entry_t); |
2602 | boolean_t is_vm_allocation = size >= LARGE_THRESHOLD; | |
59e0d9fe | 2603 | |
e9ce8d39 | 2604 | if (is_vm_allocation) { |
9385eb3d A |
2605 | // Note that we allocate memory (via a system call) under a spin lock |
2606 | // That is certainly evil; however, it's very rare in the lifetime of a process |
2607 | // The alternative would slow down the normal case | |
2608 | return (void *)allocate_pages(szone, round_page(size), 0, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE)); | |
e9ce8d39 | 2609 | } else { |
59e0d9fe | 2610 | return small_malloc_cleared_no_lock(szone, SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1)); |
e9ce8d39 A |
2611 | } |
2612 | } | |
2613 | ||
9385eb3d | 2614 | static void |
59e0d9fe A |
2615 | large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate) |
2616 | { | |
9385eb3d | 2617 | // sets range_to_deallocate to the range to truly deallocate |
59e0d9fe A |
2618 | size_t size = num * sizeof(large_entry_t); |
2619 | boolean_t is_vm_allocation = size >= LARGE_THRESHOLD; | |
2620 | small_region_t *region; | |
2621 | msize_t msize_and_free; | |
2622 | ||
e9ce8d39 | 2623 | if (is_vm_allocation) { |
9385eb3d A |
2624 | range_to_deallocate->address = (vm_address_t)entries; |
2625 | range_to_deallocate->size = round_page(size); | |
e9ce8d39 | 2626 | } else { |
9385eb3d | 2627 | range_to_deallocate->size = 0; |
59e0d9fe A |
2628 | region = small_region_for_ptr_no_lock(szone, entries); |
2629 | msize_and_free = *SMALL_METADATA_FOR_PTR(entries); | |
9385eb3d | 2630 | if (msize_and_free & SMALL_IS_FREE) { |
59e0d9fe | 2631 | szone_error(szone, "object already freed being freed", entries); |
9385eb3d A |
2632 | return; |
2633 | } | |
2634 | small_free_no_lock(szone, region, entries, msize_and_free); | |
e9ce8d39 A |
2635 | } |
2636 | } | |
2637 | ||
9385eb3d | 2638 | static void |
59e0d9fe A |
2639 | large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate) |
2640 | { | |
9385eb3d | 2641 | // sets range_to_deallocate |
e9ce8d39 A |
2642 | unsigned old_num_entries = szone->num_large_entries; |
2643 | large_entry_t *old_entries = szone->large_entries; | |
59e0d9fe | 2644 | unsigned new_num_entries = (old_num_entries) ? old_num_entries * 2 + 1 : 63; // always an odd number for good hashing |
e9ce8d39 A |
2645 | large_entry_t *new_entries = large_entries_alloc_no_lock(szone, new_num_entries); |
2646 | unsigned index = old_num_entries; | |
59e0d9fe A |
2647 | large_entry_t oldRange; |
2648 | ||
e9ce8d39 A |
2649 | szone->num_large_entries = new_num_entries; |
2650 | szone->large_entries = new_entries; | |
59e0d9fe A |
2651 | |
2652 | /* rehash entries into the new list */ | |
e9ce8d39 | 2653 | while (index--) { |
59e0d9fe | 2654 | oldRange = old_entries[index]; |
9385eb3d A |
2655 | if (!LARGE_ENTRY_IS_EMPTY(oldRange)) { |
2656 | large_entry_insert_no_lock(szone, oldRange); | |
2657 | } | |
2658 | } | |
2659 | if (old_entries) { | |
2660 | large_entries_free_no_lock(szone, old_entries, old_num_entries, range_to_deallocate); | |
2661 | } else { | |
2662 | range_to_deallocate->size = 0; | |
e9ce8d39 | 2663 | } |
e9ce8d39 A |
2664 | } |
2665 | ||
59e0d9fe A |
2666 | // frees the specific entry in the size table |
2667 | // returns a range to truly deallocate | |
9385eb3d | 2668 | static vm_range_t |
59e0d9fe A |
2669 | large_free_no_lock(szone_t *szone, large_entry_t *entry) |
2670 | { | |
e9ce8d39 | 2671 | vm_range_t range; |
59e0d9fe A |
2672 | |
2673 | range.address = (vm_address_t)LARGE_ENTRY_ADDRESS(*entry); | |
2674 | range.size = (vm_size_t)LARGE_ENTRY_SIZE(*entry); | |
2675 | szone->num_large_objects_in_use--; | |
e9ce8d39 A |
2676 | szone->num_bytes_in_large_objects -= range.size; |
2677 | if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) { | |
59e0d9fe A |
2678 | protect(szone, (void *)range.address, range.size, VM_PROT_READ | VM_PROT_WRITE, szone->debug_flags); |
2679 | range.address -= vm_page_size; | |
2680 | range.size += 2 * vm_page_size; | |
2681 | } | |
e9ce8d39 A |
2682 | entry->address_and_num_pages = 0; |
2683 | large_entries_rehash_after_entry_no_lock(szone, entry); | |
2684 | #if DEBUG_MALLOC | |
2685 | if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) { | |
59e0d9fe A |
2686 | malloc_printf("*** freed entry %p still in use; num_large_entries=%d\n", |
2687 | range.address, szone->num_large_entries); | |
9385eb3d A |
2688 | large_debug_print(szone); |
2689 | szone_sleep(); | |
e9ce8d39 A |
2690 | } |
2691 | #endif | |
3b2a1fe8 A |
2692 | return range; |
2693 | } | |
2694 | ||
9385eb3d | 2695 | static kern_return_t |
59e0d9fe A |
2696 | large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) |
2697 | { | |
e9ce8d39 A |
2698 | unsigned index = 0; |
2699 | vm_range_t buffer[MAX_RECORDER_BUFFER]; | |
2700 | unsigned count = 0; | |
2701 | large_entry_t *entries; | |
2702 | kern_return_t err; | |
59e0d9fe A |
2703 | vm_range_t range; |
2704 | large_entry_t entry; | |
2705 | ||
2706 | err = reader(task, large_entries_address, sizeof(large_entry_t) * num_entries, (void **)&entries); | |
2707 | if (err) | |
2708 | return err; | |
e9ce8d39 | 2709 | index = num_entries; |
59e0d9fe A |
2710 | if ((type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) && |
2711 | (num_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD)) { | |
9385eb3d A |
2712 | range.address = large_entries_address; |
2713 | range.size = round_page(num_entries * sizeof(large_entry_t)); | |
2714 | recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &range, 1); | |
2715 | } | |
59e0d9fe A |
2716 | if (type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE)) |
2717 | while (index--) { | |
2718 | entry = entries[index]; | |
2719 | if (!LARGE_ENTRY_IS_EMPTY(entry)) { | |
2720 | range.address = (vm_address_t)LARGE_ENTRY_ADDRESS(entry); | |
2721 | range.size = (vm_size_t)LARGE_ENTRY_SIZE(entry); | |
2722 | buffer[count++] = range; | |
2723 | if (count >= MAX_RECORDER_BUFFER) { | |
2724 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count); | |
2725 | count = 0; | |
2726 | } | |
9385eb3d A |
2727 | } |
2728 | } | |
9385eb3d A |
2729 | if (count) { |
2730 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | |
2731 | | MALLOC_PTR_REGION_RANGE_TYPE, buffer, count); | |
2732 | } | |
e9ce8d39 A |
2733 | return 0; |
2734 | } | |
2735 | ||
2736 | /********************* HUGE ENTRY UTILITIES ************************/ | |
2737 | ||
9385eb3d | 2738 | static huge_entry_t * |
59e0d9fe A |
2739 | huge_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) |
2740 | { | |
2741 | unsigned index; | |
2742 | huge_entry_t *huge; | |
2743 | ||
2744 | for (index = szone->num_huge_entries, huge = szone->huge_entries; | |
2745 | index > 0; | |
2746 | index--, huge++) { | |
2747 | ||
2748 | if ((void *)huge->address == ptr) | |
2749 | return huge; | |
e9ce8d39 A |
2750 | } |
2751 | return NULL; | |
2752 | } | |
2753 | ||
9385eb3d | 2754 | static boolean_t |
59e0d9fe A |
2755 | huge_entry_append(szone_t *szone, huge_entry_t huge) |
2756 | { | |
2757 | huge_entry_t *new_huge_entries = NULL, *old_huge_entries; | |
2758 | unsigned num_huge_entries; | |
2759 | ||
9385eb3d A |
2760 | // We do a little dance with locking because doing allocation (even in the |
2761 | // default szone) may cause something to get freed in this szone, which |
2762 | // could deadlock |
2763 | // Returns 1 on success | |
e9ce8d39 | 2764 | SZONE_LOCK(szone); |
59e0d9fe | 2765 | for (;;) { |
9385eb3d A |
2766 | num_huge_entries = szone->num_huge_entries; |
2767 | SZONE_UNLOCK(szone); | |
59e0d9fe A |
2768 | /* check for counter wrap */ |
2769 | if ((num_huge_entries + 1) < num_huge_entries) | |
2770 | return 0; | |
2771 | /* stale allocation from last time around the loop? */ | |
2772 | if (new_huge_entries) | |
2773 | szone_free(szone, new_huge_entries); | |
9385eb3d | 2774 | new_huge_entries = szone_malloc(szone, (num_huge_entries + 1) * sizeof(huge_entry_t)); |
59e0d9fe A |
2775 | if (new_huge_entries == NULL) |
2776 | return 0; | |
9385eb3d A |
2777 | SZONE_LOCK(szone); |
2778 | if (num_huge_entries == szone->num_huge_entries) { | |
2779 | // No change - our malloc still applies | |
59e0d9fe | 2780 | old_huge_entries = szone->huge_entries; |
9385eb3d A |
2781 | if (num_huge_entries) { |
2782 | memcpy(new_huge_entries, old_huge_entries, num_huge_entries * sizeof(huge_entry_t)); | |
2783 | } | |
2784 | new_huge_entries[szone->num_huge_entries++] = huge; | |
2785 | szone->huge_entries = new_huge_entries; | |
2786 | SZONE_UNLOCK(szone); | |
2787 | szone_free(szone, old_huge_entries); | |
5b2abdfb | 2788 | return 1; |
9385eb3d A |
2789 | } |
2790 | // try again! | |
2791 | } | |
2792 | } | |
2793 | ||
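/*
 * Condensed sketch of the drop-the-lock / allocate / revalidate / retry pattern
 * huge_entry_append uses: the allocation happens outside the lock (it may
 * recurse into this very zone), then the guarded counter is re-checked; if it
 * moved while we were unlocked, the freshly sized buffer is stale and the loop
 * goes around again.  The example_* lock, counter and array are stand-ins for
 * the szone fields, and stdlib malloc/free stand in for szone_malloc/szone_free.
 */
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned example_count;
static int *example_array;

static int
example_append(int value)
{
	int *fresh = NULL;
	unsigned seen;

	pthread_mutex_lock(&example_lock);
	for (;;) {
		seen = example_count;
		pthread_mutex_unlock(&example_lock);
		free(fresh);				/* stale buffer from a previous lap */
		fresh = malloc((seen + 1) * sizeof(int));
		if (fresh == NULL)
			return 0;			/* lock is not held here */
		pthread_mutex_lock(&example_lock);
		if (seen == example_count) {		/* nothing changed while unlocked */
			int *old = example_array;
			if (seen)
				memcpy(fresh, old, seen * sizeof(int));
			fresh[example_count++] = value;
			example_array = fresh;
			pthread_mutex_unlock(&example_lock);
			free(old);
			return 1;
		}
		/* the counter moved: size the buffer again on the next lap */
	}
}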
2794 | static kern_return_t | |
59e0d9fe A |
2795 | huge_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t huge_entries_address, unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) |
2796 | { | |
e9ce8d39 A |
2797 | huge_entry_t *entries; |
2798 | kern_return_t err; | |
59e0d9fe A |
2799 | |
2800 | err = reader(task, huge_entries_address, sizeof(huge_entry_t) * num_entries, (void **)&entries); | |
2801 | if (err) | |
2802 | return err; | |
2803 | if (num_entries) | |
2804 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, entries, num_entries); | |
2805 | ||
e9ce8d39 A |
2806 | return 0; |
2807 | } | |
2808 | ||
9385eb3d | 2809 | static void * |
59e0d9fe A |
2810 | large_and_huge_malloc(szone_t *szone, unsigned num_pages) |
2811 | { | |
2812 | void *addr; | |
9385eb3d | 2813 | vm_range_t range_to_deallocate; |
59e0d9fe A |
2814 | huge_entry_t huge_entry; |
2815 | size_t size; | |
2816 | large_entry_t large_entry; | |
2817 | ||
2818 | if (!num_pages) | |
2819 | num_pages = 1; // minimal allocation size for this szone | |
2820 | size = (size_t)num_pages << vm_page_shift; | |
9385eb3d | 2821 | range_to_deallocate.size = 0; |
e9ce8d39 | 2822 | if (num_pages >= (1 << vm_page_shift)) { |
59e0d9fe A |
2823 | addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_HUGE)); |
2824 | if (addr == NULL) | |
2825 | return NULL; | |
2826 | huge_entry.size = size; | |
2827 | huge_entry.address = (vm_address_t)addr; | |
2828 | if (!huge_entry_append(szone, huge_entry)) | |
2829 | return NULL; // we are leaking the allocation here | |
9385eb3d | 2830 | SZONE_LOCK(szone); |
59e0d9fe | 2831 | szone->num_bytes_in_huge_objects += size; |
e9ce8d39 | 2832 | } else { |
59e0d9fe | 2833 | |
9385eb3d A |
2834 | addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MAKE_TAG(VM_MEMORY_MALLOC_LARGE)); |
2835 | #if DEBUG_MALLOC | |
59e0d9fe A |
2836 | if (LOG(szone, addr)) |
2837 | malloc_printf("in szone_malloc true large allocation at %p for %y\n", (void *)addr, size); | |
9385eb3d | 2838 | #endif |
5b2abdfb | 2839 | SZONE_LOCK(szone); |
59e0d9fe | 2840 | if (addr == NULL) { |
5b2abdfb A |
2841 | SZONE_UNLOCK(szone); |
2842 | return NULL; | |
e9ce8d39 | 2843 | } |
e9ce8d39 | 2844 | #if DEBUG_MALLOC |
59e0d9fe A |
2845 | if (large_entry_for_pointer_no_lock(szone, addr)) { |
2846 | malloc_printf("freshly allocated is already in use: %p\n", addr); | |
5b2abdfb | 2847 | large_debug_print(szone); |
9385eb3d | 2848 | szone_sleep(); |
5b2abdfb | 2849 | } |
e9ce8d39 | 2850 | #endif |
9385eb3d A |
2851 | if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) { |
2852 | // density of hash table too high; grow table | |
2853 | // we do that under lock to avoid a race | |
9385eb3d A |
2854 | large_entries_grow_no_lock(szone, &range_to_deallocate); |
2855 | } | |
59e0d9fe | 2856 | large_entry.address_and_num_pages = (uintptr_t)addr | num_pages; |
e9ce8d39 | 2857 | #if DEBUG_MALLOC |
59e0d9fe A |
2858 | if (large_entry_for_pointer_no_lock(szone, addr)) { |
2859 | malloc_printf("entry about to be added already in use: %p\n", addr); | |
9385eb3d A |
2860 | large_debug_print(szone); |
2861 | szone_sleep(); | |
2862 | } | |
e9ce8d39 | 2863 | #endif |
59e0d9fe | 2864 | large_entry_insert_no_lock(szone, large_entry); |
e9ce8d39 | 2865 | #if DEBUG_MALLOC |
9385eb3d | 2866 | if (!large_entry_for_pointer_no_lock(szone, (void *)addr)) { |
59e0d9fe | 2867 | malloc_printf("can't find entry just added\n"); |
9385eb3d A |
2868 | large_debug_print(szone); |
2869 | szone_sleep(); | |
2870 | } | |
e9ce8d39 | 2871 | #endif |
9385eb3d A |
2872 | szone->num_large_objects_in_use ++; |
2873 | szone->num_bytes_in_large_objects += size; | |
e9ce8d39 A |
2874 | } |
2875 | SZONE_UNLOCK(szone); | |
9385eb3d | 2876 | if (range_to_deallocate.size) { |
59e0d9fe | 2877 | deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0); // we deallocate outside the lock |
9385eb3d | 2878 | } |
e9ce8d39 A |
2879 | return (void *)addr; |
2880 | } | |
2881 | ||
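/*
 * Sketch of the address_and_num_pages packing that bounds the "large" range in
 * large_and_huge_malloc above: the low vm_page_shift bits carry the page count
 * and the remaining bits carry the page-aligned address, so the count must stay
 * below (1 << vm_page_shift).  Assuming the 4KB/12-bit page this file is built
 * for, that puts the large/huge boundary at 4096 pages, i.e. 16MB; anything
 * bigger is tracked in the huge_entries array instead.  EXAMPLE_PAGE_SHIFT and
 * the example_* helpers are illustrative only.
 */
#define EXAMPLE_PAGE_SHIFT 12			/* assumed page shift */

static uintptr_t
example_pack_large(uintptr_t page_aligned_addr, unsigned num_pages)
{
	/* caller guarantees num_pages < (1 << EXAMPLE_PAGE_SHIFT) and page alignment */
	return page_aligned_addr | num_pages;
}

static unsigned
example_unpack_num_pages(uintptr_t packed)
{
	return (unsigned)(packed & ((1 << EXAMPLE_PAGE_SHIFT) - 1));
}

static uintptr_t
example_unpack_address(uintptr_t packed)
{
	return packed & ~(uintptr_t)((1 << EXAMPLE_PAGE_SHIFT) - 1);
}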
9385eb3d | 2882 | static INLINE void |
59e0d9fe A |
2883 | free_large_or_huge(szone_t *szone, void *ptr) |
2884 | { | |
9385eb3d | 2885 | // We have established ptr is page-aligned and not tiny nor small |
e9ce8d39 A |
2886 | large_entry_t *entry; |
2887 | vm_range_t vm_range_to_deallocate; | |
2888 | huge_entry_t *huge; | |
59e0d9fe | 2889 | |
e9ce8d39 A |
2890 | SZONE_LOCK(szone); |
2891 | entry = large_entry_for_pointer_no_lock(szone, ptr); | |
2892 | if (entry) { | |
9385eb3d | 2893 | vm_range_to_deallocate = large_free_no_lock(szone, entry); |
e9ce8d39 | 2894 | #if DEBUG_MALLOC |
9385eb3d | 2895 | if (large_entry_for_pointer_no_lock(szone, ptr)) { |
59e0d9fe | 2896 | malloc_printf("*** just after freeing %p still in use num_large_entries=%d\n", ptr, szone->num_large_entries); |
9385eb3d A |
2897 | large_debug_print(szone); |
2898 | szone_sleep(); | |
2899 | } | |
e9ce8d39 A |
2900 | #endif |
2901 | } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) { | |
9385eb3d A |
2902 | vm_range_to_deallocate = *huge; |
2903 | *huge = szone->huge_entries[--szone->num_huge_entries]; // last entry fills that spot | |
59e0d9fe | 2904 | szone->num_bytes_in_huge_objects -= (size_t)vm_range_to_deallocate.size; |
e9ce8d39 A |
2905 | } else { |
2906 | #if DEBUG_MALLOC | |
9385eb3d | 2907 | large_debug_print(szone); |
e9ce8d39 | 2908 | #endif |
59e0d9fe | 2909 | szone_error(szone, "pointer being freed was not allocated", ptr); |
9385eb3d | 2910 | return; |
e9ce8d39 | 2911 | } |
e9ce8d39 | 2912 | SZONE_UNLOCK(szone); // we release the lock asap |
9385eb3d | 2913 | CHECK(szone, __PRETTY_FUNCTION__); |
e9ce8d39 A |
2914 | // we deallocate_pages, including guard pages |
2915 | if (vm_range_to_deallocate.address) { | |
9385eb3d | 2916 | #if DEBUG_MALLOC |
59e0d9fe A |
2917 | if (large_entry_for_pointer_no_lock(szone, (void *)vm_range_to_deallocate.address)) { |
2918 | malloc_printf("*** invariant broken: %p still in use num_large_entries=%d\n", vm_range_to_deallocate.address, szone->num_large_entries); | |
9385eb3d A |
2919 | large_debug_print(szone); |
2920 | szone_sleep(); | |
2921 | } | |
2922 | #endif | |
59e0d9fe | 2923 | deallocate_pages(szone, (void *)vm_range_to_deallocate.address, (size_t)vm_range_to_deallocate.size, 0); |
9385eb3d A |
2924 | } |
2925 | } | |
2926 | ||
2927 | static INLINE int | |
59e0d9fe A |
2928 | try_realloc_large_or_huge_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) |
2929 | { | |
9385eb3d | 2930 | vm_address_t addr = (vm_address_t)ptr + old_size; |
59e0d9fe A |
2931 | large_entry_t *large_entry, saved_entry; |
2932 | huge_entry_t *huge_entry, huge; | |
9385eb3d | 2933 | kern_return_t err; |
59e0d9fe | 2934 | |
e9ce8d39 | 2935 | #if DEBUG_MALLOC |
9385eb3d A |
2936 | if (old_size != ((old_size >> vm_page_shift) << vm_page_shift)) { |
2937 | malloc_printf("*** old_size is %d\n", old_size); | |
2938 | } | |
e9ce8d39 | 2939 | #endif |
9385eb3d | 2940 | SZONE_LOCK(szone); |
59e0d9fe | 2941 | large_entry = large_entry_for_pointer_no_lock(szone, (void *)addr); |
9385eb3d | 2942 | SZONE_UNLOCK(szone); |
59e0d9fe A |
2943 | if (large_entry) { |
2944 | return 0; // large pointer already exists in table - extension is not going to work | |
9385eb3d A |
2945 | } |
2946 | new_size = round_page(new_size); | |
9385eb3d A |
2947 | /* |
2948 | * Ask for allocation at a specific address, and mark as realloc | |
2949 | * to request coalescing with previous realloc'ed extensions. | |
2950 | */ | |
2951 | err = vm_allocate(mach_task_self(), &addr, new_size - old_size, VM_MAKE_TAG(VM_MEMORY_REALLOC)); | |
2952 | if (err != KERN_SUCCESS) { | |
2953 | return 0; | |
2954 | } | |
2955 | SZONE_LOCK(szone); | |
2956 | /* | |
2957 | * If the new size is still under the large/huge threshold, we can just | |
2958 | * extend the existing large block. | |
2959 | * | |
2960 | * Note: this logic is predicated on the understanding that an allocated | |
2961 | * block can never really shrink, so that the new size will always be | |
2962 | * larger than the old size. | |
59e0d9fe A |
2963 | * |
2964 | * Note: the use of 1 << vm_page_shift here has to do with the subdivision | |
2965 | * of the bits in the large_entry_t, and not the size of a page (directly). | |
9385eb3d A |
2966 | */ |
2967 | if ((new_size >> vm_page_shift) < (1 << vm_page_shift)) { | |
2968 | /* extend existing large entry */ | |
59e0d9fe A |
2969 | large_entry = large_entry_for_pointer_no_lock(szone, ptr); |
2970 | if (!large_entry) { | |
9385eb3d A |
2971 | szone_error(szone, "large entry reallocated is not properly in table", ptr); |
2972 | /* XXX will cause fault on next reference to entry */ | |
2973 | } | |
59e0d9fe | 2974 | large_entry->address_and_num_pages = (uintptr_t)ptr | (new_size >> vm_page_shift); |
9385eb3d A |
2975 | szone->num_bytes_in_large_objects += new_size - old_size; |
2976 | } else if ((old_size >> vm_page_shift) >= (1 << vm_page_shift)) { | |
2977 | /* extend existing huge entry */ | |
59e0d9fe | 2978 | huge_entry = huge_entry_for_pointer_no_lock(szone, ptr); |
9385eb3d A |
2979 | if (!huge_entry) { |
2980 | szone_error(szone, "huge entry reallocated is not properly in table", ptr); | |
2981 | /* XXX will cause fault on next reference to huge_entry */ | |
2982 | } | |
2983 | huge_entry->size = new_size; | |
2984 | szone->num_bytes_in_huge_objects += new_size - old_size; | |
2985 | } else { | |
2986 | /* need to convert large entry to huge entry */ | |
9385eb3d A |
2987 | |
2988 | /* release large entry, note we still have the VM allocation */ | |
59e0d9fe A |
2989 | large_entry = large_entry_for_pointer_no_lock(szone, ptr); |
2990 | saved_entry = *large_entry; // in case we need to put it back | |
2991 | large_free_no_lock(szone, large_entry); | |
9385eb3d A |
2992 | szone->num_bytes_in_large_objects -= old_size; |
2993 | ||
2994 | /* and get a huge entry */ | |
2995 | huge.address = (vm_address_t)ptr; | |
2996 | huge.size = new_size; /* fix up size */ | |
2997 | SZONE_UNLOCK(szone); | |
2998 | if (huge_entry_append(szone, huge)) { | |
2999 | szone->num_bytes_in_huge_objects += new_size; | |
3000 | return 1; // success! | |
3001 | } | |
3002 | SZONE_LOCK(szone); | |
3003 | // we leak memory (the extra space appended) but data structures are correct | |
3004 | large_entry_insert_no_lock(szone, saved_entry); // this will reinsert the large entry | |
e9ce8d39 | 3005 | } |
9385eb3d A |
3006 | SZONE_UNLOCK(szone); // we release the lock asap |
3007 | return 1; | |
e9ce8d39 A |
3008 | } |
3009 | ||
9385eb3d A |
3010 | /********************* Zone call backs ************************/ |
3011 | ||
3012 | static void | |
59e0d9fe A |
3013 | szone_free(szone_t *szone, void *ptr) |
3014 | { | |
3015 | tiny_region_t *tiny_region; | |
3016 | small_region_t *small_region; | |
3017 | ||
e9ce8d39 | 3018 | #if DEBUG_MALLOC |
59e0d9fe A |
3019 | if (LOG(szone, ptr)) |
3020 | malloc_printf("in szone_free with %p\n", ptr); | |
e9ce8d39 | 3021 | #endif |
59e0d9fe A |
3022 | if (!ptr) |
3023 | return; | |
3024 | /* | |
3025 | * Try to free to a tiny region. | |
3026 | */ | |
3027 | if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) { | |
9385eb3d A |
3028 | szone_error(szone, "Non-aligned pointer being freed", ptr); |
3029 | return; | |
3030 | } | |
59e0d9fe | 3031 | if ((tiny_region = tiny_region_for_ptr_no_lock(szone, ptr)) != NULL) { |
9385eb3d A |
3032 | free_tiny(szone, ptr, tiny_region); |
3033 | return; | |
3034 | } | |
59e0d9fe A |
3035 | |
3036 | /* | |
3037 | * Try to free to a small region. | |
3038 | */ | |
3039 | if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) { | |
9385eb3d A |
3040 | szone_error(szone, "Non-aligned pointer being freed (2)", ptr); |
3041 | return; | |
3042 | } | |
59e0d9fe | 3043 | if ((small_region = small_region_for_ptr_no_lock(szone, ptr)) != NULL) { |
9385eb3d A |
3044 | free_small(szone, ptr, small_region); |
3045 | return; | |
3046 | } | |
59e0d9fe A |
3047 | |
3048 | /* check that it's a legal large/huge allocation */ | |
3049 | if ((uintptr_t)ptr & (vm_page_size - 1)) { | |
3050 | szone_error(szone, "non-page-aligned, non-allocated pointer being freed", ptr); | |
9385eb3d A |
3051 | return; |
3052 | } | |
3053 | free_large_or_huge(szone, ptr); | |
3054 | } | |
3055 | ||
3056 | static INLINE void * | |
59e0d9fe A |
3057 | szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested) |
3058 | { | |
9385eb3d | 3059 | void *ptr; |
59e0d9fe A |
3060 | msize_t msize; |
3061 | unsigned num_pages; | |
3062 | ||
9385eb3d A |
3063 | if (size <= 31*TINY_QUANTUM) { |
3064 | // think tiny | |
59e0d9fe A |
3065 | msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); |
3066 | if (!msize) | |
3067 | msize = 1; | |
9385eb3d A |
3068 | ptr = tiny_malloc_should_clear(szone, msize, cleared_requested); |
3069 | } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) { | |
3070 | // think small | |
59e0d9fe | 3071 | msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1); |
9385eb3d A |
3072 | if (! msize) msize = 1; |
3073 | ptr = small_malloc_should_clear(szone, msize, cleared_requested); | |
e9ce8d39 | 3074 | } else { |
59e0d9fe | 3075 | // large or huge |
9385eb3d A |
3076 | num_pages = round_page(size) >> vm_page_shift; |
3077 | ptr = large_and_huge_malloc(szone, num_pages); | |
e9ce8d39 | 3078 | } |
9385eb3d | 3079 | #if DEBUG_MALLOC |
59e0d9fe A |
3080 | if (LOG(szone, ptr)) |
3081 | malloc_printf("szone_malloc returned %p\n", ptr); | |
9385eb3d | 3082 | #endif |
59e0d9fe A |
3083 | /* |
3084 | * If requested, scribble on allocated memory. | |
3085 | */ | |
3086 | if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && ptr && !cleared_requested && size) | |
3087 | memset(ptr, 0xaa, size); | |
3088 | ||
e9ce8d39 A |
3089 | return ptr; |
3090 | } | |
3091 | ||
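/*
 * Compact sketch of the three-way size dispatch performed by
 * szone_malloc_should_clear above.  The EXAMPLE_* cutoffs are placeholders:
 * the real ones are 31*TINY_QUANTUM and LARGE_THRESHOLD, the real paths are
 * the tiny, small and large/huge allocators in this file, and the real code
 * additionally diverts small sizes to the large path when guard pages are
 * requested.
 */
#define EXAMPLE_TINY_LIMIT	(31 * 16)	/* assumed 16-byte tiny quantum */
#define EXAMPLE_SMALL_LIMIT	(15 * 1024)	/* assumed small/large threshold */

typedef enum { EXAMPLE_TINY, EXAMPLE_SMALL, EXAMPLE_LARGE_OR_HUGE } example_class_t;

static example_class_t
example_classify(size_t size)
{
	if (size <= EXAMPLE_TINY_LIMIT)
		return EXAMPLE_TINY;		/* rounded up to whole tiny quanta */
	if (size < EXAMPLE_SMALL_LIMIT)
		return EXAMPLE_SMALL;		/* rounded up to whole small quanta */
	return EXAMPLE_LARGE_OR_HUGE;		/* rounded up to whole pages */
}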
9385eb3d A |
3092 | static void * |
3093 | szone_malloc(szone_t *szone, size_t size) { | |
59e0d9fe | 3094 | return szone_malloc_should_clear(szone, size, 0); |
e9ce8d39 A |
3095 | } |
3096 | ||
9385eb3d | 3097 | static void * |
59e0d9fe A |
3098 | szone_calloc(szone_t *szone, size_t num_items, size_t size) |
3099 | { | |
3100 | return szone_malloc_should_clear(szone, num_items * size, 1); | |
e9ce8d39 A |
3101 | } |
3102 | ||
9385eb3d | 3103 | static void * |
59e0d9fe A |
3104 | szone_valloc(szone_t *szone, size_t size) |
3105 | { | |
e9ce8d39 A |
3106 | void *ptr; |
3107 | unsigned num_pages; | |
59e0d9fe | 3108 | |
e9ce8d39 | 3109 | num_pages = round_page(size) >> vm_page_shift; |
5b2abdfb | 3110 | ptr = large_and_huge_malloc(szone, num_pages); |
9385eb3d | 3111 | #if DEBUG_MALLOC |
59e0d9fe A |
3112 | if (LOG(szone, ptr)) |
3113 | malloc_printf("szone_valloc returned %p\n", ptr); | |
9385eb3d | 3114 | #endif |
e9ce8d39 A |
3115 | return ptr; |
3116 | } | |
3117 | ||
9385eb3d | 3118 | static size_t |
59e0d9fe A |
3119 | szone_size(szone_t *szone, const void *ptr) |
3120 | { | |
e9ce8d39 | 3121 | size_t size = 0; |
59e0d9fe A |
3122 | boolean_t is_free; |
3123 | msize_t msize, msize_and_free; | |
e9ce8d39 A |
3124 | large_entry_t *entry; |
3125 | huge_entry_t *huge; | |
59e0d9fe A |
3126 | |
3127 | if (!ptr) | |
3128 | return 0; | |
9385eb3d A |
3129 | #if DEBUG_MALLOC |
3130 | if (LOG(szone, ptr)) { | |
59e0d9fe A |
3131 | malloc_printf("in szone_size for %p (szone=%p)\n", ptr, szone); |
3132 | } | |
3133 | #endif | |
3134 | ||
3135 | /* | |
3136 | * Look for it in a tiny region. | |
3137 | */ | |
3138 | if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) | |
3139 | return 0; | |
3140 | if (tiny_region_for_ptr_no_lock(szone, ptr)) { | |
3141 | msize = get_tiny_meta_header(ptr, &is_free); | |
3142 | return (is_free) ? 0 : TINY_BYTES_FOR_MSIZE(msize); | |
3143 | } | |
3144 | ||
3145 | /* | |
3146 | * Look for it in a small region. | |
3147 | */ | |
3148 | if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) | |
9385eb3d | 3149 | return 0; |
59e0d9fe A |
3150 | if (small_region_for_ptr_no_lock(szone, ptr)) { |
3151 | msize_and_free = *SMALL_METADATA_FOR_PTR(ptr); | |
3152 | return (msize_and_free & SMALL_IS_FREE) ? 0 : SMALL_BYTES_FOR_MSIZE(msize_and_free); | |
e9ce8d39 | 3153 | } |
59e0d9fe A |
3154 | |
3155 | /* | |
3156 | * If not page-aligned, it cannot have come from a large or huge allocation. | |
3157 | */ | |
3158 | if ((uintptr_t)ptr & (vm_page_size - 1)) | |
3159 | return(0); | |
3160 | ||
3161 | /* | |
3162 | * Look for it in a large or huge entry. | |
3163 | */ | |
e9ce8d39 A |
3164 | SZONE_LOCK(szone); |
3165 | entry = large_entry_for_pointer_no_lock(szone, ptr); | |
3166 | if (entry) { | |
9385eb3d | 3167 | size = LARGE_ENTRY_SIZE(*entry); |
e9ce8d39 | 3168 | } else if ((huge = huge_entry_for_pointer_no_lock(szone, ptr))) { |
9385eb3d | 3169 | size = huge->size; |
e9ce8d39 A |
3170 | } |
3171 | SZONE_UNLOCK(szone); | |
9385eb3d A |
3172 | #if DEBUG_MALLOC |
3173 | if (LOG(szone, ptr)) { | |
3174 | malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)size); | |
3175 | } | |
3176 | #endif | |
e9ce8d39 A |
3177 | return size; |
3178 | } | |
3179 | ||
9385eb3d | 3180 | static void * |
59e0d9fe A |
3181 | szone_realloc(szone_t *szone, void *ptr, size_t new_size) |
3182 | { | |
3183 | size_t old_size; | |
9385eb3d | 3184 | void *new_ptr; |
59e0d9fe | 3185 | |
e9ce8d39 | 3186 | #if DEBUG_MALLOC |
9385eb3d | 3187 | if (LOG(szone, ptr)) { |
59e0d9fe | 3188 | malloc_printf("in szone_realloc for %p, %d\n", ptr, (unsigned)new_size); |
e9ce8d39 A |
3189 | } |
3190 | #endif | |
9385eb3d A |
3191 | if (!ptr) { |
3192 | ptr = szone_malloc(szone, new_size); | |
9385eb3d A |
3193 | return ptr; |
3194 | } | |
3195 | old_size = szone_size(szone, ptr); | |
3196 | if (!old_size) { | |
59e0d9fe | 3197 | szone_error(szone, "pointer being reallocated was not allocated", ptr); |
9385eb3d | 3198 | return NULL; |
e9ce8d39 | 3199 | } |
9385eb3d | 3200 | /* we never shrink an allocation */ |
59e0d9fe A |
3201 | if (old_size >= new_size) |
3202 | return ptr; | |
3203 | ||
3204 | /* | |
3205 | * If the old and new sizes both suit the tiny allocator, try to reallocate in-place. | |
3206 | */ | |
9385eb3d | 3207 | if ((new_size + TINY_QUANTUM - 1) <= 31 * TINY_QUANTUM) { |
9385eb3d | 3208 | if (try_realloc_tiny_in_place(szone, ptr, old_size, new_size)) { |
9385eb3d A |
3209 | return ptr; |
3210 | } | |
59e0d9fe A |
3211 | |
3212 | /* | |
3213 | * If the old and new sizes both suit the small allocator, and we're not protecting the | |
3214 | * small allocations, try to reallocate in-place. | |
3215 | */ | |
3216 | } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && | |
3217 | ((new_size + SMALL_QUANTUM - 1) < LARGE_THRESHOLD) && | |
3218 | (old_size > 31 * TINY_QUANTUM)) { | |
9385eb3d | 3219 | if (try_realloc_small_in_place(szone, ptr, old_size, new_size)) { |
9385eb3d A |
3220 | return ptr; |
3221 | } | |
59e0d9fe A |
3222 | |
3223 | /* | |
3224 | * If the allocation's a large or huge allocation, try to reallocate in-place there. | |
3225 | */ | |
9385eb3d A |
3226 | } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (old_size > LARGE_THRESHOLD)) { |
3227 | if (try_realloc_large_or_huge_in_place(szone, ptr, old_size, new_size)) { | |
3228 | return ptr; | |
3229 | } | |
3230 | } | |
59e0d9fe A |
3231 | |
3232 | /* | |
3233 | * Can't reallocate in place for whatever reason; allocate a new buffer and copy. | |
3234 | */ | |
9385eb3d | 3235 | new_ptr = szone_malloc(szone, new_size); |
59e0d9fe A |
3236 | if (new_ptr == NULL) |
3237 | return NULL; | |
3238 | ||
3239 | /* | |
3240 | * If the allocation's large enough, try to copy using VM. If that fails, or | |
3241 | * if it's too small, just copy by hand. | |
3242 | */ | |
3243 | if ((old_size < VM_COPY_THRESHOLD) || | |
3244 | vm_copy(mach_task_self(), (vm_address_t)ptr, old_size, (vm_address_t)new_ptr)) | |
9385eb3d | 3245 | memcpy(new_ptr, ptr, old_size); |
9385eb3d | 3246 | szone_free(szone, ptr); |
59e0d9fe | 3247 | |
e9ce8d39 | 3248 | #if DEBUG_MALLOC |
9385eb3d A |
3249 | if (LOG(szone, ptr)) { |
3250 | malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size); | |
e9ce8d39 | 3251 | } |
e9ce8d39 | 3252 | #endif |
9385eb3d | 3253 | return new_ptr; |
e9ce8d39 A |
3254 | } |
3255 | ||
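/*
 * Condensed sketch of the strategy in szone_realloc above: never shrink, try
 * to grow the block where it sits, and only then fall back to
 * allocate-copy-free.  The example_* helpers and the stdlib malloc/free calls
 * are illustrative stand-ins for the zone's real entry points; the real code
 * also switches to vm_copy() for copies past VM_COPY_THRESHOLD.
 */
/* in this sketch the in-place attempt always fails; the real code tries the
   tiny, small and large/huge in-place paths above */
static int
example_try_grow_in_place(void *ptr, size_t old_size, size_t new_size)
{
	(void)ptr; (void)old_size; (void)new_size;
	return 0;
}

static void *
example_realloc(void *ptr, size_t old_size, size_t new_size)
{
	void *fresh;

	if (old_size >= new_size)
		return ptr;			/* we never shrink an allocation */
	if (example_try_grow_in_place(ptr, old_size, new_size))
		return ptr;			/* extended in place */
	fresh = malloc(new_size);		/* fall back to allocate-copy-free */
	if (fresh == NULL)
		return NULL;
	memcpy(fresh, ptr, old_size);
	free(ptr);
	return fresh;
}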
59e0d9fe A |
3256 | // given a size, returns pointers capable of holding that size |
3257 | // returns the number of pointers allocated | |
3258 | // may return 0 - this function makes a best-effort attempt, nothing more |
3259 | static unsigned | |
3260 | szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count) | |
3261 | { | |
3262 | msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); | |
3263 | size_t chunk_size = TINY_BYTES_FOR_MSIZE(msize); | |
3264 | free_list_t **free_list = szone->tiny_free_list + msize - 1; | |
3265 | free_list_t *ptr = *free_list; | |
3266 | unsigned found = 0; | |
3267 | ||
3268 | if (size > 31*TINY_QUANTUM) | |
3269 | return 0; // only bother implementing this for tiny | |
3270 | if (!msize) | |
3271 | msize = 1; | |
9385eb3d A |
3272 | CHECK(szone, __PRETTY_FUNCTION__); |
3273 | SZONE_LOCK(szone); // might as well lock right here to avoid concurrency issues | |
9385eb3d | 3274 | while (found < count) { |
59e0d9fe A |
3275 | if (!ptr) |
3276 | break; | |
3277 | *results++ = ptr; | |
3278 | found++; | |
9385eb3d | 3279 | set_tiny_meta_header_in_use(ptr, msize); |
59e0d9fe | 3280 | ptr = ptr->next; |
9385eb3d A |
3281 | } |
3282 | if (ptr) { | |
59e0d9fe A |
3283 | ptr->previous = NULL; |
3284 | free_list_set_checksum(szone, ptr); | |
9385eb3d | 3285 | } |
59e0d9fe A |
3286 | *free_list = ptr; |
3287 | ||
9385eb3d A |
3288 | // Note that we could allocate from the free lists for larger msize |
3289 | // But that may unnecessarily fragment - so we might as well let the client do that |
3290 | // We could also allocate from szone->tiny_bytes_free_at_end |
3291 | // But that means we'll "eat up" the untouched area faster, increasing the working set |
3292 | // So we just return what we already have, and nothing more |
3293 | szone->num_tiny_objects += found; | |
3294 | szone->num_bytes_in_tiny_objects += chunk_size * found; | |
5b2abdfb | 3295 | SZONE_UNLOCK(szone); |
9385eb3d | 3296 | return found; |
5b2abdfb A |
3297 | } |
3298 | ||
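/*
 * Usage sketch for the batch entry points above.  szone_batch_malloc only
 * serves tiny-sized requests, and only from the free list of that exact size,
 * so callers must expect fewer pointers than requested (possibly zero) and top
 * up with ordinary malloc.  example_fill_batch is an illustrative helper that
 * drives the zone through the function pointers installed below in
 * create_scalable_zone; the malloc_zone_batch_malloc() wrapper reaches the
 * same slot.
 */
static unsigned
example_fill_batch(malloc_zone_t *zone, size_t size, void **out, unsigned wanted)
{
	unsigned got = zone->batch_malloc(zone, size, out, wanted);

	while (got < wanted) {			/* top up whatever the batch call did not cover */
		out[got] = zone->malloc(zone, size);
		if (out[got] == NULL)
			break;
		got++;
	}
	return got;
}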
59e0d9fe A |
3299 | static void |
3300 | szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count) | |
3301 | { | |
3302 | unsigned cc = 0; | |
3303 | void *ptr; | |
3304 | tiny_region_t *tiny_region; | |
3305 | boolean_t is_free; | |
3306 | msize_t msize; | |
3307 | ||
9385eb3d A |
3308 | // frees all the pointers in to_be_freed |
3309 | // note that to_be_freed may be overwritten during the process | |
59e0d9fe A |
3310 | if (!count) |
3311 | return; | |
9385eb3d A |
3312 | CHECK(szone, __PRETTY_FUNCTION__); |
3313 | SZONE_LOCK(szone); | |
3314 | while (cc < count) { | |
59e0d9fe A |
3315 | ptr = to_be_freed[cc]; |
3316 | /* XXX this really slows us down */ | |
3317 | tiny_region = tiny_region_for_ptr_no_lock(szone, ptr); | |
9385eb3d A |
3318 | if (tiny_region) { |
3319 | // this is a tiny pointer | |
59e0d9fe A |
3320 | msize = get_tiny_meta_header(ptr, &is_free); |
3321 | if (is_free) | |
3322 | break; // a double free; let the standard free deal with it | |
9385eb3d A |
3323 | tiny_free_no_lock(szone, tiny_region, ptr, msize); |
3324 | to_be_freed[cc] = NULL; | |
3325 | } | |
3326 | cc++; | |
e9ce8d39 | 3327 | } |
9385eb3d A |
3328 | SZONE_UNLOCK(szone); |
3329 | CHECK(szone, __PRETTY_FUNCTION__); | |
3330 | while (count--) { | |
59e0d9fe A |
3331 | ptr = to_be_freed[count]; |
3332 | if (ptr) | |
3333 | szone_free(szone, ptr); | |
e9ce8d39 | 3334 | } |
e9ce8d39 A |
3335 | } |
3336 | ||
9385eb3d | 3337 | static void |
59e0d9fe A |
3338 | szone_destroy(szone_t *szone) |
3339 | { | |
3340 | unsigned index; | |
9385eb3d | 3341 | small_region_t pended_region = 0; |
59e0d9fe A |
3342 | large_entry_t *large; |
3343 | vm_range_t range_to_deallocate; | |
3344 | huge_entry_t *huge; | |
3345 | tiny_region_t tiny_region; | |
3346 | small_region_t small_region; | |
3347 | ||
3348 | /* destroy large entries */ | |
e9ce8d39 A |
3349 | index = szone->num_large_entries; |
3350 | while (index--) { | |
59e0d9fe A |
3351 | large = szone->large_entries + index; |
3352 | if (!LARGE_ENTRY_IS_EMPTY(*large)) { | |
9385eb3d | 3353 | // we deallocate_pages, including guard pages |
59e0d9fe | 3354 | deallocate_pages(szone, (void *)LARGE_ENTRY_ADDRESS(*large), LARGE_ENTRY_SIZE(*large), szone->debug_flags); |
9385eb3d A |
3355 | } |
3356 | } | |
3357 | if (szone->num_large_entries * sizeof(large_entry_t) >= LARGE_THRESHOLD) { | |
59e0d9fe A |
3358 | // we do not free in the small chunk case |
3359 | large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate); | |
3360 | if (range_to_deallocate.size) | |
3361 | deallocate_pages(szone, (void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0); | |
9385eb3d | 3362 | } |
59e0d9fe A |
3363 | |
3364 | /* destroy huge entries */ | |
e9ce8d39 A |
3365 | index = szone->num_huge_entries; |
3366 | while (index--) { | |
59e0d9fe A |
3367 | huge = szone->huge_entries + index; |
3368 | deallocate_pages(szone, (void *)huge->address, huge->size, szone->debug_flags); | |
9385eb3d | 3369 | } |
59e0d9fe A |
3370 | |
3371 | /* destroy tiny regions */ | |
9385eb3d A |
3372 | index = szone->num_tiny_regions; |
3373 | while (index--) { | |
59e0d9fe A |
3374 | tiny_region = szone->tiny_regions[index]; |
3375 | deallocate_pages(szone, TINY_REGION_ADDRESS(tiny_region), TINY_REGION_SIZE, 0); | |
e9ce8d39 | 3376 | } |
59e0d9fe | 3377 | /* destroy small regions; region 0 must go last as it contains the szone */ |
9385eb3d A |
3378 | index = szone->num_small_regions; |
3379 | while (index--) { | |
59e0d9fe A |
3380 | small_region = szone->small_regions[index]; |
3381 | /* | |
3382 | * If we've allocated more than the basic set of small regions, avoid destroying the | |
3383 | * region that contains the array. | |
3384 | */ | |
3385 | if ((index > 0) && | |
3386 | (SMALL_REGION_FOR_PTR(szone->small_regions) == SMALL_REGION_ADDRESS(small_region))) { | |
3387 | pended_region = small_region; | |
9385eb3d | 3388 | } else { |
59e0d9fe | 3389 | deallocate_pages(szone, (void *)SMALL_REGION_ADDRESS(small_region), SMALL_REGION_SIZE, 0); |
9385eb3d A |
3390 | } |
3391 | } | |
59e0d9fe A |
3392 | if (pended_region) |
3393 | deallocate_pages(NULL, (void *)SMALL_REGION_ADDRESS(pended_region), SMALL_REGION_SIZE, 0); | |
e9ce8d39 A |
3394 | } |
3395 | ||
9385eb3d | 3396 | static size_t |
59e0d9fe A |
3397 | szone_good_size(szone_t *szone, size_t size) |
3398 | { | |
3399 | msize_t msize; | |
3400 | unsigned num_pages; | |
3401 | ||
9385eb3d A |
3402 | if (size <= 31 * TINY_QUANTUM) { |
3403 | // think tiny | |
59e0d9fe | 3404 | msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); |
9385eb3d | 3405 | if (! msize) msize = 1; |
59e0d9fe | 3406 | return TINY_BYTES_FOR_MSIZE(msize); |
9385eb3d | 3407 | } |
e9ce8d39 | 3408 | if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) { |
9385eb3d | 3409 | // think small |
59e0d9fe | 3410 | msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1); |
9385eb3d | 3411 | if (! msize) msize = 1; |
59e0d9fe | 3412 | return SMALL_BYTES_FOR_MSIZE(msize); |
e9ce8d39 | 3413 | } else { |
9385eb3d | 3414 | num_pages = round_page(size) >> vm_page_shift; |
59e0d9fe A |
3415 | if (!num_pages) |
3416 | num_pages = 1; // minimal allocation size for this szone |
9385eb3d | 3417 | return num_pages << vm_page_shift; |
e9ce8d39 A |
3418 | } |
3419 | } | |
3420 | ||
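/*
 * Worked example of the rounding above, assuming a 16-byte tiny quantum, a
 * 512-byte small quantum, a small/large threshold well below 100KB, and the
 * 4KB page this file is built for:
 *
 *	szone_good_size(szone, 100)    -> 112     (7 tiny quanta)
 *	szone_good_size(szone, 1000)   -> 1024    (2 small quanta)
 *	szone_good_size(szone, 100000) -> 102400  (25 pages)
 */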
3421 | unsigned szone_check_counter = 0; | |
3422 | unsigned szone_check_start = 0; | |
3423 | unsigned szone_check_modulo = 1; | |
3424 | ||
9385eb3d | 3425 | static boolean_t |
59e0d9fe A |
3426 | szone_check_all(szone_t *szone, const char *function) |
3427 | { | |
3428 | int index; | |
3429 | tiny_region_t *tiny; | |
3430 | small_region_t *small; | |
3431 | ||
e9ce8d39 | 3432 | SZONE_LOCK(szone); |
9385eb3d | 3433 | CHECK_LOCKED(szone, __PRETTY_FUNCTION__); |
59e0d9fe A |
3434 | |
3435 | /* check tiny regions - could check region count */ |
3436 | for (index = szone->num_tiny_regions - 1, tiny = szone->tiny_regions; | |
3437 | index >= 0; | |
3438 | index--, tiny++) { | |
3439 | if (!tiny_check_region(szone, tiny)) { | |
9385eb3d A |
3440 | SZONE_UNLOCK(szone); |
3441 | szone->debug_flags &= ~ CHECK_REGIONS; | |
59e0d9fe A |
3442 | malloc_printf("*** tiny region %d incorrect szone_check_all(%s) counter=%d\n", |
3443 | szone->num_tiny_regions - index, function, szone_check_counter); | |
3444 | szone_error(szone, "check: tiny region incorrect", NULL); | |
9385eb3d A |
3445 | return 0; |
3446 | } | |
3447 | } | |
3448 | ||
59e0d9fe A |
3449 | for (index = NUM_TINY_SLOTS - 1; index >= 0; index--) { |
3450 | if (!tiny_free_list_check(szone, index)) { | |
9385eb3d A |
3451 | SZONE_UNLOCK(szone); |
3452 | szone->debug_flags &= ~ CHECK_REGIONS; | |
59e0d9fe A |
3453 | malloc_printf("*** tiny free list incorrect (slot=%d) szone_check_all(%s) counter=%d\n", |
3454 | index, function, szone_check_counter); | |
3455 | szone_error(szone, "check: tiny free list incorrect", NULL); | |
9385eb3d A |
3456 | return 0; |
3457 | } | |
9385eb3d A |
3458 | } |
3459 | ||
59e0d9fe A |
3460 | /* check small regions - could check region count */ |
3461 | for (index = szone->num_small_regions - 1, small = szone->small_regions; | |
3462 | index >= 0; | |
3463 | index--, small++) { | |
3464 | if (!szone_check_small_region(szone, small)) { | |
9385eb3d A |
3465 | SZONE_UNLOCK(szone); |
3466 | szone->debug_flags &= ~ CHECK_REGIONS; | |
59e0d9fe A |
3467 | malloc_printf("*** small region %d incorrect szone_check_all(%s) counter=%d\n", |
3468 | szone->num_small_regions - index, function, szone_check_counter); |
3469 | szone_error(szone, "check: small region incorrect", NULL); | |
9385eb3d A |
3470 | return 0; |
3471 | } | |
e9ce8d39 | 3472 | } |
59e0d9fe A |
3473 | for (index = NUM_SMALL_SLOTS - 1; index >= 0; index--) { |
3474 | if (!small_free_list_check(szone, index)) { | |
9385eb3d A |
3475 | SZONE_UNLOCK(szone); |
3476 | szone->debug_flags &= ~ CHECK_REGIONS; | |
59e0d9fe A |
3477 | malloc_printf("*** small free list incorrect (grain=%d) szone_check_all(%s) counter=%d\n", index, function, szone_check_counter); |
3478 | szone_error(szone, "check: small free list incorrect", NULL); | |
9385eb3d A |
3479 | return 0; |
3480 | } | |
e9ce8d39 A |
3481 | } |
3482 | SZONE_UNLOCK(szone); | |
9385eb3d | 3483 | // szone_print(szone, 1); |
e9ce8d39 A |
3484 | return 1; |
3485 | } | |
3486 | ||
9385eb3d | 3487 | static boolean_t |
59e0d9fe A |
3488 | szone_check(szone_t *szone) |
3489 | { | |
3490 | ||
3491 | if ((++szone_check_counter % 10000) == 0) | |
3492 | malloc_printf("at szone_check counter=%d\n", szone_check_counter); | |
3493 | if (szone_check_counter < szone_check_start) | |
3494 | return 1; | |
3495 | if (szone_check_counter % szone_check_modulo) | |
3496 | return 1; | |
e9ce8d39 A |
3497 | return szone_check_all(szone, ""); |
3498 | } | |
3499 | ||
9385eb3d | 3500 | static kern_return_t |
59e0d9fe A |
3501 | szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder) |
3502 | { | |
e9ce8d39 A |
3503 | szone_t *szone; |
3504 | kern_return_t err; | |
59e0d9fe | 3505 | |
e9ce8d39 | 3506 | if (!reader) reader = _szone_default_reader; |
e9ce8d39 A |
3507 | err = reader(task, zone_address, sizeof(szone_t), (void **)&szone); |
3508 | if (err) return err; | |
9385eb3d A |
3509 | err = tiny_in_use_enumerator(task, context, type_mask, |
3510 | (vm_address_t)szone->tiny_regions, szone->num_tiny_regions, szone->tiny_bytes_free_at_end , reader, recorder); | |
e9ce8d39 | 3511 | if (err) return err; |
9385eb3d A |
3512 | err = small_in_use_enumerator(task, context, type_mask, |
3513 | (vm_address_t)szone->small_regions, szone->num_small_regions, szone->small_bytes_free_at_end , reader, recorder); | |
e9ce8d39 | 3514 | if (err) return err; |
9385eb3d A |
3515 | err = large_in_use_enumerator(task, context, type_mask, |
3516 | (vm_address_t)szone->large_entries, szone->num_large_entries, reader, | |
3517 | recorder); | |
3518 | if (err) return err; | |
9385eb3d A |
3519 | err = huge_in_use_enumerator(task, context, type_mask, |
3520 | (vm_address_t)szone->huge_entries, szone->num_huge_entries, reader, | |
3521 | recorder); | |
e9ce8d39 A |
3522 | return err; |
3523 | } | |
3524 | ||
9385eb3d A |
3525 | // Following method is deprecated: use scalable_zone_statistics instead |
3526 | void | |
59e0d9fe A |
3527 | scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count) |
3528 | { | |
9385eb3d A |
3529 | szone_t *szone = (void *)zone; |
3530 | unsigned info[13]; | |
59e0d9fe | 3531 | |
9385eb3d A |
3532 | // We do not lock to facilitate debug |
3533 | info[4] = szone->num_tiny_objects; | |
3534 | info[5] = szone->num_bytes_in_tiny_objects; | |
3535 | info[6] = szone->num_small_objects; | |
3536 | info[7] = szone->num_bytes_in_small_objects; | |
3537 | info[8] = szone->num_large_objects_in_use; | |
3538 | info[9] = szone->num_bytes_in_large_objects; | |
3539 | info[10] = szone->num_huge_entries; | |
3540 | info[11] = szone->num_bytes_in_huge_objects; | |
3541 | info[12] = szone->debug_flags; | |
3542 | info[0] = info[4] + info[6] + info[8] + info[10]; | |
3543 | info[1] = info[5] + info[7] + info[9] + info[11]; | |
3544 | info[3] = szone->num_tiny_regions * TINY_REGION_SIZE + szone->num_small_regions * SMALL_REGION_SIZE + info[9] + info[11]; | |
3545 | info[2] = info[3] - szone->tiny_bytes_free_at_end - szone->small_bytes_free_at_end; | |
3546 | memcpy(info_to_fill, info, sizeof(unsigned)*count); | |
e9ce8d39 A |
3547 | } |
3548 | ||
9385eb3d | 3549 | static void |
59e0d9fe A |
3550 | szone_print(szone_t *szone, boolean_t verbose) |
3551 | { | |
3552 | unsigned info[13]; | |
3553 | unsigned index = 0; | |
3554 | tiny_region_t *region; | |
3555 | ||
9385eb3d A |
3556 | SZONE_LOCK(szone); |
3557 | scalable_zone_info((void *)szone, info, 13); | |
59e0d9fe A |
3558 | malloc_printf("Scalable zone %p: inUse=%d(%y) touched=%y allocated=%y flags=%d\n", |
3559 | szone, info[0], info[1], info[2], info[3], info[12]); | |
3560 | malloc_printf("\ttiny=%d(%y) small=%d(%y) large=%d(%y) huge=%d(%y)\n", | |
3561 | info[4], info[5], info[6], info[7], info[8], info[9], info[10], info[11]); | |
9385eb3d A |
3562 | // tiny |
3563 | malloc_printf("%d tiny regions: \n", szone->num_tiny_regions); | |
3564 | while (index < szone->num_tiny_regions) { | |
59e0d9fe | 3565 | region = szone->tiny_regions + index; |
9385eb3d A |
3566 | print_tiny_region(verbose, *region, (index == szone->num_tiny_regions - 1) ? szone->tiny_bytes_free_at_end : 0); |
3567 | index++; | |
3568 | } | |
3569 | if (verbose) print_tiny_free_list(szone); | |
3570 | // small | |
3571 | malloc_printf("%d small regions: \n", szone->num_small_regions); | |
3572 | index = 0; | |
3573 | while (index < szone->num_small_regions) { | |
59e0d9fe | 3574 | region = szone->small_regions + index; |
9385eb3d A |
3575 | print_small_region(szone, verbose, region, (index == szone->num_small_regions - 1) ? szone->small_bytes_free_at_end : 0); |
3576 | index++; | |
3577 | } | |
59e0d9fe A |
3578 | if (verbose) |
3579 | print_small_free_list(szone); | |
9385eb3d A |
3580 | SZONE_UNLOCK(szone); |
3581 | } | |
3582 | ||
3583 | static void | |
59e0d9fe A |
3584 | szone_log(malloc_zone_t *zone, void *log_address) |
3585 | { | |
3586 | szone_t *szone = (szone_t *)zone; | |
3587 | ||
e9ce8d39 A |
3588 | szone->log_address = log_address; |
3589 | } | |
3590 | ||
9385eb3d | 3591 | static void |
59e0d9fe A |
3592 | szone_force_lock(szone_t *szone) |
3593 | { | |
e9ce8d39 A |
3594 | SZONE_LOCK(szone); |
3595 | } | |
3596 | ||
9385eb3d | 3597 | static void |
59e0d9fe A |
3598 | szone_force_unlock(szone_t *szone) |
3599 | { | |
e9ce8d39 A |
3600 | SZONE_UNLOCK(szone); |
3601 | } | |
3602 | ||
9385eb3d | 3603 | boolean_t |
59e0d9fe A |
3604 | scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone) |
3605 | { | |
3606 | szone_t *szone = (szone_t *)zone; | |
3607 | ||
9385eb3d A |
3608 | switch (subzone) { |
3609 | case 0: | |
3610 | stats->blocks_in_use = szone->num_tiny_objects; | |
3611 | stats->size_in_use = szone->num_bytes_in_tiny_objects; | |
3612 | stats->size_allocated = szone->num_tiny_regions * TINY_REGION_SIZE; | |
3613 | stats->max_size_in_use = stats->size_allocated - szone->tiny_bytes_free_at_end; | |
3614 | return 1; | |
3615 | case 1: | |
3616 | stats->blocks_in_use = szone->num_small_objects; | |
3617 | stats->size_in_use = szone->num_bytes_in_small_objects; | |
3618 | stats->size_allocated = szone->num_small_regions * SMALL_REGION_SIZE; | |
3619 | stats->max_size_in_use = stats->size_allocated - szone->small_bytes_free_at_end; | |
3620 | return 1; | |
3621 | case 2: | |
3622 | stats->blocks_in_use = szone->num_large_objects_in_use; | |
3623 | stats->size_in_use = szone->num_bytes_in_large_objects; | |
3624 | stats->max_size_in_use = stats->size_allocated = stats->size_in_use; | |
3625 | return 1; | |
3626 | case 3: | |
3627 | stats->blocks_in_use = szone->num_huge_entries; | |
3628 | stats->size_in_use = szone->num_bytes_in_huge_objects; | |
3629 | stats->max_size_in_use = stats->size_allocated = stats->size_in_use; | |
3630 | return 1; | |
3631 | } | |
3632 | return 0; | |
3633 | } | |
3634 | ||
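/*
 * Usage sketch for scalable_zone_statistics() above: sub-zone indices 0-3
 * select tiny, small, large and huge respectively, and the call returns 0 once
 * the index is out of range, so per-sub-zone figures can be accumulated in a
 * simple loop.  example_total_size_in_use is an illustrative helper, not part
 * of the zone's interface.
 */
static size_t
example_total_size_in_use(malloc_zone_t *zone)
{
	malloc_statistics_t stats;
	size_t total = 0;
	unsigned subzone;

	for (subzone = 0; scalable_zone_statistics(zone, &stats, subzone); subzone++)
		total += stats.size_in_use;	/* tiny, then small, then large, then huge */
	return total;
}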
3635 | static void | |
59e0d9fe A |
3636 | szone_statistics(szone_t *szone, malloc_statistics_t *stats) |
3637 | { | |
3638 | size_t big_and_huge; | |
3639 | ||
3640 | stats->blocks_in_use = | |
3641 | szone->num_tiny_objects + | |
3642 | szone->num_small_objects + | |
3643 | szone->num_large_objects_in_use + | |
3644 | szone->num_huge_entries; | |
3645 | big_and_huge = szone->num_bytes_in_large_objects + szone->num_bytes_in_huge_objects; | |
9385eb3d | 3646 | stats->size_in_use = szone->num_bytes_in_tiny_objects + szone->num_bytes_in_small_objects + big_and_huge; |
59e0d9fe A |
3647 | stats->max_size_in_use = stats->size_allocated = |
3648 | szone->num_tiny_regions * TINY_REGION_SIZE + | |
3649 | szone->num_small_regions * SMALL_REGION_SIZE + | |
3650 | big_and_huge ; | |
3651 | ||
9385eb3d A |
3652 | // Now we account for the untouched areas |
3653 | stats->max_size_in_use -= szone->tiny_bytes_free_at_end; | |
3654 | stats->max_size_in_use -= szone->small_bytes_free_at_end; | |
3655 | } | |
e9ce8d39 | 3656 | |
9385eb3d A |
3657 | static const struct malloc_introspection_t szone_introspect = { |
3658 | (void *)szone_ptr_in_use_enumerator, | |
3659 | (void *)szone_good_size, | |
3660 | (void *)szone_check, | |
3661 | (void *)szone_print, | |
3662 | szone_log, | |
3663 | (void *)szone_force_lock, | |
3664 | (void *)szone_force_unlock, | |
3665 | (void *)szone_statistics | |
3666 | }; // marked as const to spare the DATA section | |
3667 | ||
3668 | malloc_zone_t * | |
59e0d9fe A |
3669 | create_scalable_zone(size_t initial_size, unsigned debug_flags) |
3670 | { | |
e9ce8d39 | 3671 | szone_t *szone; |
e9ce8d39 | 3672 | size_t msize; |
59e0d9fe A |
3673 | size_t msize_used; |
3674 | msize_t free_msize; | |
3675 | ||
3676 | /* | |
3677 | * Sanity-check our build-time assumptions about the size of a page. | |
3678 | * Since we have sized various things assuming the default page size, | |
3679 | * attempting to determine it dynamically is not useful. | |
3680 | */ | |
3681 | if ((vm_page_size != _vm_page_size) || (vm_page_shift != _vm_page_shift)) { | |
3682 | malloc_printf("*** FATAL ERROR - machine page size does not match our assumptions.\n"); | |
9385eb3d A |
3683 | exit(-1); |
3684 | } | |
59e0d9fe A |
3685 | |
3686 | /* get memory for the zone */ | |
3687 | szone = allocate_pages(NULL, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, 0, VM_MAKE_TAG(VM_MEMORY_MALLOC)); | |
3688 | if (!szone) | |
3689 | return NULL; | |
3690 | /* set up the szone structure */ | |
9385eb3d A |
3691 | szone->tiny_regions = szone->initial_tiny_regions; |
3692 | szone->small_regions = szone->initial_small_regions; | |
59e0d9fe | 3693 | msize_used = msize; szone->num_small_objects++; |
9385eb3d | 3694 | szone->basic_zone.version = 3; |
e9ce8d39 A |
3695 | szone->basic_zone.size = (void *)szone_size; |
3696 | szone->basic_zone.malloc = (void *)szone_malloc; | |
3697 | szone->basic_zone.calloc = (void *)szone_calloc; | |
3698 | szone->basic_zone.valloc = (void *)szone_valloc; | |
3699 | szone->basic_zone.free = (void *)szone_free; | |
3700 | szone->basic_zone.realloc = (void *)szone_realloc; | |
3701 | szone->basic_zone.destroy = (void *)szone_destroy; | |
9385eb3d A |
3702 | szone->basic_zone.batch_malloc = (void *)szone_batch_malloc; |
3703 | szone->basic_zone.batch_free = (void *)szone_batch_free; | |
3704 | szone->basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect; | |
59e0d9fe A |
3705 | szone->debug_flags = debug_flags; |
3706 | ||
3707 | /* as the szone is allocated out of the first small region, reflect that allocation */ | |
3708 | szone->small_regions[0] = szone; | |
3709 | szone->num_small_regions = 1; | |
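/* mark the quanta occupied by the szone header as allocated, and record the remainder of the region as the free run at its end */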
3710 | msize = SMALL_MSIZE_FOR_BYTES(sizeof(szone_t) + SMALL_QUANTUM - 1); | |
3711 | free_msize = NUM_SMALL_BLOCKS - msize; | |
3712 | *SMALL_METADATA_FOR_PTR(szone) = msize; | |
3713 | *(SMALL_METADATA_FOR_PTR(szone) + msize) = free_msize; | |
3714 | szone->small_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(free_msize); | |
3715 | ||
e9ce8d39 | 3716 | LOCK_INIT(szone->lock); |
9385eb3d A |
3717 | #if 0 |
3718 | #warning CHECK_REGIONS enabled | |
3719 | debug_flags |= CHECK_REGIONS; | |
3720 | #endif | |
3721 | #if 0 | |
3722 | #warning LOG enabled | |
3723 | szone->log_address = ~0; | |
3724 | #endif | |
e9ce8d39 A |
3725 | CHECK(szone, __PRETTY_FUNCTION__); |
3726 | return (malloc_zone_t *)szone; | |
3727 | } | |
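/*
 * A minimal usage sketch, assuming a caller that has scalable_malloc.h and
 * the public <malloc/malloc.h> registration API available; the zone name
 * below is purely illustrative.
 */
#if 0
#include <malloc/malloc.h>
#include "scalable_malloc.h"

static malloc_zone_t *
make_private_zone(void)
{
    /* initial_size is only a hint; debug_flags may carry bits such as SCALABLE_MALLOC_ADD_GUARD_PAGES */
    malloc_zone_t *zone = create_scalable_zone(0, 0);

    if (zone == NULL)
        return NULL;
    malloc_zone_register(zone);
    malloc_set_zone_name(zone, "example_private_zone");
    return zone;
}
#endif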
3728 | ||
5b2abdfb A |
3729 | /********* Support code for emacs unexec ************/ |
3730 | ||
3731 | /* History of freezedry version numbers: | |
3732 | * | |
3733 | * 1) Old malloc (before the scalable malloc implementation in this file | |
3734 | * existed). | |
3735 | * 2) Original freezedrying code for scalable malloc. This code was apparently | |
3736 | * based on the old freezedrying code and was fundamentally flawed in its | |
3737 | * assumption that tracking allocated memory regions was adequate to fake | |
3738 | * operations on freezedried memory. This doesn't work, since scalable | |
3739 | * malloc does not store flags in front of large page-aligned allocations. | |
3740 | * 3) Original szone-based freezedrying code. | |
9385eb3d | 3741 | * 4) Fresher malloc with tiny zone |
59e0d9fe | 3742 | * 5) 32/64bit compatible malloc |
5b2abdfb A |
3743 | * |
3744 | * No version backward compatibility is provided, but the version number does | |
3745 | * make it possible for malloc_jumpstart() to return an error if the application | |
3746 | * was freezedried with an older version of malloc. | |
3747 | */ | |
59e0d9fe | 3748 | #define MALLOC_FREEZEDRY_VERSION 5 |
5b2abdfb A |
3749 | |
3750 | typedef struct { | |
3751 | unsigned version; | |
3752 | unsigned nszones; | |
3753 | szone_t *szones; | |
3754 | } malloc_frozen; | |
3755 | ||
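/*
 * The frozen_* callbacks below simply forward new allocations to the live
 * default malloc; szone_size() is kept as the size callback (see
 * malloc_jumpstart() below) so that the sizes of freezedried blocks can
 * still be looked up, which frozen_realloc() relies on.
 */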
9385eb3d | 3756 | static void * |
59e0d9fe A |
3757 | frozen_malloc(szone_t *zone, size_t new_size) |
3758 | { | |
5b2abdfb A |
3759 | return malloc(new_size); |
3760 | } | |
3761 | ||
9385eb3d | 3762 | static void * |
59e0d9fe A |
3763 | frozen_calloc(szone_t *zone, size_t num_items, size_t size) |
3764 | { | |
5b2abdfb A |
3765 | return calloc(num_items, size); |
3766 | } | |
3767 | ||
9385eb3d | 3768 | static void * |
59e0d9fe A |
3769 | frozen_valloc(szone_t *zone, size_t new_size) |
3770 | { | |
5b2abdfb A |
3771 | return valloc(new_size); |
3772 | } | |
3773 | ||
9385eb3d | 3774 | static void * |
59e0d9fe A |
3775 | frozen_realloc(szone_t *zone, void *ptr, size_t new_size) |
3776 | { | |
5b2abdfb A |
3777 | size_t old_size = szone_size(zone, ptr); |
3778 | void *new_ptr; | |
59e0d9fe | 3779 | |
5b2abdfb A |
3780 | if (new_size <= old_size) { |
3781 | return ptr; | |
3782 | } | |
5b2abdfb | 3783 | new_ptr = malloc(new_size); |
5b2abdfb A |
3784 | if (new_ptr != NULL && old_size > 0) { /* guard against malloc failure */ | |
3785 | memcpy(new_ptr, ptr, old_size); | |
3786 | } | |
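/* the old block lives in the freezedried image and cannot be released, so it is deliberately left in place */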
5b2abdfb A |
3787 | return new_ptr; |
3788 | } | |
3789 | ||
9385eb3d | 3790 | static void |
59e0d9fe A |
3791 | frozen_free(szone_t *zone, void *ptr) |
3792 | { | |
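/* freezedried blocks cannot be returned to their reconstituted zone, so free is deliberately a no-op */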
5b2abdfb A |
3793 | } |
3794 | ||
9385eb3d | 3795 | static void |
59e0d9fe A |
3796 | frozen_destroy(szone_t *zone) |
3797 | { | |
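/* destroying a reconstituted zone must not unmap the freezedried regions, so this too is a no-op */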
5b2abdfb A |
3798 | } |
3799 | ||
3800 | /********* Pseudo-private API for emacs unexec ************/ | |
3801 | ||
3802 | /* | |
3803 | * malloc_freezedry() records all of the szones in use, so that they can be | |
3804 | * partially reconstituted by malloc_jumpstart(). Due to the differences | |
3805 | * between reconstituted memory regions and those created by the szone code, | |
3806 | * care is taken not to reallocate from the freezedried memory, except in the | |
3807 | * case of a non-growing realloc(). | |
3808 | * | |
3809 | * Due to the flexibility provided by the zone registration mechanism, it is | |
3810 | * impossible to implement generic freezedrying for any zone type. This code | |
3811 | * only handles applications that use the szone allocator, so malloc_freezedry() | |
3812 | * returns 0 (error) if any non-szone zones are encountered. | |
3813 | */ | |
3814 | ||
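/*
 * A minimal sketch of that protocol, assuming an unexec-style dumper;
 * dump_image_to_disk() is a hypothetical stand-in for the actual dumping
 * machinery, and the cookie must be preserved in the dumped image.
 */
#if 0
static uintptr_t frozen_cookie;             /* survives into the dumped image */

static int
freeze_before_dump(void)
{
    frozen_cookie = malloc_freezedry();     /* 0 means a non-szone zone was found */
    if (frozen_cookie == 0)
        return -1;
    return dump_image_to_disk();            /* hypothetical */
}

static int
thaw_after_restart(void)
{
    /* non-zero means the image was frozen by an incompatible malloc version */
    return malloc_jumpstart(frozen_cookie);
}
#endif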
59e0d9fe A |
3815 | uintptr_t |
3816 | malloc_freezedry(void) | |
3817 | { | |
5b2abdfb A |
3818 | extern unsigned malloc_num_zones; |
3819 | extern malloc_zone_t **malloc_zones; | |
3820 | malloc_frozen *data; | |
3821 | unsigned i; | |
3822 | ||
3823 | /* Allocate space in which to store the freezedry state. */ | |
3824 | data = (malloc_frozen *) malloc(sizeof(malloc_frozen)); | |
| if (data == NULL) | |
| return 0; | |
3825 | ||
9385eb3d | 3826 | /* Set freezedry version number so that malloc_jumpstart() can check for compatibility. */ |
5b2abdfb A |
3827 | data->version = MALLOC_FREEZEDRY_VERSION; |
3828 | ||
3829 | /* Allocate the array of szone pointers. */ | |
3830 | data->nszones = malloc_num_zones; | |
3831 | data->szones = (szone_t *) calloc(malloc_num_zones, sizeof(szone_t)); | |
| if (data->szones == NULL) { | |
| free(data); | |
| return 0; | |
| } | |
3832 | ||
59e0d9fe A |
3833 | /* |
3834 | * Fill in the array of szone structures. They are copied rather than | |
5b2abdfb | 3835 | * referenced, since the originals are likely to be clobbered during malloc |
59e0d9fe A |
3836 | * initialization. |
3837 | */ | |
5b2abdfb A |
3838 | for (i = 0; i < malloc_num_zones; i++) { |
3839 | if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) { | |
3840 | /* Unknown zone type. */ | |
3841 | free(data->szones); | |
3842 | free(data); | |
3843 | return 0; | |
3844 | } | |
3845 | memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t)); | |
3846 | } | |
3847 | ||
59e0d9fe | 3848 | return((uintptr_t)data); |
5b2abdfb A |
3849 | } |
3850 | ||
9385eb3d | 3851 | int |
59e0d9fe A |
3852 | malloc_jumpstart(uintptr_t cookie) |
3853 | { | |
3854 | malloc_frozen *data = (malloc_frozen *)cookie; | |
5b2abdfb A |
3855 | unsigned i; |
3856 | ||
3857 | if (data->version != MALLOC_FREEZEDRY_VERSION) { | |
3858 | /* Unsupported freezedry version. */ | |
3859 | return 1; | |
3860 | } | |
3861 | ||
3862 | for (i = 0; i < data->nszones; i++) { | |
3863 | /* Set function pointers. Even the functions that stay the same must be | |
3864 | * set, since there are no guarantees that they will be mapped to the | |
3865 | * same addresses. */ | |
3866 | data->szones[i].basic_zone.size = (void *) szone_size; | |
3867 | data->szones[i].basic_zone.malloc = (void *) frozen_malloc; | |
3868 | data->szones[i].basic_zone.calloc = (void *) frozen_calloc; | |
3869 | data->szones[i].basic_zone.valloc = (void *) frozen_valloc; | |
3870 | data->szones[i].basic_zone.free = (void *) frozen_free; | |
3871 | data->szones[i].basic_zone.realloc = (void *) frozen_realloc; | |
3872 | data->szones[i].basic_zone.destroy = (void *) frozen_destroy; | |
9385eb3d | 3873 | data->szones[i].basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect; |
5b2abdfb A |
3874 | |
3875 | /* Register the freezedried zone. */ | |
3876 | malloc_zone_register(&data->szones[i].basic_zone); | |
3877 | } | |
3878 | ||
3879 | return 0; | |
3880 | } | |
59e0d9fe | 3881 |