1 | /* |
2 | * Copyright (c) 1999, 2006, 2008 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. Please obtain a copy of the License at | |
10 | * http://www.opensource.apple.com/apsl/ and read it before using this | |
11 | * file. | |
12 | * | |
13 | * The Original Code and all software distributed under the License are | |
14 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
15 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
16 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
18 | * Please see the License for the specific language governing rights and | |
19 | * limitations under the License. | |
20 | * | |
21 | * @APPLE_LICENSE_HEADER_END@ | |
22 | */ | |
23 | ||
24 | /* Author: Bertrand Serlet, August 1999 */ | |
25 | ||
26 | /* | |
27 | Multithread enhancements for "tiny" allocations introduced February 2008. | |
28 | These are in the spirit of "Hoard". See: | |
29 | Berger, E.D.; McKinley, K.S.; Blumofe, R.D.; Wilson, P.R. (2000). | |
30 | "Hoard: a scalable memory allocator for multithreaded applications". | |
31 | ACM SIGPLAN Notices 35 (11): 117-128. Berger2000. | |
32 | <http://portal.acm.org/citation.cfm?id=356989.357000> | |
33 | Retrieved on 2008-02-22. | |
34 | */ | |
35 | ||
36 | /* gcc -g -O3 magazine_malloc.c malloc.c -o libmagmalloc.dylib -I. \ | |
37 | -I/System/Library/Frameworks/System.framework/PrivateHeaders/ -funit-at-a-time \ | |
38 | -dynamiclib -Wall -arch x86_64 -arch i386 -arch ppc */ | |
39 | ||
40 | #include <TargetConditionals.h> |
41 | ||
42 | #include "scalable_malloc.h" |
43 | #include "malloc_printf.h" | |
44 | #include "_simple.h" | |
45 | #include "magmallocProvider.h" | |
46 | ||
47 | #include <pthread_internals.h> /* for pthread_lock_t SPI */ | |
48 | #include <pthread.h> /* for pthread API */ | |
49 | ||
50 | #include <stdint.h> | |
51 | #include <unistd.h> | |
52 | #include <mach/vm_statistics.h> | |
53 | #include <mach/mach_init.h> | |
54 | #include <sys/types.h> | |
55 | #include <sys/mman.h> | |
56 | #include <sys/param.h> |
57 | ||
58 | #if defined(__i386__) || defined(__x86_64__) | |
59 | #define __APPLE_API_PRIVATE | |
60 | #include <machine/cpu_capabilities.h> | |
61 | #define _COMM_PAGE_VERSION_REQD 9 | |
62 | #undef __APPLE_API_PRIVATE | |
63 | #else | |
64 | #include <sys/sysctl.h> |
65 | #endif |
66 | ||
67 | #include <libkern/OSAtomic.h> |
68 | #include <mach-o/dyld.h> /* for NSVersionOfLinkTimeLibrary() */ |
69 | #include <mach-o/dyld_priv.h> /* for _dyld_get_image_slide() */ | |
70 | #include <crt_externs.h> /* for _NSGetMachExecuteHeader() */ | |
71 | #include <mach/vm_param.h> | |
72 | #include <sys/vmparam.h> | |
73 | ||
74 | #include <CrashReporterClient.h> | |
75 | |
76 | /********************* DEFINITIONS ************************/ | |
77 | ||
78 | #define DEBUG_MALLOC 0 // set to one to debug malloc itself | |
79 | ||
80 | #define DEBUG_CLIENT 0 // set to one to debug malloc client | |
81 | ||
82 | #define DEBUG_MADVISE 0 |
83 | ||
84 | #if DEBUG_MALLOC |
85 | #warning DEBUG_MALLOC ENABLED | |
86 | # define INLINE | |
87 | # define ALWAYSINLINE | |
88 | # define CHECK_MAGAZINE_PTR_LOCKED(szone, mag_ptr, fun) \ | |
89 | do { \ | |
90 | if (__is_threaded && TRY_LOCK(mag_ptr->magazine_lock)) { \ | |
91 | malloc_printf("*** magazine_lock was not set %p in %s\n", \ | |
92 | mag_ptr->magazine_lock, fun); \ | |
93 | } \ | |
94 | } while (0) | |
95 | #else | |
96 | # define INLINE __inline__ | |
97 | # define ALWAYSINLINE __attribute__((always_inline)) | |
98 | # define CHECK_MAGAZINE_PTR_LOCKED(szone, mag_ptr, fun) {} | |
99 | #endif | |
100 | ||
101 | # define NOINLINE __attribute__((noinline)) | |
102 | ||
103 | #if defined(__i386__) || defined(__x86_64__) | |
104 | #define CACHE_ALIGN __attribute__ ((aligned (128) )) /* Future-proofing at 128B */ | |
105 | #elif defined(__ppc__) || defined(__ppc64__) | |
106 | #define CACHE_ALIGN __attribute__ ((aligned (128) )) | |
107 | #else | |
108 | #define CACHE_ALIGN | |
109 | #endif | |
110 | ||
111 | #if !__LP64__ |
112 | #define ASLR_INTERNAL 1 | |
113 | #endif | |
114 | ||
115 | /* |
116 | * Access to global variables is slow, so optimise our handling of vm_page_size | |
117 | * and vm_page_shift. | |
118 | */ | |
119 | #define _vm_page_size vm_page_size /* to get to the originals */ | |
120 | #define _vm_page_shift vm_page_shift | |
121 | #define vm_page_size 4096 /* our normal working sizes */ | |
122 | #define vm_page_shift 12 | |
123 | ||
124 | /* | |
125 | * msize - a type to refer to the number of quanta of a tiny or small | |
126 | * allocation. A tiny block with an msize of 3 would be 3 << SHIFT_TINY_QUANTUM | |
127 | * bytes in size. | |
128 | */ | |
129 | typedef unsigned short msize_t; | |
130 | ||
131 | typedef union { | |
132 | void *p; | |
133 | uintptr_t u; | |
134 | } ptr_union; | |
135 | ||
136 | typedef struct { | |
137 | ptr_union previous; | |
138 | ptr_union next; | |
139 | } free_list_t; | |
140 | ||
141 | typedef unsigned int grain_t; // N.B. wide enough to index all free slots | |
142 | ||
143 | typedef int mag_index_t; | |
144 | ||
145 | #define CHECK_REGIONS (1 << 31) | |
146 | #define DISABLE_ASLR (1 << 30) |
147 | |
148 | #define MAX_RECORDER_BUFFER 256 | |
149 | ||
150 | /********************* DEFINITIONS for tiny ************************/ | |
151 | ||
152 | /* | |
153 | * Memory in the Tiny range is allocated from regions (heaps) pointed to by the | |
154 | * szone's hashed_regions pointer. | |
155 | * | |
156 | * Each region is laid out as a heap, followed by a header block, all within | |
157 | * a 1MB (2^20) block. This means there are 64520 16-byte blocks and the header | |
158 | * is 16138 bytes, making the total 1048458 bytes, leaving 118 bytes unused. | |
159 | * | |
160 | * The header block is arranged as in struct tiny_region defined just below, and | |
161 | * consists of two bitfields (or bit arrays) interleaved 32 bits by 32 bits. | |
162 | * | |
163 | * Each bitfield comprises NUM_TINY_BLOCKS bits, and refers to the corresponding | |
164 | * TINY_QUANTUM block within the heap. | |
165 | * | |
166 | * The bitfields are used to encode the state of memory within the heap. The header bit indicates | |
167 | * that the corresponding quantum is the first quantum in a block (either in use or free). The | |
168 | * in-use bit is set for the header if the block has been handed out (allocated). If the header | |
169 | * bit is not set, the in-use bit is invalid. | |
170 | * | |
171 | * The szone maintains an array of NUM_TINY_SLOTS freelists, each of which is used to hold | |
172 | * free objects of the corresponding quantum size. | |
173 | * | |
174 | * A free block is laid out depending on its size, in order to fit all free | |
175 | * blocks in 16 bytes, on both 32 and 64 bit platforms. One quantum blocks do | |
176 | * not store their size in the block, instead relying on the header information | |
177 | * to determine their size. Blocks of two or more quanta have room to store | |
178 | * their size in the block, and store it both after the 'next' pointer, and in | |
179 | * the last 2 bytes of the block. | |
180 | * | |
181 | * 1-quantum block | |
182 | * Offset (32-bit mode) (64-bit mode) | |
183 | * 0x0 0x0 : previous | |
184 | * 0x4 0x08 : next | |
185 | * end end | |
186 | * | |
187 | * >1-quantum block | |
188 | * Offset (32-bit mode) (64-bit mode) | |
189 | * 0x0 0x0 : previous | |
190 | * 0x4 0x08 : next | |
191 | * 0x8 0x10 : size (in quantum counts) | |
192 | * end - 2 end - 2 : size (in quantum counts) | |
193 | * end end | |
194 | * | |
195 | * All fields are pointer-sized, except for the size which is an unsigned short. | |
196 | * | |
197 | */ | |
198 | ||
199 | #define SHIFT_TINY_QUANTUM 4 // Required for AltiVec | |
200 | #define TINY_QUANTUM (1 << SHIFT_TINY_QUANTUM) | |
201 | ||
202 | #define FOLLOWING_TINY_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_TINY_QUANTUM)) | |
203 | ||
204 | #ifdef __LP64__ | |
205 | #define NUM_TINY_SLOTS 64 // number of slots for free-lists | |
206 | #else | |
207 | #define NUM_TINY_SLOTS 32 // number of slots for free-lists | |
208 | #endif | |
209 | ||
210 | #define NUM_TINY_BLOCKS 64520 | |
211 | #define SHIFT_TINY_CEIL_BLOCKS 16 // ceil(log2(NUM_TINY_BLOCKS)) | |
212 | #define NUM_TINY_CEIL_BLOCKS (1 << SHIFT_TINY_CEIL_BLOCKS) | |
213 | #define TINY_BLOCKS_ALIGN (SHIFT_TINY_CEIL_BLOCKS + SHIFT_TINY_QUANTUM) // 20 | |
214 | ||
215 | #define TINY_ENTROPY_BITS 15 |
216 | #define TINY_ENTROPY_MASK ((1 << TINY_ENTROPY_BITS) - 1) | |
217 | ||
218 | /* | |
219 | * Avoid having so much entropy that the end of a valid tiny allocation | |
220 | * might overrun the end of the tiny region. | |
221 | */ | |
222 | #if TINY_ENTROPY_MASK + NUM_TINY_SLOTS > NUM_TINY_BLOCKS | |
223 | #error Too many entropy bits for tiny region requested | |
224 | #endif | |
225 | ||
226 | /* |
227 | * Enough room for the data, followed by the bit arrays (2-bits per block) | |
228 | * plus rounding to the nearest page. | |
229 | */ | |
230 | #define CEIL_NUM_TINY_BLOCKS_WORDS (((NUM_TINY_BLOCKS + 31) & ~31) >> 5) | |
231 | #define TINY_METADATA_SIZE (sizeof(region_trailer_t) + sizeof(tiny_header_inuse_pair_t) * CEIL_NUM_TINY_BLOCKS_WORDS) | |
232 | #define TINY_REGION_SIZE \ | |
233 | ((NUM_TINY_BLOCKS * TINY_QUANTUM + TINY_METADATA_SIZE + vm_page_size - 1) & ~ (vm_page_size - 1)) | |
234 | ||
235 | #define TINY_METADATA_START (NUM_TINY_BLOCKS * TINY_QUANTUM) | |
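/*
 * Editor's aside -- an illustrative sketch, not part of the original source:
 * the arithmetic behind the tiny region layout described above, restated in
 * terms of the macros just defined.
 */
#if 0	/* illustrative only, never compiled */
#include <assert.h>
static void
tiny_region_layout_check(void)
{
    assert(NUM_TINY_BLOCKS * TINY_QUANTUM == 1032320);      /* heap payload: 64520 blocks of 16 bytes */
    assert(CEIL_NUM_TINY_BLOCKS_WORDS == 2017);             /* 64520 bits rounded up to whole 32-bit words */
    assert((TINY_REGION_SIZE & (vm_page_size - 1)) == 0);   /* the region size is page-rounded */
    assert(TINY_REGION_SIZE <= (1 << TINY_BLOCKS_ALIGN));   /* and stays within the 2^20 alignment window */
}
#endif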
236 | ||
237 | /* | |
238 | * Beginning and end pointers for a region's heap. | |
239 | */ | |
240 | #define TINY_REGION_ADDRESS(region) ((void *)(region)) | |
241 | #define TINY_REGION_END(region) ((void *)(((uintptr_t)(region)) + (NUM_TINY_BLOCKS * TINY_QUANTUM))) | |
242 | ||
243 | /* | |
244 | * Locate the heap base for a pointer known to be within a tiny region. | |
245 | */ | |
246 | #define TINY_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << TINY_BLOCKS_ALIGN) - 1))) | |
247 | ||
248 | /* | |
249 | * Convert between byte and msize units. | |
250 | */ | |
251 | #define TINY_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_TINY_QUANTUM) | |
252 | #define TINY_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_TINY_QUANTUM) | |
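/*
 * Editor's aside -- illustrative only, not part of the original source: a worked
 * example of the msize/byte conversions; e.g. a 45-byte request in the tiny
 * range rounds up to msize 3, i.e. 48 bytes.
 */
#if 0
static void
tiny_msize_example(void)
{
    msize_t m = TINY_MSIZE_FOR_BYTES(45 + TINY_QUANTUM - 1);    /* round 45 bytes up to a whole quantum: m == 3 */
    size_t b = TINY_BYTES_FOR_MSIZE(m);                         /* b == 48 */
    (void)m; (void)b;
}
#endif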
253 | ||
254 | #ifdef __LP64__ | |
255 | # define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[8]) | |
256 | #else | |
257 | # define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[4]) | |
258 | #endif | |
259 | #define TINY_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1] | |
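/*
 * Editor's aside -- an illustrative sketch, not part of the original source: how
 * a free tiny block of two or more quanta records its size in both places named
 * in the layout comment above. The helper name is hypothetical; the real code
 * manipulates these fields inline.
 */
#if 0
static void
tiny_free_block_set_size(void *block, msize_t msize)    /* requires msize >= 2 */
{
    TINY_FREE_SIZE(block) = msize;                                  /* stored just after 'next' */
    TINY_PREVIOUS_MSIZE(FOLLOWING_TINY_PTR(block, msize)) = msize;  /* and in the block's last 2 bytes */
}
#endif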
260 | ||
261 | /* | |
262 | * Layout of a tiny region | |
263 | */ | |
264 | typedef uint32_t tiny_block_t[4]; // assert(TINY_QUANTUM == sizeof(tiny_block_t)) | |
265 | ||
266 | typedef struct tiny_header_inuse_pair | |
267 | { | |
268 | uint32_t header; | |
269 | uint32_t inuse; | |
270 | } tiny_header_inuse_pair_t; | |
271 | ||
272 | typedef struct region_trailer | |
273 | { | |
274 | struct region_trailer *prev; | |
275 | struct region_trailer *next; | |
276 | boolean_t recirc_suitable; | |
277 | boolean_t failedREUSE; |
278 | volatile int pinned_to_depot; |
279 | unsigned bytes_used; |
280 | mag_index_t mag_index; | |
281 | } region_trailer_t; | |
282 | ||
283 | typedef struct tiny_region | |
284 | { | |
285 | tiny_block_t blocks[NUM_TINY_BLOCKS]; | |
286 | ||
287 | region_trailer_t trailer; | |
288 | ||
289 | // The interleaved bit arrays comprising the header and inuse bitfields. | |
290 | // The unused bits of each component in the last pair will be initialized to sentinel values. | |
291 | tiny_header_inuse_pair_t pairs[CEIL_NUM_TINY_BLOCKS_WORDS]; | |
292 | ||
293 | uint8_t pad[TINY_REGION_SIZE - (NUM_TINY_BLOCKS * sizeof(tiny_block_t)) - TINY_METADATA_SIZE]; | |
294 | } *tiny_region_t; | |
295 | ||
296 | /* | |
297 | * Per-region meta data for tiny allocator | |
298 | */ | |
299 | #define REGION_TRAILER_FOR_TINY_REGION(r) (&(((tiny_region_t)(r))->trailer)) | |
300 | #define MAGAZINE_INDEX_FOR_TINY_REGION(r) (REGION_TRAILER_FOR_TINY_REGION(r)->mag_index) | |
301 | #define BYTES_USED_FOR_TINY_REGION(r) (REGION_TRAILER_FOR_TINY_REGION(r)->bytes_used) | |
302 | ||
303 | /* | |
304 | * Locate the block header for a pointer known to be within a tiny region. | |
305 | */ | |
306 | #define TINY_BLOCK_HEADER_FOR_PTR(_p) ((void *)&(((tiny_region_t)TINY_REGION_FOR_PTR(_p))->pairs)) | |
307 | ||
308 | /* | |
309 | * Locate the inuse map for a given block header pointer. | |
310 | */ | |
311 | #define TINY_INUSE_FOR_HEADER(_h) ((void *)&(((tiny_header_inuse_pair_t *)(_h))->inuse)) | |
312 | ||
313 | /* | |
314 | * Compute the bitmap index for a pointer known to be within a tiny region. | |
315 | */ | |
316 | #define TINY_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_CEIL_BLOCKS - 1)) | |
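/*
 * Editor's aside -- an illustrative sketch, not part of the original source:
 * reading the two interleaved bit arrays for one quantum, assuming the
 * 32-bit-by-32-bit interleaving described above. The helper name is
 * hypothetical; the real code uses the BITARRAY_* routines declared below.
 */
#if 0
static boolean_t
tiny_ptr_is_block_start_in_use(const void *ptr)
{
    tiny_header_inuse_pair_t *pairs = TINY_BLOCK_HEADER_FOR_PTR(ptr);
    msize_t index = TINY_INDEX_FOR_PTR(ptr);
    uint32_t mask = 1U << (index & 31);

    return (pairs[index >> 5].header & mask) &&     /* first quantum of some block ... */
           (pairs[index >> 5].inuse & mask);        /* ... and that block has been handed out */
}
#endif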
317 | ||
318 | #define TINY_CACHE 1 // This governs a last-free cache of 1 that bypasses the free-list | |
319 | ||
320 | #if ! TINY_CACHE | |
321 | #warning TINY_CACHE turned off | |
322 | #endif | |
323 | ||
324 | #define TINY_REGION_PAYLOAD_BYTES (NUM_TINY_BLOCKS * TINY_QUANTUM) | |
325 | ||
326 | /********************* DEFINITIONS for small ************************/ | |
327 | ||
328 | /* | |
329 | * Memory in the Small range is allocated from regions (heaps) pointed to by the szone's hashed_regions | |
330 | * pointer. | |
331 | * | |
332 | * Each region is laid out as a heap, followed by the metadata array, all within an 8MB (2^23) block. | |
333 | * The array is arranged as an array of shorts, one for each SMALL_QUANTUM in the heap. | |
334 | * This means there are 16320 512-byte blocks and the array is 16320*2 bytes, which totals 8388480, leaving |
335 | * 128 bytes unused. | |
336 | * | |
337 | * The MSB of each short is set for the first quantum in a free block. The low 15 bits encode the | |
338 | * block size (in SMALL_QUANTUM units), or are zero if the quantum is not the first in a block. | |
339 | * | |
340 | * The szone maintains an array of 32 freelists, each of which is used to hold free objects | |
341 | * of the corresponding quantum size. | |
342 | * | |
343 | * A free block is laid out as: | |
344 | * | |
345 | * Offset (32-bit mode) (64-bit mode) | |
346 | * 0x0 0x0 : previous | |
347 | * 0x4 0x08 : next | |
348 | * 0x8 0x10 : size (in quantum counts) | |
349 | * end - 2 end - 2 : size (in quantum counts) | |
350 | * end end | |
351 | * | |
352 | * All fields are pointer-sized, except for the size which is an unsigned short. | |
353 | * | |
354 | */ | |
355 | ||
356 | #define SMALL_IS_FREE (1 << 15) | |
357 | ||
358 | #define SHIFT_SMALL_QUANTUM (SHIFT_TINY_QUANTUM + 5) // 9 | |
359 | #define SMALL_QUANTUM (1 << SHIFT_SMALL_QUANTUM) // 512 bytes | |
360 | ||
361 | #define FOLLOWING_SMALL_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_SMALL_QUANTUM)) | |
362 | ||
363 | /* | |
364 | * The number of slots in the free-list for small blocks. To avoid going to |
365 | * the vm system as often on large-memory machines, the number of free-list |
366 | * slots is increased when the amount of RAM installed crosses a threshold. |
367 | */ | |
368 | #define NUM_SMALL_SLOTS 32 | |
369 | #define NUM_SMALL_SLOTS_LARGEMEM 256 | |
370 | #define SMALL_BITMAP_WORDS 8 | |
371 | ||
372 | /* | |
373 | * We can only represent up to 1<<15 for msize; but we choose to stay even below that to avoid the | |
374 | * convention msize=0 => msize = (1<<15) | |
375 | */ | |
376 | #define NUM_SMALL_BLOCKS 16320 | |
377 | #define SHIFT_SMALL_CEIL_BLOCKS 14 // ceil(log2(NUM_SMALL_BLOCKS)) |
378 | #define NUM_SMALL_CEIL_BLOCKS (1 << SHIFT_SMALL_CEIL_BLOCKS) | |
379 | #define SMALL_BLOCKS_ALIGN (SHIFT_SMALL_CEIL_BLOCKS + SHIFT_SMALL_QUANTUM) // 23 | |
380 | ||
381 | #define SMALL_ENTROPY_BITS 13 |
382 | #define SMALL_ENTROPY_MASK ((1 << SMALL_ENTROPY_BITS) - 1) | |
383 | ||
384 | /* | |
385 | * Avoid having so much entropy that the end of a valid small allocation | |
386 | * might overrun the end of the small region. | |
387 | */ | |
388 | #if SMALL_ENTROPY_MASK + NUM_SMALL_SLOTS > NUM_SMALL_BLOCKS | |
389 | #error Too many entropy bits for small region requested | |
390 | #endif | |
391 | ||
392 | #define SMALL_METADATA_SIZE (sizeof(region_trailer_t) + NUM_SMALL_BLOCKS * sizeof(msize_t)) |
393 | #define SMALL_REGION_SIZE \ | |
394 | ((NUM_SMALL_BLOCKS * SMALL_QUANTUM + SMALL_METADATA_SIZE + vm_page_size - 1) & ~ (vm_page_size - 1)) | |
395 | ||
396 | #define SMALL_METADATA_START (NUM_SMALL_BLOCKS * SMALL_QUANTUM) | |
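/*
 * Editor's aside -- illustrative only, not part of the original source: the
 * corresponding arithmetic for a small region, from the macros above.
 */
#if 0	/* illustrative only, never compiled */
#include <assert.h>
static void
small_region_layout_check(void)
{
    assert(NUM_SMALL_BLOCKS * SMALL_QUANTUM == 8355840);     /* heap payload: 16320 blocks of 512 bytes */
    assert((SMALL_REGION_SIZE & (vm_page_size - 1)) == 0);   /* the region size is page-rounded */
    assert(SMALL_REGION_SIZE <= (1 << SMALL_BLOCKS_ALIGN));  /* and stays within the 2^23 alignment window */
}
#endif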
397 | ||
398 | /* | |
399 | * Beginning and end pointers for a region's heap. | |
400 | */ | |
401 | #define SMALL_REGION_ADDRESS(region) ((unsigned char *)region) | |
402 | #define SMALL_REGION_END(region) (SMALL_REGION_ADDRESS(region) + (NUM_SMALL_BLOCKS * SMALL_QUANTUM)) | |
403 | ||
404 | /* | |
405 | * Locate the heap base for a pointer known to be within a small region. | |
406 | */ | |
407 | #define SMALL_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << SMALL_BLOCKS_ALIGN) - 1))) | |
408 | ||
409 | /* | |
410 | * Convert between byte and msize units. | |
411 | */ | |
412 | #define SMALL_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_SMALL_QUANTUM) | |
413 | #define SMALL_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_SMALL_QUANTUM) | |
414 | ||
415 | #define SMALL_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1] | |
416 | ||
417 | /* | |
418 | * Layout of a small region | |
419 | */ | |
420 | typedef uint32_t small_block_t[SMALL_QUANTUM/sizeof(uint32_t)]; | |
421 | ||
422 | typedef struct small_region | |
423 | { | |
424 | small_block_t blocks[NUM_SMALL_BLOCKS]; | |
425 | ||
426 | region_trailer_t trailer; | |
427 | ||
428 | msize_t small_meta_words[NUM_SMALL_BLOCKS]; | |
429 | ||
430 | uint8_t pad[SMALL_REGION_SIZE - (NUM_SMALL_BLOCKS * sizeof(small_block_t)) - SMALL_METADATA_SIZE]; | |
431 | } *small_region_t; | |
432 | ||
433 | /* | |
434 | * Per-region meta data for small allocator | |
435 | */ | |
436 | #define REGION_TRAILER_FOR_SMALL_REGION(r) (&(((small_region_t)(r))->trailer)) | |
437 | #define MAGAZINE_INDEX_FOR_SMALL_REGION(r) (REGION_TRAILER_FOR_SMALL_REGION(r)->mag_index) | |
438 | #define BYTES_USED_FOR_SMALL_REGION(r) (REGION_TRAILER_FOR_SMALL_REGION(r)->bytes_used) | |
439 | ||
440 | /* | |
441 | * Locate the metadata base for a pointer known to be within a small region. | |
442 | */ | |
443 | #define SMALL_META_HEADER_FOR_PTR(_p) (((small_region_t)SMALL_REGION_FOR_PTR(_p))->small_meta_words) | |
444 | ||
445 | /* | |
446 | * Compute the metadata index for a pointer known to be within a small region. | |
447 | */ | |
448 | #define SMALL_META_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_SMALL_QUANTUM) & (NUM_SMALL_CEIL_BLOCKS - 1)) | |
449 | ||
450 | /* | |
451 | * Find the metadata word for a pointer known to be within a small region. | |
452 | */ | |
453 | #define SMALL_METADATA_FOR_PTR(_p) (SMALL_META_HEADER_FOR_PTR(_p) + SMALL_META_INDEX_FOR_PTR(_p)) | |
454 | ||
455 | /* | |
456 | * Determine whether a pointer known to be within a small region points to memory which is free. | |
457 | */ | |
458 | #define SMALL_PTR_IS_FREE(_p) (*SMALL_METADATA_FOR_PTR(_p) & SMALL_IS_FREE) | |
459 | ||
460 | /* | |
461 | * Extract the msize value for a pointer known to be within a small region. | |
462 | */ | |
463 | #define SMALL_PTR_SIZE(_p) (*SMALL_METADATA_FOR_PTR(_p) & ~SMALL_IS_FREE) | |
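/*
 * Editor's aside -- an illustrative sketch, not part of the original source:
 * decoding a small block's metadata word. The helper name is hypothetical; the
 * real code reads the meta word directly through the macros above.
 */
#if 0
static void
small_ptr_describe(const void *ptr, msize_t *msize, boolean_t *is_free)
{
    msize_t meta = *SMALL_METADATA_FOR_PTR(ptr);

    *msize = meta & ~SMALL_IS_FREE;             /* low 15 bits: length in 512-byte quanta */
    *is_free = (meta & SMALL_IS_FREE) != 0;     /* MSB: set on the first quantum of a free block */
}
#endif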
464 | ||
465 | #define SMALL_CACHE 1 |
466 | #if !SMALL_CACHE | |
467 | #warning SMALL_CACHE turned off | |
468 | #endif | |
469 | ||
470 | #define SMALL_REGION_PAYLOAD_BYTES (NUM_SMALL_BLOCKS * SMALL_QUANTUM) | |
471 | ||
472 | /************************* DEFINITIONS for large ****************************/ | |
473 | ||
474 | #define LARGE_THRESHOLD (15 * 1024) // strictly above this use "large" | |
475 | #define LARGE_THRESHOLD_LARGEMEM (127 * 1024) | |
476 | ||
477 | #if (LARGE_THRESHOLD > NUM_SMALL_SLOTS * SMALL_QUANTUM) | |
478 | #error LARGE_THRESHOLD should always be less than NUM_SMALL_SLOTS * SMALL_QUANTUM | |
479 | #endif | |
480 | ||
481 | #if (LARGE_THRESHOLD_LARGEMEM > NUM_SMALL_SLOTS_LARGEMEM * SMALL_QUANTUM) | |
482 | #error LARGE_THRESHOLD_LARGEMEM should always be less than NUM_SMALL_SLOTS_LARGEMEM * SMALL_QUANTUM |
483 | #endif | |
484 | ||
485 | /* | |
486 | * When all memory is touched after a copy, vm_copy() is always a loss |
487 | * But if the memory is only read, vm_copy() wins over memmove() at 3 or 4 pages | |
488 | * (on a G3/300MHz) | |
489 | * | |
490 | * This must be larger than LARGE_THRESHOLD | |
491 | */ | |
492 | #define VM_COPY_THRESHOLD (40 * 1024) | |
493 | #define VM_COPY_THRESHOLD_LARGEMEM (128 * 1024) | |
494 | ||
495 | typedef struct { | |
496 | vm_address_t address; | |
497 | vm_size_t size; | |
498 | boolean_t did_madvise_reusable; | |
499 | } large_entry_t; | |
500 | ||
501 | #if !TARGET_OS_EMBEDDED |
502 | #define LARGE_CACHE 1 |
503 | #else |
504 | #define LARGE_CACHE 0 | |
505 | #endif | |
506 | #if !LARGE_CACHE |
507 | #warning LARGE_CACHE turned off | |
508 | #endif | |
509 | #if defined(__LP64__) | |
510 | #define LARGE_ENTRY_CACHE_SIZE 16 | |
511 | #define LARGE_CACHE_SIZE_LIMIT ((vm_size_t)0x80000000) /* 2Gb */ | |
512 | #else | |
513 | #define LARGE_ENTRY_CACHE_SIZE 8 | |
514 | #define LARGE_CACHE_SIZE_LIMIT ((vm_size_t)0x02000000) /* 32Mb */ | |
515 | #endif | |
516 | #define LARGE_CACHE_SIZE_ENTRY_LIMIT (LARGE_CACHE_SIZE_LIMIT/LARGE_ENTRY_CACHE_SIZE) | |
517 | ||
518 | #define SZONE_FLOTSAM_THRESHOLD_LOW (1024 * 512) |
519 | #define SZONE_FLOTSAM_THRESHOLD_HIGH (1024 * 1024) | |
520 | ||
521 | /******************************************************************************* |
522 | * Definitions for region hash | |
523 | ******************************************************************************/ | |
524 | ||
525 | typedef void * region_t; | |
526 | typedef region_t * rgnhdl_t; /* A pointer into hashed_regions array. */ | |
527 | ||
528 | #define INITIAL_NUM_REGIONS_SHIFT 6 // log2(INITIAL_NUM_REGIONS) | |
529 | #define INITIAL_NUM_REGIONS (1 << INITIAL_NUM_REGIONS_SHIFT) // Must be a power of 2! | |
530 | #define HASHRING_OPEN_ENTRY ((region_t) 0) // Initial value and sentinel marking end of collision chain | |
531 | #define HASHRING_REGION_DEALLOCATED ((region_t)-1) // Region at this slot reclaimed by OS | |
532 | #define HASH_BLOCKS_ALIGN TINY_BLOCKS_ALIGN // MIN( TINY_BLOCKS_ALIGN, SMALL_BLOCKS_ALIGN, ... ) | |
533 | ||
534 | typedef struct region_hash_generation { | |
535 | size_t num_regions_allocated; | |
536 | size_t num_regions_allocated_shift; // log2(num_regions_allocated) | |
537 | region_t *hashed_regions; // hashed by location | |
538 | struct region_hash_generation *nextgen; | |
539 | } region_hash_generation_t; | |
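/*
 * Editor's aside -- an illustrative sketch, not part of the original source: the
 * general shape of an open-addressing probe over a hashed_regions table, using
 * the sentinels defined above. The starting index is left to the caller; the
 * real lookup and insert logic lives in the hash_*_no_lock() routines declared
 * further down.
 */
#if 0
static region_t *
hashring_probe(region_t *regions, size_t num_entries /* power of 2 */, size_t start, region_t r)
{
    size_t i = start & (num_entries - 1);

    do {
        if (regions[i] == r)
            return regions + i;                 /* found the region */
        if (regions[i] == HASHRING_OPEN_ENTRY)
            return NULL;                        /* end of the collision chain */
        i = (i + 1) & (num_entries - 1);        /* linear probe; HASHRING_REGION_DEALLOCATED slots are skipped */
    } while (i != (start & (num_entries - 1)));
    return NULL;                                /* wrapped all the way around */
}
#endif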
540 | ||
541 | /******************************************************************************* | |
542 | * Per-processor magazine for tiny and small allocators | |
543 | ******************************************************************************/ | |
544 | ||
545 | typedef struct { // vm_allocate()'d, so the array of magazines is page-aligned to begin with. | |
546 | // Take magazine_lock first, Depot lock when needed for recirc, then szone->{tiny,small}_regions_lock when needed for alloc | |
547 | pthread_lock_t magazine_lock CACHE_ALIGN; | |
548 | // Protection for the critical section that does allocate_pages outside the magazine_lock |
549 | volatile boolean_t alloc_underway; | |
550 | |
551 | // One element deep "death row", optimizes malloc/free/malloc for identical size. | |
552 | void *mag_last_free; // low SHIFT_{TINY,SMALL}_QUANTUM bits indicate the msize | |
553 | region_t mag_last_free_rgn; // holds the region for mag_last_free | |
554 | ||
555 | free_list_t *mag_free_list[256]; // assert( 256 >= MAX( NUM_TINY_SLOTS, NUM_SMALL_SLOTS_LARGEMEM )) | |
556 | unsigned mag_bitmap[8]; // assert( sizeof(mag_bitmap) << 3 >= sizeof(mag_free_list)/sizeof(free_list_t) ) | |
557 | ||
558 | // the free areas at the start and end of the last region are treated as big blocks in use that are not accounted for |
559 | size_t mag_bytes_free_at_end; |
560 | size_t mag_bytes_free_at_start; |
561 | region_t mag_last_region; // Valid iff mag_bytes_free_at_end || mag_bytes_free_at_start > 0 | |
562 | |
563 | // bean counting ... | |
564 | unsigned mag_num_objects; | |
565 | size_t mag_num_bytes_in_objects; | |
566 | size_t num_bytes_in_magazine; | |
567 | ||
568 | // recirculation list -- invariant: all regions owned by this magazine that meet the emptiness criteria | |
569 | // are located nearer to the head of the list than any region that doesn't satisfy those criteria. |
570 | // Doubly linked list for efficient extraction. | |
571 | unsigned recirculation_entries; | |
572 | region_trailer_t *firstNode; | |
573 | region_trailer_t *lastNode; | |
574 | ||
575 | #if __LP64__ | |
576 | uint64_t pad[48]; // So sizeof(magazine_t) is 2560 bytes. FIXME: assert this at compile time |
577 | #else |
578 | uint32_t pad[12]; // So sizeof(magazine_t) is 1280 bytes. FIXME: assert this at compile time |
579 | #endif |
580 | } magazine_t; | |
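/*
 * Editor's aside -- an illustrative sketch, not part of the original source: the
 * mag_last_free encoding noted above. A tiny block pointer is TINY_QUANTUM
 * aligned, so its low SHIFT_TINY_QUANTUM bits can carry the msize whenever the
 * msize fits (the callers check that it does). Helper names are hypothetical.
 */
#if 0
static void
mag_last_free_stash(magazine_t *mag, void *ptr, msize_t msize, region_t rgn)
{
    mag->mag_last_free = (void *)((uintptr_t)ptr | msize);  /* requires msize < TINY_QUANTUM */
    mag->mag_last_free_rgn = rgn;
}

static void *
mag_last_free_recover(magazine_t *mag, msize_t *msize)
{
    uintptr_t u = (uintptr_t)mag->mag_last_free;

    *msize = (msize_t)(u & (TINY_QUANTUM - 1));
    return (void *)(u & ~((uintptr_t)TINY_QUANTUM - 1));
}
#endif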
581 | ||
582 | #define TINY_MAX_MAGAZINES 32 /* MUST BE A POWER OF 2! */ |
583 | #define TINY_MAGAZINE_PAGED_SIZE \ |
584 | (((sizeof(magazine_t) * (TINY_MAX_MAGAZINES + 1)) + vm_page_size - 1) &\ | |
585 | ~ (vm_page_size - 1)) /* + 1 for the Depot */ | |
586 | ||
587 | #define SMALL_MAX_MAGAZINES 32 /* MUST BE A POWER OF 2! */ |
588 | #define SMALL_MAGAZINE_PAGED_SIZE \ |
589 | (((sizeof(magazine_t) * (SMALL_MAX_MAGAZINES + 1)) + vm_page_size - 1) &\ | |
590 | ~ (vm_page_size - 1)) /* + 1 for the Depot */ | |
591 | ||
592 | #define DEPOT_MAGAZINE_INDEX -1 | |
593 | ||
594 | /****************************** zone itself ***********************************/ | |
595 | ||
596 | /* | |
597 | * Note that objects whose addresses are held in pointers here must be pursued |
598 | * individually in the {tiny,small}_in_use_enumeration() routines. See for | |
599 | * example the treatment of region_hash_generation and tiny_magazines below. | |
600 | */ | |
601 | ||
602 | typedef struct szone_s { // vm_allocate()'d, so page-aligned to begin with. | |
603 | malloc_zone_t basic_zone; // first page will be given read-only protection |
604 | uint8_t pad[vm_page_size - sizeof(malloc_zone_t)]; | |
605 | ||
606 | pthread_key_t cpu_id_key; // remainder of structure is R/W (contains no function pointers) | |
607 | unsigned debug_flags; |
608 | void *log_address; | |
609 | ||
610 | /* Regions for tiny objects */ | |
611 | pthread_lock_t tiny_regions_lock CACHE_ALIGN; | |
612 | size_t num_tiny_regions; | |
613 | size_t num_tiny_regions_dealloc; | |
614 | region_hash_generation_t *tiny_region_generation; | |
615 | region_hash_generation_t trg[2]; | |
616 | ||
617 | int num_tiny_magazines; | |
618 | unsigned num_tiny_magazines_mask; | |
619 | int num_tiny_magazines_mask_shift; | |
620 | magazine_t *tiny_magazines; // array of per-processor magazines | |
621 | ||
622 | #if TARGET_OS_EMBEDDED |
623 | uintptr_t last_tiny_advise; | |
624 | #endif | |
625 | ||
626 | /* Regions for small objects */ |
627 | pthread_lock_t small_regions_lock CACHE_ALIGN; | |
628 | size_t num_small_regions; | |
629 | size_t num_small_regions_dealloc; | |
630 | region_hash_generation_t *small_region_generation; | |
631 | region_hash_generation_t srg[2]; | |
632 | ||
633 | unsigned num_small_slots; // determined by physmem size | |
634 | ||
635 | int num_small_magazines; | |
636 | unsigned num_small_magazines_mask; | |
637 | int num_small_magazines_mask_shift; | |
638 | magazine_t *small_magazines; // array of per-processor magazines | |
639 | ||
640 | #if TARGET_OS_EMBEDDED |
641 | uintptr_t last_small_advise; | |
642 | #endif | |
643 | ||
644 | /* large objects: all the rest */ |
645 | pthread_lock_t large_szone_lock CACHE_ALIGN; // One customer at a time for large | |
646 | unsigned num_large_objects_in_use; | |
647 | unsigned num_large_entries; | |
648 | large_entry_t *large_entries; // hashed by location; null entries don't count | |
649 | size_t num_bytes_in_large_objects; | |
650 | ||
651 | #if LARGE_CACHE | |
652 | int large_entry_cache_oldest; | |
653 | int large_entry_cache_newest; | |
654 | large_entry_t large_entry_cache[LARGE_ENTRY_CACHE_SIZE]; // "death row" for large malloc/free | |
655 | boolean_t large_legacy_reset_mprotect; | |
656 | size_t large_entry_cache_reserve_bytes; |
657 | size_t large_entry_cache_reserve_limit; | |
658 | size_t large_entry_cache_bytes; // total size of death row, bytes | |
659 | #endif |
660 | ||
661 | /* flag and limits pertaining to altered malloc behavior for systems with | |
662 | large amounts of physical memory */ | |
663 | unsigned is_largemem; | |
664 | unsigned large_threshold; | |
665 | unsigned vm_copy_threshold; | |
666 | ||
667 | /* security cookie */ | |
668 | uintptr_t cookie; | |
669 | ||
670 | /* Initial region list */ | |
671 | region_t initial_tiny_regions[INITIAL_NUM_REGIONS]; | |
672 | region_t initial_small_regions[INITIAL_NUM_REGIONS]; | |
673 | ||
674 | /* The purgeable zone constructed by create_purgeable_zone() would like to hand off tiny and small | |
675 | * allocations to the default scalable zone. Record the latter as the "helper" zone here. */ | |
676 | struct szone_s *helper_zone; | |
677 | |
678 | boolean_t flotsam_enabled; | |
679 | } szone_t; |
680 | ||
681 | #define SZONE_PAGED_SIZE ((sizeof(szone_t) + vm_page_size - 1) & ~ (vm_page_size - 1)) | |
682 | ||
683 | #if DEBUG_MALLOC || DEBUG_CLIENT | |
684 | static void szone_sleep(void); | |
685 | #endif | |
686 | __private_extern__ void malloc_error_break(void); | |
687 | ||
688 | // msg prints after fmt, ... | |
689 | static NOINLINE void szone_error(szone_t *szone, int is_corruption, const char *msg, const void *ptr, const char *fmt, ...) | |
690 | __printflike(5, 6); | |
691 | ||
692 | static void protect(void *address, size_t size, unsigned protection, unsigned debug_flags); | |
693 | static void *allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, | |
694 | int vm_page_label); | |
695 | static void *allocate_pages_securely(szone_t *szone, size_t size, unsigned char align, |
696 | int vm_page_label); | |
697 | static void deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags);
698 | #if TARGET_OS_EMBEDDED |
699 | static int madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi, uintptr_t *last); | |
700 | #else | |
701 | static int madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi);
702 | #endif
703 | static kern_return_t _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr); |
704 | ||
705 | static INLINE mag_index_t mag_get_thread_index(szone_t *szone) ALWAYSINLINE; | |
706 | static magazine_t *mag_lock_zine_for_region_trailer(szone_t *szone, magazine_t *magazines, region_trailer_t *trailer, | |
707 | mag_index_t mag_index); | |
708 | ||
709 | static INLINE rgnhdl_t hash_lookup_region_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r) | |
710 | ALWAYSINLINE; | |
711 | static void hash_region_insert_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r); | |
712 | static region_t *hash_regions_alloc_no_lock(szone_t *szone, size_t num_entries); | |
713 | static region_t *hash_regions_grow_no_lock(szone_t *szone, region_t *regions, size_t old_size, | |
714 | size_t *mutable_shift, size_t *new_size); | |
715 | ||
716 | static INLINE uintptr_t free_list_gen_checksum(uintptr_t ptr) ALWAYSINLINE; | |
717 | static INLINE uintptr_t free_list_checksum_ptr(szone_t *szone, void *p) ALWAYSINLINE; | |
718 | static INLINE void *free_list_unchecksum_ptr(szone_t *szone, ptr_union *ptr) ALWAYSINLINE; | |
719 | static unsigned free_list_count(szone_t *szone, free_list_t *ptr); | |
720 | ||
721 | static INLINE void recirc_list_extract(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node) ALWAYSINLINE; | |
722 | static INLINE void recirc_list_splice_last(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node) ALWAYSINLINE; | |
723 | static INLINE void recirc_list_splice_first(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node) ALWAYSINLINE; | |
724 | ||
725 | static INLINE void BITARRAY_SET(uint32_t *bits, msize_t index) ALWAYSINLINE; | |
726 | static INLINE void BITARRAY_CLR(uint32_t *bits, msize_t index) ALWAYSINLINE; | |
727 | static INLINE boolean_t BITARRAY_BIT(uint32_t *bits, msize_t index) ALWAYSINLINE; | |
728 | ||
729 | static msize_t get_tiny_free_size(const void *ptr); | |
730 | static msize_t get_tiny_previous_free_msize(const void *ptr); | |
731 | static INLINE msize_t get_tiny_meta_header(const void *ptr, boolean_t *is_free) ALWAYSINLINE; | |
732 | static INLINE void set_tiny_meta_header_in_use(const void *ptr, msize_t msize) ALWAYSINLINE; | |
733 | static INLINE void set_tiny_meta_header_in_use_1(const void *ptr) ALWAYSINLINE; | |
734 | static INLINE void set_tiny_meta_header_middle(const void *ptr) ALWAYSINLINE; | |
735 | static INLINE void set_tiny_meta_header_free(const void *ptr, msize_t msize) ALWAYSINLINE; | |
736 | static INLINE boolean_t tiny_meta_header_is_free(const void *ptr) ALWAYSINLINE; | |
737 | static INLINE void *tiny_previous_preceding_free(void *ptr, msize_t *prev_msize) ALWAYSINLINE; | |
738 | ||
739 | static void tiny_free_list_add_ptr(szone_t *szone, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize); | |
740 | static void tiny_free_list_remove_ptr(szone_t *szone, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize); | |
741 | static INLINE region_t tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr) ALWAYSINLINE; | |
742 | ||
743 | static void tiny_finalize_region(szone_t *szone, magazine_t *tiny_mag_ptr); | |
744 | static int tiny_free_detach_region(szone_t *szone, magazine_t *tiny_mag_ptr, region_t r); | |
745 | static size_t tiny_free_reattach_region(szone_t *szone, magazine_t *tiny_mag_ptr, region_t r); | |
746 | static void tiny_free_scan_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r); | |
747 | static region_t tiny_free_try_depot_unmap_no_lock(szone_t *szone, magazine_t *depot_ptr, region_trailer_t *node); |
748 | static boolean_t tiny_free_do_recirc_to_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index); | |
749 | static region_t tiny_find_msize_region(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize); | |
750 | static boolean_t tiny_get_region_from_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize); |
751 | |
752 | static INLINE boolean_t tiny_free_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, region_t region,
753 | void *ptr, msize_t msize) ALWAYSINLINE; |
754 | static void *tiny_malloc_from_region_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, | |
755 | msize_t msize, void *fresh_region); |
756 | static boolean_t tiny_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size); |
757 | static boolean_t tiny_check_region(szone_t *szone, region_t region); | |
758 | static kern_return_t tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone, | |
759 | memory_reader_t reader, vm_range_recorder_t recorder); | |
760 | static void *tiny_malloc_from_free_list(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, | |
761 | msize_t msize); | |
762 | static INLINE void *tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) ALWAYSINLINE; | |
763 | static INLINE void free_tiny(szone_t *szone, void *ptr, region_t tiny_region, size_t known_size) ALWAYSINLINE; | |
764 | static void print_tiny_free_list(szone_t *szone); | |
765 | static void print_tiny_region(boolean_t verbose, region_t region, size_t bytes_at_start, size_t bytes_at_end); |
766 | static boolean_t tiny_free_list_check(szone_t *szone, grain_t slot); |
767 | ||
768 | static INLINE void small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize) ALWAYSINLINE; | |
769 | static INLINE void small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize) ALWAYSINLINE; | |
770 | static INLINE void small_meta_header_set_middle(msize_t *meta_headers, msize_t index) ALWAYSINLINE; | |
771 | static void small_free_list_add_ptr(szone_t *szone, magazine_t *small_mag_ptr, void *ptr, msize_t msize); | |
772 | static void small_free_list_remove_ptr(szone_t *szone, magazine_t *small_mag_ptr, void *ptr, msize_t msize); | |
773 | static INLINE region_t small_region_for_ptr_no_lock(szone_t *szone, const void *ptr) ALWAYSINLINE; | |
774 | ||
775 | static void small_finalize_region(szone_t *szone, magazine_t *small_mag_ptr); | |
776 | static int small_free_detach_region(szone_t *szone, magazine_t *small_mag_ptr, region_t r); | |
777 | static size_t small_free_reattach_region(szone_t *szone, magazine_t *small_mag_ptr, region_t r); | |
778 | static void small_free_scan_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r); |
779 | static region_t small_free_try_depot_unmap_no_lock(szone_t *szone, magazine_t *depot_ptr, region_trailer_t *node); | |
780 | static boolean_t small_free_do_recirc_to_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index); | |
781 | static region_t small_find_msize_region(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize); | |
782 | static boolean_t small_get_region_from_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize); |
783 | static INLINE boolean_t small_free_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, region_t region, |
784 | void *ptr, msize_t msize) ALWAYSINLINE; |
785 | static void *small_malloc_from_region_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, | |
786 | msize_t msize, void *fresh_region); |
787 | static boolean_t small_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size); |
788 | static boolean_t small_check_region(szone_t *szone, region_t region); | |
789 | static kern_return_t small_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone, | |
790 | memory_reader_t reader, vm_range_recorder_t recorder); | |
791 | static void *small_malloc_from_free_list(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, | |
792 | msize_t msize); | |
793 | static INLINE void *small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) ALWAYSINLINE; | |
794 | static INLINE void free_small(szone_t *szone, void *ptr, region_t small_region, size_t known_size) ALWAYSINLINE; | |
795 | static void print_small_free_list(szone_t *szone); | |
796 | static void print_small_region(szone_t *szone, boolean_t verbose, region_t region, size_t bytes_at_start, size_t bytes_at_end); |
797 | static boolean_t small_free_list_check(szone_t *szone, grain_t grain); |
798 | ||
799 | #if DEBUG_MALLOC | |
800 | static void large_debug_print(szone_t *szone); | |
801 | #endif | |
802 | static large_entry_t *large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr); | |
803 | static void large_entry_insert_no_lock(szone_t *szone, large_entry_t range); | |
804 | static INLINE void large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) ALWAYSINLINE; | |
805 | static INLINE large_entry_t *large_entries_alloc_no_lock(szone_t *szone, unsigned num) ALWAYSINLINE; | |
806 | static void large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, | |
807 | vm_range_t *range_to_deallocate); | |
808 | static large_entry_t *large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate); | |
809 | static vm_range_t large_entry_free_no_lock(szone_t *szone, large_entry_t *entry); | |
810 | static NOINLINE kern_return_t large_in_use_enumerator(task_t task, void *context, | |
811 | unsigned type_mask, vm_address_t large_entries_address, | |
812 | unsigned num_entries, memory_reader_t reader, |
813 | vm_range_recorder_t recorder); | |
814 | static void *large_malloc(szone_t *szone, size_t num_pages, unsigned char alignment, boolean_t cleared_requested); |
815 | static NOINLINE void free_large(szone_t *szone, void *ptr); | |
816 | static INLINE int large_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) ALWAYSINLINE; | |
817 | ||
818 | /* | |
819 | * Mark these NOINLINE to avoid bloating the purgeable zone call backs | |
820 | */ | |
821 | static NOINLINE void szone_free(szone_t *szone, void *ptr); | |
822 | static NOINLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested); | |
823 | static NOINLINE void *szone_malloc(szone_t *szone, size_t size); | |
824 | static NOINLINE void *szone_calloc(szone_t *szone, size_t num_items, size_t size); | |
825 | static NOINLINE void *szone_valloc(szone_t *szone, size_t size); | |
826 | static NOINLINE size_t szone_size_try_large(szone_t *szone, const void *ptr); | |
827 | static NOINLINE size_t szone_size(szone_t *szone, const void *ptr); | |
828 | static NOINLINE void *szone_realloc(szone_t *szone, void *ptr, size_t new_size); | |
829 | static NOINLINE void *szone_memalign(szone_t *szone, size_t alignment, size_t size); | |
830 | static NOINLINE void szone_free_definite_size(szone_t *szone, void *ptr, size_t size); | |
831 | static NOINLINE unsigned szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count); | |
832 | static NOINLINE void szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count); | |
833 | static void szone_destroy(szone_t *szone); | |
834 | static NOINLINE size_t szone_good_size(szone_t *szone, size_t size); | |
835 | ||
836 | static NOINLINE boolean_t szone_check_all(szone_t *szone, const char *function); | |
837 | static boolean_t szone_check(szone_t *szone); | |
838 | static kern_return_t szone_ptr_in_use_enumerator(task_t task, void *context, | |
839 | unsigned type_mask, vm_address_t zone_address, | |
840 | memory_reader_t reader, vm_range_recorder_t recorder); | |
841 | static NOINLINE void szone_print(szone_t *szone, boolean_t verbose); | |
842 | static void szone_log(malloc_zone_t *zone, void *log_address); | |
843 | static void szone_force_lock(szone_t *szone); | |
844 | static void szone_force_unlock(szone_t *szone); | |
845 | static boolean_t szone_locked(szone_t *szone); | |
846 | ||
847 | static void szone_statistics(szone_t *szone, malloc_statistics_t *stats); | |
848 | ||
849 | static void purgeable_free(szone_t *szone, void *ptr); | |
850 | static void *purgeable_malloc(szone_t *szone, size_t size); | |
851 | static void *purgeable_calloc(szone_t *szone, size_t num_items, size_t size); | |
852 | static void *purgeable_valloc(szone_t *szone, size_t size); | |
853 | static size_t purgeable_size(szone_t *szone, const void *ptr); | |
854 | static void *purgeable_realloc(szone_t *szone, void *ptr, size_t new_size); | |
855 | static void *purgeable_memalign(szone_t *szone, size_t alignment, size_t size); | |
856 | static void purgeable_free_definite_size(szone_t *szone, void *ptr, size_t size); | |
857 | static unsigned purgeable_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count); | |
858 | static void purgeable_batch_free(szone_t *szone, void **to_be_freed, unsigned count); | |
859 | static void purgeable_destroy(szone_t *szone); | |
860 | static size_t purgeable_good_size(szone_t *szone, size_t size); | |
861 | ||
862 | static boolean_t purgeable_check(szone_t *szone); | |
863 | static kern_return_t purgeable_ptr_in_use_enumerator(task_t task, void *context, | |
864 | unsigned type_mask, vm_address_t zone_address, | |
865 | memory_reader_t reader, vm_range_recorder_t recorder); | |
866 | static void purgeable_print(szone_t *szone, boolean_t verbose); | |
867 | static void purgeable_log(malloc_zone_t *zone, void *log_address); | |
868 | static void purgeable_force_lock(szone_t *szone); | |
869 | static void purgeable_force_unlock(szone_t *szone); | |
870 | static boolean_t purgeable_locked(szone_t *szone); | |
871 | ||
872 | static void purgeable_statistics(szone_t *szone, malloc_statistics_t *stats); | |
873 | ||
874 | static void *frozen_malloc(szone_t *zone, size_t new_size); | |
875 | static void *frozen_calloc(szone_t *zone, size_t num_items, size_t size); | |
876 | static void *frozen_valloc(szone_t *zone, size_t new_size); | |
877 | static void *frozen_realloc(szone_t *zone, void *ptr, size_t new_size); | |
878 | static void frozen_free(szone_t *zone, void *ptr); | |
879 | static void frozen_destroy(szone_t *zone); | |
880 | ||
881 | static volatile uintptr_t entropic_address = 0; |
882 | static volatile uintptr_t entropic_limit = 0; | |
883 | #define ENTROPIC_KABILLION 0x10000000 /* 256Mb */ | |
884 | ||
885 | __private_extern__ uint64_t malloc_entropy[2]; | |
886 | ||
887 | #define SZONE_LOCK(szone) \ |
888 | do { \ | |
889 | LOCK(szone->large_szone_lock); \ | |
890 | } while (0) | |
891 | ||
892 | #define SZONE_UNLOCK(szone) \ | |
893 | do { \ | |
894 | UNLOCK(szone->large_szone_lock); \ | |
895 | } while (0) | |
896 | ||
897 | #define SZONE_TRY_LOCK(szone) \ | |
898 | TRY_LOCK(szone->large_szone_lock); | |
899 | ||
900 | #define SZONE_MAGAZINE_PTR_LOCK(szone, mag_ptr) \ | |
901 | do { \ | |
902 | LOCK(mag_ptr->magazine_lock); \ | |
903 | } while(0) | |
904 | ||
905 | #define SZONE_MAGAZINE_PTR_UNLOCK(szone, mag_ptr) \ | |
906 | do { \ | |
907 | UNLOCK(mag_ptr->magazine_lock); \ | |
908 | } while(0) | |
909 | ||
910 | #define SZONE_MAGAZINE_PTR_TRY_LOCK(szone, mag_ptr) \ | |
911 | TRY_LOCK(mag_ptr->magazine_lock); | |
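/*
 * Editor's aside -- an illustrative sketch, not part of the original source: the
 * lock ordering documented in magazine_t, i.e. a thread's magazine lock first,
 * the Depot's magazine lock only when recirculating, and the regions lock only
 * when a fresh region must be allocated. The function is hypothetical and only
 * demonstrates the nesting order.
 */
#if 0
static void
lock_order_sketch(szone_t *szone, magazine_t *mag, magazine_t *depot)
{
    SZONE_MAGAZINE_PTR_LOCK(szone, mag);
    SZONE_MAGAZINE_PTR_LOCK(szone, depot);      /* recirculation only */
    LOCK(szone->tiny_regions_lock);             /* fresh-region allocation only */

    /* ... critical section ... */

    UNLOCK(szone->tiny_regions_lock);
    SZONE_MAGAZINE_PTR_UNLOCK(szone, depot);
    SZONE_MAGAZINE_PTR_UNLOCK(szone, mag);
}
#endif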
912 | ||
913 | #if DEBUG_MALLOC | |
914 | # define LOG(szone,ptr) \ | |
915 | (szone->log_address && (((uintptr_t)szone->log_address == -1) || \ | |
916 | (szone->log_address == (void *)(ptr)))) | |
917 | #else | |
918 | # define LOG(szone,ptr) 0 | |
919 | #endif | |
920 | ||
921 | #if DEBUG_MALLOC || DEBUG_CLIENT | |
922 | # define CHECK(szone,fun) \ | |
923 | if ((szone)->debug_flags & CHECK_REGIONS) \ | |
924 | szone_check_all(szone, fun) | |
925 | #else | |
926 | # define CHECK(szone,fun) \ | |
927 | do {} while (0) | |
928 | #endif | |
929 | ||
930 | /********************* VERY LOW LEVEL UTILITIES ************************/ | |
931 | ||
932 | #if DEBUG_MALLOC || DEBUG_CLIENT | |
933 | static void | |
934 | szone_sleep(void) | |
935 | { | |
936 | ||
937 | if (getenv("MallocErrorSleep")) { | |
938 | _malloc_printf(ASL_LEVEL_NOTICE, "*** sleeping to help debug\n"); | |
939 | sleep(3600); // to help debug | |
940 | } | |
941 | } | |
942 | #endif | |
943 | ||
944 | // msg prints after fmt, ... |
945 | static NOINLINE void | |
946 | szone_error(szone_t *szone, int is_corruption, const char *msg, const void *ptr, const char *fmt, ...) | |
947 | { | |
948 | va_list ap; | |
949 | _SIMPLE_STRING b = _simple_salloc(); | |
950 | ||
951 | if (szone) SZONE_UNLOCK(szone); // FIXME: unlock magazine and region locks? | |
952 | if (b) { | |
953 | if (fmt) { | |
954 | va_start(ap, fmt); | |
955 | _simple_vsprintf(b, fmt, ap); | |
956 | va_end(ap); | |
957 | } | |
958 | if (ptr) { | |
959 | _simple_sprintf(b, "*** error for object %p: %s\n", ptr, msg); | |
960 | } else { | |
961 | _simple_sprintf(b, "*** error: %s\n", msg); | |
962 | } | |
963 | malloc_printf("%s*** set a breakpoint in malloc_error_break to debug\n", _simple_string(b)); | |
964 | } else { | |
965 | /* | |
966 | * Should only get here if vm_allocate() can't get a single page of | |
967 | * memory, implying _simple_asl_log() would also fail. So we just | |
968 | * print to the file descriptor. | |
969 | */ | |
970 | if (fmt) { | |
971 | va_start(ap, fmt); | |
972 | _malloc_vprintf(MALLOC_PRINTF_NOLOG, fmt, ap); | |
973 | va_end(ap); | |
974 | } | |
975 | if (ptr) { | |
976 | _malloc_printf(MALLOC_PRINTF_NOLOG, "*** error for object %p: %s\n", ptr, msg); | |
977 | } else { | |
978 | _malloc_printf(MALLOC_PRINTF_NOLOG, "*** error: %s\n", msg); | |
979 | } | |
980 | _malloc_printf(MALLOC_PRINTF_NOLOG, "*** set a breakpoint in malloc_error_break to debug\n"); | |
981 | } | |
982 | malloc_error_break(); | |
983 | #if DEBUG_MALLOC | |
984 | szone_print(szone, 1); | |
985 | szone_sleep(); | |
986 | #endif | |
987 | #if DEBUG_CLIENT | |
988 | szone_sleep(); | |
989 | #endif | |
990 | // Call abort() if this is a memory corruption error and the abort on | |
991 | // corruption flag is set, or if any error should abort. | |
992 | if ((is_corruption && (szone->debug_flags & SCALABLE_MALLOC_ABORT_ON_CORRUPTION)) || | |
993 | (szone->debug_flags & SCALABLE_MALLOC_ABORT_ON_ERROR)) { | |
994 | CRSetCrashLogMessage(b ? _simple_string(b) : msg); |
995 | abort(); |
996 | } else if (b) { | |
997 | _simple_sfree(b); | |
998 | } | |
999 | } | |
1000 | ||
1001 | static void | |
1002 | protect(void *address, size_t size, unsigned protection, unsigned debug_flags) | |
1003 | { | |
1004 | kern_return_t err; | |
1005 | ||
1006 | if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) { | |
1007 | err = mprotect((void *)((uintptr_t)address - vm_page_size), vm_page_size, protection); |
1008 | if (err) { |
1009 | malloc_printf("*** can't protect(%p) region for prelude guard page at %p\n", | |
1010 | protection,(uintptr_t)address - (1 << vm_page_shift)); | |
1011 | } | |
1012 | } | |
1013 | if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) { | |
1014 | err = mprotect((void *)((uintptr_t)address + size), vm_page_size, protection); |
1015 | if (err) { |
1016 | malloc_printf("*** can't protect(%p) region for postlude guard page at %p\n", | |
1017 | protection, (uintptr_t)address + size); | |
1018 | } | |
1019 | } | |
1020 | } | |
1021 | ||
1022 | static void * | |
1023 | allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label) | |
1024 | { | |
1025 | // align specifies a desired alignment (as a log) or 0 if no alignment requested | |
1026 | void *vm_addr; | |
1027 | uintptr_t addr = 0, aligned_address; | |
1028 | boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES; | |
1029 | boolean_t purgeable = debug_flags & SCALABLE_MALLOC_PURGEABLE; | |
1030 | size_t allocation_size = round_page(size); | |
1031 | size_t delta; | |
1032 | int alloc_flags = VM_MAKE_TAG(vm_page_label); |
1033 | |
1034 | if (align) add_guard_pages = 0; // too cumbersome to deal with that | |
1035 | if (!allocation_size) allocation_size = 1 << vm_page_shift; | |
1036 | if (add_guard_pages) allocation_size += 2 * (1 << vm_page_shift); | |
1037 | if (align) allocation_size += (size_t)1 << align; | |
1038 | if (purgeable) alloc_flags |= VM_FLAGS_PURGABLE; |
1039 | |
1040 | if (allocation_size < size) // size_t arithmetic wrapped! | |
1041 | return NULL; | |
1042 | ||
1043 | vm_addr = mmap(0 /* addr */, |
1044 | allocation_size /* size */, | |
1045 | PROT_READ | PROT_WRITE /* prot */, | |
1046 | MAP_ANON | MAP_PRIVATE /* flags */, | |
1047 | alloc_flags /* fd being used to pass "purgeable" and "vm_page_label" */, | |
1048 | 0 /* offset */); | |
1049 | if ((uintptr_t)vm_addr == -1) { |
1050 | szone_error(szone, 0, "can't allocate region", NULL, "*** mmap(size=%lu) failed (error code=%d)\n", | |
1051 | allocation_size, errno); | |
1052 | return NULL; | |
1053 | } | |
1054 | addr = (uintptr_t)vm_addr; | |
1055 | ||
1056 | if (align) { | |
1057 | aligned_address = (addr + ((uintptr_t)1 << align) - 1) & ~ (((uintptr_t)1 << align) - 1); | |
1058 | if (aligned_address != addr) { | |
1059 | delta = aligned_address - addr; | |
1060 | if (munmap((void *)addr, delta) == -1) | |
1061 | malloc_printf("*** munmap unaligned header failed with %d\n", errno); | |
1062 | addr = aligned_address; | |
1063 | allocation_size -= delta; | |
1064 | } | |
1065 | if (allocation_size > size) { | |
1066 | if (munmap((void *)(addr + size), allocation_size - size) == -1) | |
1067 | malloc_printf("*** munmap unaligned footer failed with %d\n", errno); | |
1068 | } | |
1069 | } | |
1070 | if (add_guard_pages) { | |
1071 | addr += (uintptr_t)1 << vm_page_shift; | |
1072 | protect((void *)addr, size, PROT_NONE, debug_flags); |
1073 | } | |
1074 | return (void *)addr; | |
1075 | } | |
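/*
 * Editor's aside -- illustrative only, not part of the original source: a
 * typical use of allocate_pages(), obtaining a 2^TINY_BLOCKS_ALIGN-aligned tiny
 * region tagged for the VM. The actual call sites appear later in the file.
 */
#if 0
static void *
grab_tiny_region_sketch(szone_t *szone)
{
    return allocate_pages(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN,
                          0 /* debug_flags */, VM_MEMORY_MALLOC_TINY);
}
#endif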
1076 | ||
1077 | static void * | |
1078 | allocate_pages_securely(szone_t *szone, size_t size, unsigned char align, int vm_page_label) | |
1079 | { | |
1080 | // align specifies a desired alignment (as a log) or 0 if no alignment requested | |
1081 | void *vm_addr; | |
1082 | uintptr_t addr, aligned_address; | |
1083 | size_t delta, allocation_size = MAX(round_page(size), vm_page_size); | |
1084 | int alloc_flags = VM_MAKE_TAG(vm_page_label); | |
1085 | ||
1086 | if (szone->debug_flags & DISABLE_ASLR) | |
1087 | return allocate_pages(szone, size, align, 0, vm_page_label); | |
1088 | ||
1089 | if (align) | |
1090 | allocation_size += (size_t)1 << align; | |
1091 | ||
1092 | if (allocation_size < size) // size_t arithmetic wrapped! | |
1093 | return NULL; | |
1094 | ||
1095 | retry: | |
1096 | vm_addr = mmap((void *)entropic_address /* kernel finds next available range at or above this address */, | |
1097 | allocation_size /* size */, | |
1098 | PROT_READ | PROT_WRITE /* prot */, | |
1099 | MAP_ANON | MAP_PRIVATE /* flags */, | |
1100 | alloc_flags /* fd being used to pass "vm_page_label" */, | |
1101 | 0 /* offset */); | |
1102 | if (MAP_FAILED == vm_addr) { | |
1103 | szone_error(szone, 0, "can't allocate region securely", NULL, "*** mmap(size=%lu) failed (error code=%d)\n", | |
1104 | size, errno); | |
1105 | return NULL; | |
1106 | } | |
1107 | addr = (uintptr_t)vm_addr; | |
1108 | ||
1109 | // Don't allow allocation to rise above entropic_limit (for tidiness). | |
1110 | if (addr + allocation_size > entropic_limit) { // Exhausted current range? | |
1111 | uintptr_t t = entropic_address; | |
1112 | uintptr_t u = t - ENTROPIC_KABILLION; | |
1113 | ||
1114 | if (u < t) { // provided we don't wrap, unmap and retry, in the expanded entropic range | |
1115 | munmap((void *)addr, allocation_size); | |
1116 | (void)__sync_bool_compare_and_swap(&entropic_address, t, u); // Just one reduction please | |
1117 | goto retry; | |
1118 | } | |
1119 | // fall through to use what we got | |
1120 | } | |
1121 | ||
1122 | if (addr < entropic_address) { // mmap wrapped to find this allocation, expand the entropic range | |
1123 | uintptr_t t = entropic_address; | |
1124 | uintptr_t u = t - ENTROPIC_KABILLION; | |
1125 | if (u < t) | |
1126 | (void)__sync_bool_compare_and_swap(&entropic_address, t, u); // Just one reduction please | |
1127 | // fall through to use what we got | |
1128 | } | |
1129 | ||
1130 | // unmap any excess address range used for alignment padding | |
1131 | if (align) { | |
1132 | aligned_address = (addr + ((uintptr_t)1 << align) - 1) & ~ (((uintptr_t)1 << align) - 1); | |
1133 | if (aligned_address != addr) { | |
1134 | delta = aligned_address - addr; | |
1135 | if (munmap((void *)addr, delta) == -1) | |
1136 | malloc_printf("*** munmap unaligned header failed with %d\n", errno); | |
1137 | addr = aligned_address; | |
1138 | allocation_size -= delta; | |
1139 | } | |
1140 | if (allocation_size > size) { | |
1141 | if (munmap((void *)(addr + size), allocation_size - size) == -1) | |
1142 | malloc_printf("*** munmap unaligned footer failed with %d\n", errno); | |
1143 | } | |
1144 | } |
1145 | return (void *)addr; | |
1146 | } | |
1147 | ||
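/*
 * Illustrative sketch (not part of the original source): the "over-allocate and
 * trim" idiom used by allocate_pages_securely() above, reduced to its essentials.
 * mmap() only guarantees page alignment, so to obtain a (1 << align_shift)-byte
 * aligned block we request size plus the alignment, round the returned address
 * up, and munmap() the unused header and footer. Function name is hypothetical.
 */
static void *
example_mmap_aligned(size_t size, unsigned char align_shift)
{
	size_t alignment = (size_t)1 << align_shift;
	size_t padded = round_page(size) + alignment;
	uintptr_t raw, aligned;

	void *p = mmap(NULL, padded, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	if (MAP_FAILED == p)
		return NULL;

	raw = (uintptr_t)p;
	aligned = (raw + alignment - 1) & ~(alignment - 1);

	if (aligned != raw)					// trim the unaligned header
		munmap((void *)raw, aligned - raw);
	if (aligned + round_page(size) < raw + padded)		// trim the excess footer
		munmap((void *)(aligned + round_page(size)),
		       (raw + padded) - (aligned + round_page(size)));

	return (void *)aligned;
}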
1148 | static void | |
1149 | deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags) | |
1150 | { | |
1151 | int err; | |
1152 | boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES; | |
1153 | ||
1154 | if (add_guard_pages) { | |
1155 | addr = (void *)((uintptr_t)addr - (1 << vm_page_shift)); | |
1156 | size += 2 * (1 << vm_page_shift); | |
1157 | } | |
1158 | err = munmap(addr, size); | |
1159 | if ((err == -1) && szone) | |
1160 | szone_error(szone, 0, "Can't deallocate_pages region", addr, NULL); | |
1161 | } | |
1162 | ||
1163 | static int | |
511daa4c A |
1164 | #if TARGET_OS_EMBEDDED |
1165 | madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi, uintptr_t *last) | |
1166 | #else | |
34e8f829 | 1167 | madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi) |
511daa4c | 1168 | #endif |
34e8f829 A |
1169 | { |
1170 | if (pgHi > pgLo) { | |
1171 | size_t len = pgHi - pgLo; | |
1172 | ||
1173 | #if DEBUG_MALLOC | |
1174 | if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) | |
1175 | memset((void *)pgLo, 0xed, len); // Scribble on MADV_FREEd memory | |
1176 | #endif | |
511daa4c A |
1177 | |
1178 | #if TARGET_OS_EMBEDDED | |
1179 | if (last) { | |
1180 | if (*last == pgLo) | |
1181 | return 0; | |
1182 | ||
1183 | *last = pgLo; | |
1184 | } | |
1185 | #endif | |
1186 | ||
34e8f829 | 1187 | MAGMALLOC_MADVFREEREGION((void *)szone, (void *)r, (void *)pgLo, len); // DTrace USDT Probe |
511daa4c A |
1188 | #if TARGET_OS_EMBEDDED |
1189 | if (-1 == madvise((void *)pgLo, len, MADV_FREE)) { | |
1190 | #else | |
34e8f829 | 1191 | if (-1 == madvise((void *)pgLo, len, MADV_FREE_REUSABLE)) { |
511daa4c | 1192 | #endif |
34e8f829 | 1193 | /* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */ |
7ba935f9 | 1194 | #if DEBUG_MADVISE |
1f2f436a A |
1195 | szone_error(szone, 0, "madvise_free_range madvise(..., MADV_FREE_REUSABLE) failed", |
1196 | (void *)pgLo, "length=%d\n", len); | |
34e8f829 A |
1197 | #endif |
1198 | } | |
1199 | } | |
1200 | return 0; | |
1201 | } | |
1202 | ||
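/*
 * Illustrative sketch (not part of the original source): the bare madvise() call
 * that madvise_free_range() wraps above. The advice tells the VM system the
 * pages' contents are no longer needed, so they may be reclaimed lazily; it is
 * only legal on memory the allocator knows is free. Name is hypothetical.
 */
static int
example_advise_free(void *page_aligned_start, size_t len)
{
	// The desktop path above prefers MADV_FREE_REUSABLE, which additionally
	// removes the pages from the task's physical footprint until they are
	// reused; MADV_FREE is the simpler form shown here.
	return madvise(page_aligned_start, len, MADV_FREE);
}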
1203 | static kern_return_t | |
1204 | _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) | |
1205 | { | |
1206 | *ptr = (void *)address; | |
1207 | return 0; | |
1208 | } | |
1209 | ||
1210 | // Multiplicative hash where the multiplier is a prime near (ULONG_MAX / phi). [phi = 1.618033...] | |
1211 | // pthread_t's are page aligned, (sometimes even in ascending sequence). These hash well. | |
1212 | // See Knuth TAOCP, Vol. 3. | |
1213 | #if __LP64__ | |
1214 | #define HASH_SELF() \ | |
1215 | ((((uintptr_t)pthread_self()) >> vm_page_shift) * 11400714819323198549ULL) >> (64 - szone->num_tiny_magazines_mask_shift) | |
1216 | #else | |
1217 | #define HASH_SELF() \ | |
1218 | ((((uintptr_t)pthread_self()) >> vm_page_shift) * 2654435761UL) >> (32 - szone->num_tiny_magazines_mask_shift) | |
1219 | #endif | |
1220 | ||
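/*
 * Illustrative sketch (not part of the original source): the multiplicative
 * ("Fibonacci") hashing that HASH_SELF() uses above. Multiplying by a constant
 * near 2^N / phi scrambles low-entropy input bits into the high bits, and the
 * top 'shift' bits become the bucket index. Names here are hypothetical.
 */
static unsigned
example_fib_hash(uintptr_t key, unsigned shift)
{
#if __LP64__
	return (unsigned)((key * 11400714819323198549ULL) >> (64 - shift));
#else
	return (unsigned)((key * 2654435761UL) >> (32 - shift));
#endif
}
// e.g. example_fib_hash((uintptr_t)pthread_self() >> vm_page_shift, 2) selects one of 4 magazines.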
1f2f436a | 1221 | #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) |
34e8f829 A |
1222 | /* |
1223 | * These commpage routines provide fast access to the logical cpu number | |
1224 | * of the calling processor assuming no pre-emption occurs. | |
1225 | */ | |
34e8f829 A |
1226 | |
1227 | static INLINE mag_index_t | |
1228 | mag_get_thread_index(szone_t *szone) | |
1229 | { | |
1230 | if (!__is_threaded) | |
1231 | return 0; | |
1232 | else | |
1f2f436a | 1233 | return cpu_number() & (TINY_MAX_MAGAZINES - 1); |
51282358 A |
1234 | } |
1235 | ||
34e8f829 A |
1236 | #else |
1237 | #warning deriving magazine index from pthread_self() [want processor number] | |
1238 | ||
1239 | static INLINE mag_index_t | |
1240 | mag_get_thread_index(szone_t *szone) | |
1241 | { | |
1242 | if (!__is_threaded) | |
1243 | return 0; | |
1244 | else if ((pthread_key_t) -1 == szone->cpu_id_key) { // In case pthread_key_create() failed. | |
1245 | return HASH_SELF(); | |
1246 | } else { | |
1247 | mag_index_t idx = (mag_index_t)(intptr_t)pthread_getspecific(szone->cpu_id_key); | |
1248 | ||
1249 | // Has this thread been hinted with a non-zero value [i.e. 1 + cpuid()] ? | |
1250 | // If so, bump down the hint to a zero-based magazine index and return it. | |
1251 | if (idx) { | |
1252 | return idx - 1; | |
1253 | } else { | |
1254 | // No hint available. Construct a magazine index for this thread ... | |
1255 | idx = HASH_SELF(); | |
1256 | ||
1257 | // bump up the hint to exclude zero and try to memorize it ... | |
1258 | pthread_setspecific(szone->cpu_id_key, (const void *)((uintptr_t)idx + 1)); | |
1259 | ||
1260 | // and return the (zero-based) magazine index. | |
1261 | return idx; | |
1262 | } | |
1263 | } | |
1264 | } | |
1265 | #endif | |
1266 | ||
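/*
 * Illustrative sketch (not part of the original source): the "store index + 1"
 * caching trick used by mag_get_thread_index() above. pthread_getspecific()
 * returns NULL (i.e. 0) for a key this thread has never set, so the cached value
 * is biased by one to distinguish "no hint yet" from a real index of 0. The key
 * and the compute_index callback are hypothetical placeholders.
 */
static unsigned
example_cached_index(pthread_key_t key, unsigned (*compute_index)(void))
{
	uintptr_t hint = (uintptr_t)pthread_getspecific(key);

	if (hint)
		return (unsigned)(hint - 1);		// cache hit: undo the +1 bias

	unsigned idx = compute_index();			// first use on this thread
	pthread_setspecific(key, (const void *)(uintptr_t)(idx + 1));
	return idx;
}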
1267 | static magazine_t * | |
1268 | mag_lock_zine_for_region_trailer(szone_t *szone, magazine_t *magazines, region_trailer_t *trailer, mag_index_t mag_index) | |
1269 | { | |
1270 | mag_index_t refreshed_index; | |
1271 | magazine_t *mag_ptr = &(magazines[mag_index]); | |
1272 | ||
1273 | // Take the lock on entry. | |
1274 | SZONE_MAGAZINE_PTR_LOCK(szone, mag_ptr); | |
1275 | ||
1276 | // Now in the time it took to acquire the lock, the region may have migrated | |
1277 | // from one magazine to another. In which case the magazine lock we obtained | |
1278 | // (namely magazines[mag_index].mag_lock) is stale. If so, keep on tryin' ... | |
1279 | while (mag_index != (refreshed_index = trailer->mag_index)) { // Note assignment | |
1280 | ||
1281 | SZONE_MAGAZINE_PTR_UNLOCK(szone, mag_ptr); | |
1282 | ||
1283 | mag_index = refreshed_index; | |
1284 | mag_ptr = &(magazines[mag_index]); | |
1285 | SZONE_MAGAZINE_PTR_LOCK(szone, mag_ptr); | |
1286 | } | |
1287 | ||
1288 | return mag_ptr; | |
1289 | } | |
1290 | ||
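/*
 * Illustrative sketch (not part of the original source): the lock-and-revalidate
 * loop used by mag_lock_zine_for_region_trailer() above, restated with plain
 * pthread mutexes and hypothetical types. The owning index can change while we
 * block on the lock, so it must be re-read after acquisition and, if it moved,
 * the new owner's lock chased instead.
 */
typedef struct { pthread_mutex_t lock; } example_owner_t;

static example_owner_t *
example_lock_current_owner(example_owner_t *owners, volatile int *owner_index)
{
	int idx = *owner_index;

	pthread_mutex_lock(&owners[idx].lock);
	while (idx != *owner_index) {			// owner migrated while we slept on the lock
		pthread_mutex_unlock(&owners[idx].lock);
		idx = *owner_index;
		pthread_mutex_lock(&owners[idx].lock);
	}
	return &owners[idx];
}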
1291 | /******************************************************************************* | |
1292 | * Region hash implementation | |
1293 | * | |
1294 | * This is essentially a duplicate of the existing Large allocator hash, minus | |
1295 | * the ability to remove entries. The two should be combined eventually. | |
1296 | ******************************************************************************/ | |
1297 | #pragma mark region hash | |
1298 | ||
1299 | /* | |
1300 | * hash_lookup_region_no_lock - Scan a hash ring looking for an entry for a | |
1301 | * given region. | |
1302 | * | |
1303 | * FIXME: If consecutive queries of the same region are likely, a one-entry | |
1304 | * cache would likely be a significant performance win here. | |
1305 | */ | |
1306 | static INLINE rgnhdl_t | |
1307 | hash_lookup_region_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r) { | |
1308 | size_t index, hash_index; | |
1309 | rgnhdl_t entry; | |
1310 | ||
1311 | if (!num_entries) | |
1312 | return 0; | |
1313 | ||
1314 | // Multiplicative hash where the multiplier is a prime near (ULONG_MAX / phi). [phi = 1.618033...] | |
1315 | // Since the values of (((uintptr_t)r >> HASH_BLOCKS_ALIGN) are (roughly) an ascending sequence of integers, | |
1316 | // this hash works really well. See Knuth TAOCP, Vol. 3. | |
1317 | #if __LP64__ | |
1318 | index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 11400714819323198549ULL) >> (64 - shift); | |
1319 | #else | |
1320 | index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 2654435761UL) >> (32 - shift); | |
1321 | #endif | |
1322 | do { | |
1323 | entry = regions + index; | |
1324 | if (*entry == 0) | |
1325 | return 0; | |
1326 | if (*entry == r) | |
1327 | return entry; | |
1328 | if (++index == num_entries) | |
1329 | index = 0; | |
1330 | } while (index != hash_index); | |
1331 | return 0; | |
1332 | } | |
1333 | ||
1334 | /* | |
1335 | * hash_region_insert_no_lock - Insert a region into the hash ring. | |
1336 | */ | |
1337 | static void | |
1338 | hash_region_insert_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r) { | |
1339 | size_t index, hash_index; | |
1340 | rgnhdl_t entry; | |
1341 | ||
1342 | // Multiplicative hash where the multiplier is a prime near (ULONG_MAX / phi). [phi = 1.618033...] | |
1343 | // Since the values of (((uintptr_t)r >> HASH_BLOCKS_ALIGN) are (roughly) an ascending sequence of integers, | |
1344 | // this hash works really well. See Knuth TAOCP, Vol. 3. | |
1345 | #if __LP64__ | |
1346 | index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 11400714819323198549ULL) >> (64 - shift); | |
1347 | #else | |
1348 | index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 2654435761UL) >> (32 - shift); | |
1349 | #endif | |
1350 | do { | |
1351 | entry = regions + index; | |
1352 | if (*entry == HASHRING_OPEN_ENTRY || *entry == HASHRING_REGION_DEALLOCATED) { | |
1353 | *entry = r; | |
1354 | return; | |
1355 | } | |
1356 | if (++index == num_entries) | |
1357 | index = 0; | |
1358 | } while (index != hash_index); | |
1359 | } | |
1360 | ||
1361 | /* | |
1362 | * hash_regions_alloc_no_lock - Allocate space for a number of entries. This | |
1363 | * must be a VM allocation so as to avoid recursing between allocating a new small | |
1364 | * region, and asking the small region to allocate space for the new list of | |
1365 | * regions. | |
1366 | */ | |
1367 | static region_t * | |
1368 | hash_regions_alloc_no_lock(szone_t *szone, size_t num_entries) | |
1369 | { | |
1370 | size_t size = num_entries * sizeof(region_t); | |
1371 | ||
1372 | return allocate_pages(szone, round_page(size), 0, 0, VM_MEMORY_MALLOC); | |
1373 | } | |
1374 | ||
1375 | /* | |
1376 | * hash_regions_grow_no_lock - Grow the hash ring, and rehash the entries. | |
1377 | * Return the new region and new size to update the szone. Do not deallocate | |
1378 | * the old entries since someone may still be allocating them. | |
1379 | */ | |
1380 | static region_t * | |
1381 | hash_regions_grow_no_lock(szone_t *szone, region_t *regions, size_t old_size, size_t *mutable_shift, | |
1382 | size_t *new_size) | |
1383 | { | |
1384 | // double in size and allocate memory for the regions | |
1385 | *new_size = old_size + old_size; | |
1386 | *mutable_shift = *mutable_shift + 1; | |
1387 | region_t *new_regions = hash_regions_alloc_no_lock(szone, *new_size); | |
1388 | ||
1389 | // rehash the entries into the new list | |
1390 | size_t index; | |
1391 | for (index = 0; index < old_size; ++index) { | |
1392 | region_t r = regions[index]; | |
1393 | if (r != HASHRING_OPEN_ENTRY && r != HASHRING_REGION_DEALLOCATED) | |
1394 | hash_region_insert_no_lock(new_regions, *new_size, *mutable_shift, r); | |
1395 | } | |
1396 | return new_regions; | |
1397 | } | |
1398 | ||
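/*
 * Illustrative sketch (not part of the original source): exercising the
 * open-addressed "hash ring" above on a toy table. Lookup and insert both start
 * at the multiplicative hash of the region address and probe linearly, wrapping
 * at the end; a zero entry terminates a lookup, while insert also reuses
 * HASHRING_REGION_DEALLOCATED tombstones. The region value is hypothetical.
 */
static void
example_region_hash_usage(void)
{
	region_t table[8] = { 0 };			// 8 slots, so shift == 3
	region_t r = (region_t)((uintptr_t)1 << 20);	// pretend region address

	hash_region_insert_no_lock(table, 8, 3, r);
	rgnhdl_t slot = hash_lookup_region_no_lock(table, 8, 3, r);

	if (slot && *slot == r) {
		// When the ring fills, the szone code doubles it with
		// hash_regions_grow_no_lock(), rehashing live entries and
		// dropping tombstones; the old table is left for lockless readers.
	}
}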
1399 | /********************* FREE LIST UTILITIES ************************/ | |
1400 | ||
1401 | // A free list entry consists of a pair of pointers, previous and next. | |
1402 | // These are used to implement a doubly-linked list, which permits efficient | |
1403 | // extraction. | |
1404 | // | |
1405 | // Because the free list entries are previously freed objects, a misbehaved | |
1406 | // program may write to a pointer after it has called free() on that pointer, | |
1407 | // either by dereferencing it or by overflowing an adjacent buffer. This write | |
1408 | // would then corrupt the free list's previous and next pointers, leading to a | |
1409 | // crash. In order to detect this case, we take advantage of the fact that | |
1410 | // malloc'd pointers are known to be at least 16-byte aligned, and thus have | |
1411 | // at least 4 trailing zero bits. | |
1412 | // | |
1413 | // When an entry is added to the free list, a checksum of the previous and next | |
1f2f436a | 1414 | // pointers is calculated and written to the high four bits of the respective |
34e8f829 A |
1415 | // pointers. Upon detection of an invalid checksum, an error is logged and NULL |
1416 | // is returned. Since all code which un-checksums pointers checks for a NULL | |
1417 | // return, a potentially crashing or malicious dereference is avoided at the | |
1418 | // cost of leaking the corrupted block, and any subsequent blocks on the free | |
1419 | // list of that size. | |
1420 | ||
1421 | static NOINLINE void | |
1422 | free_list_checksum_botch(szone_t *szone, free_list_t *ptr) | |
1423 | { | |
1424 | szone_error(szone, 1, "incorrect checksum for freed object " | |
1425 | "- object was probably modified after being freed.", ptr, NULL); | |
1426 | } | |
1427 | ||
1428 | static INLINE uintptr_t free_list_gen_checksum(uintptr_t ptr) | |
1429 | { | |
1430 | uint8_t chk; | |
1431 | ||
1432 | chk = (unsigned char)(ptr >> 0); | |
1433 | chk += (unsigned char)(ptr >> 8); | |
1434 | chk += (unsigned char)(ptr >> 16); | |
1435 | chk += (unsigned char)(ptr >> 24); | |
1436 | #if __LP64__ | |
1437 | chk += (unsigned char)(ptr >> 32); | |
1438 | chk += (unsigned char)(ptr >> 40); | |
1439 | chk += (unsigned char)(ptr >> 48); | |
1440 | chk += (unsigned char)(ptr >> 56); | |
1441 | #endif | |
1442 | ||
1443 | return chk & (uintptr_t)0xF; | |
1444 | } | |
1445 | ||
1f2f436a A |
1446 | #define NYBBLE 4 |
1447 | #if __LP64__ | |
1448 | #define ANTI_NYBBLE (64 - NYBBLE) | |
1449 | #else | |
1450 | #define ANTI_NYBBLE (32 - NYBBLE) | |
1451 | #endif | |
1452 | ||
34e8f829 A |
1453 | static INLINE uintptr_t |
1454 | free_list_checksum_ptr(szone_t *szone, void *ptr) | |
1455 | { | |
1456 | uintptr_t p = (uintptr_t)ptr; | |
1f2f436a | 1457 | return (p >> NYBBLE) | (free_list_gen_checksum(p ^ szone->cookie) << ANTI_NYBBLE); // compiles to rotate instruction |
34e8f829 A |
1458 | } |
1459 | ||
1460 | static INLINE void * | |
1461 | free_list_unchecksum_ptr(szone_t *szone, ptr_union *ptr) | |
1462 | { | |
1463 | ptr_union p; | |
1f2f436a A |
1464 | uintptr_t t = ptr->u; |
1465 | ||
1466 | t = (t << NYBBLE) | (t >> ANTI_NYBBLE); // compiles to rotate instruction | |
1467 | p.u = t & ~(uintptr_t)0xF; | |
1468 | ||
1469 | if ((t & (uintptr_t)0xF) != free_list_gen_checksum(p.u ^ szone->cookie)) | |
34e8f829 A |
1470 | { |
1471 | free_list_checksum_botch(szone, (free_list_t *)ptr); | |
1472 | return NULL; | |
1473 | } | |
1474 | return p.p; | |
1475 | } | |
1476 | ||
1f2f436a A |
1477 | #undef ANTI_NYBBLE |
1478 | #undef NYBBLE | |
1479 | ||
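/*
 * Illustrative sketch (not part of the original source): a round trip through
 * the free-list pointer checksum implemented just above. A freed pointer has at
 * least 4 trailing zero bits, so it is rotated right by a nybble and the vacated
 * high nybble is filled with a 4-bit additive checksum of the pointer XORed with
 * the per-zone cookie; corruption of either part is caught on the way back out.
 */
static void
example_checksum_round_trip(szone_t *szone, free_list_t *entry, void *target)
{
	entry->next.u = free_list_checksum_ptr(szone, target);

	void *recovered = free_list_unchecksum_ptr(szone, &entry->next);
	// recovered == target if the entry was not scribbled on after free();
	// otherwise it is NULL and the corrupted chain is abandoned (leaked).
	(void)recovered;
}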
34e8f829 A |
1480 | static unsigned |
1481 | free_list_count(szone_t *szone, free_list_t *ptr) | |
1482 | { | |
1483 | unsigned count = 0; | |
1484 | ||
1485 | while (ptr) { | |
1486 | count++; | |
1487 | ptr = free_list_unchecksum_ptr(szone, &ptr->next); | |
1488 | } | |
1489 | return count; | |
1490 | } | |
1491 | ||
1492 | static INLINE void | |
1493 | recirc_list_extract(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node) | |
1494 | { | |
1495 | // excise node from list | |
1496 | if (NULL == node->prev) | |
1497 | mag_ptr->firstNode = node->next; | |
1498 | else | |
1499 | node->prev->next = node->next; | |
1500 | ||
1501 | if (NULL == node->next) | |
1502 | mag_ptr->lastNode = node->prev; | |
1503 | else | |
1504 | node->next->prev = node->prev; | |
1505 | ||
1506 | mag_ptr->recirculation_entries--; | |
1507 | } | |
1508 | ||
1509 | static INLINE void | |
1510 | recirc_list_splice_last(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node) | |
1511 | { | |
1512 | if (NULL == mag_ptr->lastNode) { | |
1513 | mag_ptr->firstNode = node; | |
1514 | node->prev = NULL; | |
1515 | } else { | |
1516 | node->prev = mag_ptr->lastNode; | |
1517 | mag_ptr->lastNode->next = node; | |
1518 | } | |
1519 | mag_ptr->lastNode = node; | |
1520 | node->next = NULL; | |
1521 | node->recirc_suitable = FALSE; | |
1522 | mag_ptr->recirculation_entries++; | |
1523 | } | |
1524 | ||
1525 | static INLINE void | |
1526 | recirc_list_splice_first(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node) | |
1527 | { | |
1528 | if (NULL == mag_ptr->firstNode) { | |
1529 | mag_ptr->lastNode = node; | |
1530 | node->next = NULL; | |
1531 | } else { | |
1532 | node->next = mag_ptr->firstNode; | |
1533 | mag_ptr->firstNode->prev = node; | |
1534 | } | |
1535 | mag_ptr->firstNode = node; | |
1536 | node->prev = NULL; | |
1537 | node->recirc_suitable = FALSE; | |
1538 | mag_ptr->recirculation_entries++; | |
1539 | } | |
1540 | ||
1541 | /* Macros used to manipulate the uint32_t quantity mag_bitmap. */ | |
1542 | ||
1543 | /* BITMAPV variants are used by tiny. */ | |
1544 | #if defined(__LP64__) | |
1545 | // assert(NUM_SLOTS == 64) in which case (slot >> 5) is either 0 or 1 | |
1546 | #define BITMAPV_SET(bitmap,slot) (bitmap[(slot) >> 5] |= 1 << ((slot) & 31)) | |
1547 | #define BITMAPV_CLR(bitmap,slot) (bitmap[(slot) >> 5] &= ~ (1 << ((slot) & 31))) | |
1548 | #define BITMAPV_BIT(bitmap,slot) ((bitmap[(slot) >> 5] >> ((slot) & 31)) & 1) | |
1549 | #define BITMAPV_CTZ(bitmap) (__builtin_ctzl(bitmap)) | |
1550 | #else | |
1551 | // assert(NUM_SLOTS == 32) in which case (slot >> 5) is always 0, so code it that way | |
1552 | #define BITMAPV_SET(bitmap,slot) (bitmap[0] |= 1 << (slot)) | |
1553 | #define BITMAPV_CLR(bitmap,slot) (bitmap[0] &= ~ (1 << (slot))) | |
1554 | #define BITMAPV_BIT(bitmap,slot) ((bitmap[0] >> (slot)) & 1) | |
1555 | #define BITMAPV_CTZ(bitmap) (__builtin_ctz(bitmap)) | |
1556 | #endif | |
1557 | ||
1558 | /* BITMAPN is used by small. (slot >> 5) takes on values from 0 to 7. */ | |
1559 | #define BITMAPN_SET(bitmap,slot) (bitmap[(slot) >> 5] |= 1 << ((slot) & 31)) | |
1560 | #define BITMAPN_CLR(bitmap,slot) (bitmap[(slot) >> 5] &= ~ (1 << ((slot) & 31))) | |
1561 | #define BITMAPN_BIT(bitmap,slot) ((bitmap[(slot) >> 5] >> ((slot) & 31)) & 1) | |
1562 | ||
1563 | /* returns bit # of least-significant one bit, starting at 0 (undefined if !bitmap) */ | |
1564 | #define BITMAP32_CTZ(bitmap) (__builtin_ctz(bitmap[0])) | |
1565 | ||
1566 | /********************* TINY FREE LIST UTILITIES ************************/ | |
1567 | ||
1568 | // We encode the meta-headers as follows: | |
1569 | // Each quantum has an associated set of 2 bits: | |
1570 | // block_header when 1 says this block is the beginning of a block | |
1571 | // in_use when 1 says this block is in use | |
1572 | // so a block in use of size 3 is 1-1 0-X 0-X | |
1573 | // for a free block TINY_FREE_SIZE(ptr) carries the size and the bits are 1-0 X-X X-X | |
1574 | // for a block middle the bits are 0-0 | |
1575 | ||
1576 | // We store the meta-header bit arrays by interleaving them 32 bits at a time. | |
1577 | // Initial 32 bits of block_header, followed by initial 32 bits of in_use, followed | |
1578 | // by next 32 bits of block_header, followed by next 32 bits of in_use, etc. | |
1579 | // This localizes memory references, thereby reducing cache and TLB pressures. | |
1580 | ||
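/*
 * Illustrative sketch (not part of the original source): the index arithmetic
 * behind the interleaved bitmaps described above. The bit for quantum 'index'
 * lives in word (index >> 5) of the conceptual array; interleaving doubles that
 * to reach the block_header copy, and the in_use copy is the very next uint32_t.
 * The 'interleaved_bits' array here is a hypothetical stand-in.
 */
static void
example_mark_block_start(uint32_t *interleaved_bits, unsigned index)
{
	unsigned word = (index >> 5) << 1;		// block_header word for this index
	uint32_t mask = 1U << (index & 31);

	interleaved_bits[word]     |= mask;		// block_header: a block starts here
	interleaved_bits[word + 1] |= mask;		// in_use: ... and it is allocated
	// For an in-use block of msize 3, the two following quanta keep 0-0 bits
	// ("middle"), and the header bit of the next block is set to end the run.
}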
1581 | static INLINE void | |
1582 | BITARRAY_SET(uint32_t *bits, msize_t index) | |
1583 | { | |
1584 | // index >> 5 identifies the uint32_t to manipulate in the conceptually contiguous bits array | |
1585 | // (index >> 5) << 1 identifies the uint32_t allowing for the actual interleaving | |
1586 | bits[(index >> 5) << 1] |= (1 << (index & 31)); | |
1587 | } | |
1588 | ||
1589 | static INLINE void | |
1590 | BITARRAY_CLR(uint32_t *bits, msize_t index) | |
1591 | { | |
1592 | bits[(index >> 5) << 1] &= ~(1 << (index & 31)); | |
1593 | } | |
1594 | ||
1595 | static INLINE boolean_t | |
1596 | BITARRAY_BIT(uint32_t *bits, msize_t index) | |
1597 | { | |
1598 | return ((bits[(index >> 5) << 1]) >> (index & 31)) & 1; | |
1599 | } | |
1600 | ||
1601 | #if 0 | |
1602 | static INLINE void bitarray_mclr(uint32_t *bits, unsigned start, unsigned end) ALWAYSINLINE; | |
1603 | ||
1604 | static INLINE void | |
1605 | bitarray_mclr(uint32_t *bits, unsigned start, unsigned end) | |
1606 | { | |
1607 | // start >> 5 identifies the uint32_t to manipulate in the conceptually contiguous bits array | |
1608 | // (start >> 5) << 1 identifies the uint32_t allowing for the actual interleaving | |
1609 | uint32_t *addr = bits + ((start >> 5) << 1); | |
1610 | ||
1611 | uint32_t span = end - start; | |
1612 | start = start & 31; | |
1613 | end = start + span; | |
1614 | ||
1615 | if (end > 31) { | |
1616 | addr[0] &= (0xFFFFFFFFU >> (31 - start)) >> 1; | |
1617 | addr[2] &= (0xFFFFFFFFU << (end - 32)); | |
1618 | } else { | |
1619 | unsigned mask = (0xFFFFFFFFU >> (31 - start)) >> 1; | |
1620 | mask |= (0xFFFFFFFFU << end); | |
1621 | addr[0] &= mask; | |
1622 | } | |
1623 | } | |
1624 | #endif | |
1625 | ||
1626 | /* | |
1627 | * Obtain the size of a free tiny block (in msize_t units). | |
1628 | */ | |
1629 | static msize_t | |
1630 | get_tiny_free_size(const void *ptr) | |
1631 | { | |
1632 | void *next_block = (void *)((uintptr_t)ptr + TINY_QUANTUM); | |
1633 | void *region_end = TINY_REGION_END(TINY_REGION_FOR_PTR(ptr)); | |
1634 | ||
1635 | // check whether the next block is outside the tiny region or a block header | |
1636 | // if so, then the size of this block is one, and there is no stored size. | |
1637 | if (next_block < region_end) | |
1638 | { | |
1639 | uint32_t *next_header = TINY_BLOCK_HEADER_FOR_PTR(next_block); | |
1640 | msize_t next_index = TINY_INDEX_FOR_PTR(next_block); | |
1641 | ||
1642 | if (!BITARRAY_BIT(next_header, next_index)) | |
1643 | return TINY_FREE_SIZE(ptr); | |
1644 | } | |
1645 | return 1; | |
1646 | } | |
1647 | ||
1648 | /* | |
1649 | * Get the size of the previous free block, which is stored in the last two | |
1650 | * bytes of the block. If the previous block is not free, then the result is | |
1651 | * undefined. | |
1652 | */ | |
1653 | static msize_t | |
1654 | get_tiny_previous_free_msize(const void *ptr) | |
1655 | { | |
1656 | // check whether the previous block is in the tiny region and a block header | |
1657 | // if so, then the size of the previous block is one, and there is no stored | |
1658 | // size. | |
1659 | if (ptr != TINY_REGION_FOR_PTR(ptr)) | |
1660 | { | |
1661 | void *prev_block = (void *)((uintptr_t)ptr - TINY_QUANTUM); | |
1662 | uint32_t *prev_header = TINY_BLOCK_HEADER_FOR_PTR(prev_block); | |
1663 | msize_t prev_index = TINY_INDEX_FOR_PTR(prev_block); | |
1664 | if (BITARRAY_BIT(prev_header, prev_index)) | |
1665 | return 1; | |
1666 | return TINY_PREVIOUS_MSIZE(ptr); | |
1667 | } | |
1668 | // don't read possibly unmapped memory before the beginning of the region | |
1669 | return 0; | |
1670 | } | |
1671 | ||
1672 | static INLINE msize_t | |
1673 | get_tiny_meta_header(const void *ptr, boolean_t *is_free) | |
1674 | { | |
1675 | // returns msize and is_free | |
1676 | // may return 0 for the msize component (meaning 65536) | |
1677 | uint32_t *block_header; | |
1678 | msize_t index; | |
1679 | ||
1680 | block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); | |
1681 | index = TINY_INDEX_FOR_PTR(ptr); | |
1682 | ||
1683 | msize_t midx = (index >> 5) << 1; | |
1684 | uint32_t mask = 1 << (index & 31); | |
1685 | *is_free = 0; | |
1686 | if (0 == (block_header[midx] & mask)) // if (!BITARRAY_BIT(block_header, index)) | |
1687 | return 0; | |
1688 | if (0 == (block_header[midx + 1] & mask)) { // if (!BITARRAY_BIT(in_use, index)) | |
1689 | *is_free = 1; | |
1690 | return get_tiny_free_size(ptr); | |
1691 | } | |
1692 | ||
1693 | // index >> 5 identifies the uint32_t to manipulate in the conceptually contiguous bits array | |
1694 | // (index >> 5) << 1 identifies the uint32_t allowing for the actual interleaving | |
1695 | #if defined(__LP64__) | |
1696 | // The return value, msize, is computed as the distance to the next 1 bit in block_header. | |
1697 | // That's guaranteed to be somewhere in the next 64 bits. And those bits could span three | |
1698 | // uint32_t block_header elements. Collect the bits into a single uint64_t and measure up with ffsl. | |
1699 | uint32_t *addr = ((uint32_t *)block_header) + ((index >> 5) << 1); | |
1700 | uint32_t bitidx = index & 31; | |
1701 | uint64_t word_lo = addr[0]; | |
1702 | uint64_t word_mid = addr[2]; | |
1703 | uint64_t word_hi = addr[4]; | |
1704 | uint64_t word_lomid = (word_lo >> bitidx) | (word_mid << (32 - bitidx)); | |
1705 | uint64_t word = bitidx ? word_lomid | (word_hi << (64 - bitidx)) : word_lomid; | |
1706 | uint32_t result = __builtin_ffsl(word >> 1); | |
1707 | #else | |
1708 | // The return value, msize, is computed as the distance to the next 1 bit in block_header. | |
1709 | // That's guaranteed to be somewhere in the next 32 bits. And those bits could span two | |
1710 | // uint32_t block_header elements. Collect the bits into a single uint32_t and measure up with ffs. | |
1711 | uint32_t *addr = ((uint32_t *)block_header) + ((index >> 5) << 1); | |
1712 | uint32_t bitidx = index & 31; | |
1713 | uint32_t word = bitidx ? (addr[0] >> bitidx) | (addr[2] << (32 - bitidx)) : addr[0]; | |
1714 | uint32_t result = __builtin_ffs(word >> 1); | |
1715 | #endif | |
1716 | return result; | |
1717 | } | |
1718 | ||
1719 | static INLINE void | |
1720 | set_tiny_meta_header_in_use(const void *ptr, msize_t msize) | |
1721 | { | |
1722 | uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); | |
1723 | msize_t index = TINY_INDEX_FOR_PTR(ptr); | |
1724 | msize_t clr_msize = msize - 1; | |
1725 | msize_t midx = (index >> 5) << 1; | |
1726 | uint32_t val = (1 << (index & 31)); | |
1727 | ||
1728 | #if DEBUG_MALLOC | |
1729 | if (msize >= NUM_TINY_SLOTS) | |
1730 | malloc_printf("set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize); | |
1731 | if ((unsigned)index + (unsigned)msize > 0x10000) | |
1732 | malloc_printf("set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize); | |
1733 | #endif | |
1734 | ||
1735 | block_header[midx] |= val; // BITARRAY_SET(block_header, index); | |
1736 | block_header[midx + 1] |= val; // BITARRAY_SET(in_use, index); | |
1737 | ||
1738 | // bitarray_mclr(block_header, index, end_bit); | |
1739 | // bitarray_mclr(in_use, index, end_bit); | |
1740 | ||
1741 | index++; | |
1742 | midx = (index >> 5) << 1; | |
1743 | ||
1744 | unsigned start = index & 31; | |
1745 | unsigned end = start + clr_msize; | |
1746 | ||
1747 | #if defined(__LP64__) | |
1748 | if (end > 63) { | |
1749 | unsigned mask0 = (0xFFFFFFFFU >> (31 - start)) >> 1; | |
1750 | unsigned mask1 = (0xFFFFFFFFU << (end - 64)); | |
1751 | block_header[midx + 0] &= mask0; // clear header | |
1752 | block_header[midx + 1] &= mask0; // clear in_use | |
1753 | block_header[midx + 2] = 0; // clear header | |
1754 | block_header[midx + 3] = 0; // clear in_use | |
1755 | block_header[midx + 4] &= mask1; // clear header | |
1756 | block_header[midx + 5] &= mask1; // clear in_use | |
1757 | } else | |
1758 | #endif | |
1759 | if (end > 31) { | |
1760 | unsigned mask0 = (0xFFFFFFFFU >> (31 - start)) >> 1; | |
1761 | unsigned mask1 = (0xFFFFFFFFU << (end - 32)); | |
1762 | block_header[midx + 0] &= mask0; | |
1763 | block_header[midx + 1] &= mask0; | |
1764 | block_header[midx + 2] &= mask1; | |
1765 | block_header[midx + 3] &= mask1; | |
1766 | } else { | |
1767 | unsigned mask = (0xFFFFFFFFU >> (31 - start)) >> 1; | |
1768 | mask |= (0xFFFFFFFFU << end); | |
1769 | block_header[midx + 0] &= mask; | |
1770 | block_header[midx + 1] &= mask; | |
1771 | } | |
1772 | ||
1773 | // we set the block_header bit for the following block to reaffirm next block is a block | |
1774 | index += clr_msize; | |
1775 | midx = (index >> 5) << 1; | |
1776 | val = (1 << (index & 31)); | |
1777 | block_header[midx] |= val; // BITARRAY_SET(block_header, (index+clr_msize)); | |
1778 | #if DEBUG_MALLOC | |
1779 | { | |
1780 | boolean_t ff; | |
1781 | msize_t mf; | |
1782 | ||
1783 | mf = get_tiny_meta_header(ptr, &ff); | |
1784 | if (msize != mf) { | |
1785 | malloc_printf("setting header for tiny in_use %p : %d\n", ptr, msize); | |
1786 | malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff); | |
1787 | } | |
1788 | } | |
1789 | #endif | |
1790 | } | |
1791 | ||
1792 | static INLINE void | |
1793 | set_tiny_meta_header_in_use_1(const void *ptr) // As above with msize == 1 | |
1794 | { | |
1795 | uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); | |
1796 | msize_t index = TINY_INDEX_FOR_PTR(ptr); | |
1797 | msize_t midx = (index >> 5) << 1; | |
1798 | uint32_t val = (1 << (index & 31)); | |
1799 | ||
1800 | block_header[midx] |= val; // BITARRAY_SET(block_header, index); | |
1801 | block_header[midx + 1] |= val; // BITARRAY_SET(in_use, index); | |
1802 | ||
1803 | index++; | |
1804 | midx = (index >> 5) << 1; | |
1805 | val = (1 << (index & 31)); | |
1806 | ||
1807 | block_header[midx] |= val; // BITARRAY_SET(block_header, (index+clr_msize)) | |
1808 | } | |
1809 | ||
1810 | static INLINE void | |
1811 | set_tiny_meta_header_middle(const void *ptr) | |
1812 | { | |
1813 | // indicates this block is in the middle of an in use block | |
1814 | uint32_t *block_header; | |
1815 | uint32_t *in_use; | |
1816 | msize_t index; | |
1817 | ||
1818 | block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); | |
1819 | in_use = TINY_INUSE_FOR_HEADER(block_header); | |
1820 | index = TINY_INDEX_FOR_PTR(ptr); | |
1821 | ||
1822 | BITARRAY_CLR(block_header, index); | |
1823 | BITARRAY_CLR(in_use, index); | |
1824 | } | |
1825 | ||
1826 | static INLINE void | |
1827 | set_tiny_meta_header_free(const void *ptr, msize_t msize) | |
1828 | { | |
1829 | // !msize is acceptable and means 65536 | |
1830 | uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); | |
1831 | msize_t index = TINY_INDEX_FOR_PTR(ptr); | |
1832 | msize_t midx = (index >> 5) << 1; | |
1833 | uint32_t val = (1 << (index & 31)); | |
1834 | ||
1835 | #if DEBUG_MALLOC | |
1836 | if ((unsigned)index + (unsigned)msize > 0x10000) { | |
1837 | malloc_printf("setting header for tiny free %p msize too large: %d\n", ptr, msize); | |
1838 | } | |
1839 | #endif | |
1840 | ||
1841 | block_header[midx] |= val; // BITARRAY_SET(block_header, index); | |
1842 | block_header[midx + 1] &= ~val; // BITARRAY_CLR(in_use, index); | |
1843 | ||
1844 | // mark the end of this block if msize is > 1. For msize == 0, the whole | |
1845 | // region is free, so there is no following block. For msize == 1, there is | |
1846 | // no space to write the size on 64 bit systems. The size for 1 quantum | |
1847 | // blocks is computed from the metadata bitmaps. | |
1848 | if (msize > 1) { | |
1849 | void *follower = FOLLOWING_TINY_PTR(ptr, msize); | |
1850 | TINY_PREVIOUS_MSIZE(follower) = msize; | |
1851 | TINY_FREE_SIZE(ptr) = msize; | |
1852 | } | |
1853 | if (msize == 0) { | |
1854 | TINY_FREE_SIZE(ptr) = msize; | |
1855 | } | |
1856 | #if DEBUG_MALLOC | |
1857 | boolean_t ff; | |
1858 | msize_t mf = get_tiny_meta_header(ptr, &ff); | |
1859 | if ((msize != mf) || !ff) { | |
1860 | malloc_printf("setting header for tiny free %p : %u\n", ptr, msize); | |
1861 | malloc_printf("reading header for tiny %p : %u %u\n", ptr, mf, ff); | |
1862 | } | |
1863 | #endif | |
1864 | } | |
1865 | ||
1866 | static INLINE boolean_t | |
1867 | tiny_meta_header_is_free(const void *ptr) | |
1868 | { | |
1869 | uint32_t *block_header; | |
1870 | uint32_t *in_use; | |
1871 | msize_t index; | |
1872 | ||
1873 | block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); | |
1874 | in_use = TINY_INUSE_FOR_HEADER(block_header); | |
1875 | index = TINY_INDEX_FOR_PTR(ptr); | |
1876 | if (!BITARRAY_BIT(block_header, index)) | |
1877 | return 0; | |
1878 | return !BITARRAY_BIT(in_use, index); | |
1879 | } | |
1880 | ||
1881 | static INLINE void * | |
1882 | tiny_previous_preceding_free(void *ptr, msize_t *prev_msize) | |
1883 | { | |
1884 | // returns the previous block, assuming and verifying it's free | |
1885 | uint32_t *block_header; | |
1886 | uint32_t *in_use; | |
1887 | msize_t index; | |
1888 | msize_t previous_msize; | |
1889 | msize_t previous_index; | |
1890 | void *previous_ptr; | |
1891 | ||
1892 | block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr); | |
1893 | in_use = TINY_INUSE_FOR_HEADER(block_header); | |
1894 | index = TINY_INDEX_FOR_PTR(ptr); | |
1895 | ||
1896 | if (!index) | |
1897 | return NULL; | |
1898 | if ((previous_msize = get_tiny_previous_free_msize(ptr)) > index) | |
1899 | return NULL; | |
1900 | ||
1901 | previous_index = index - previous_msize; | |
1902 | previous_ptr = (void *)((uintptr_t)TINY_REGION_FOR_PTR(ptr) + TINY_BYTES_FOR_MSIZE(previous_index)); | |
1903 | if (!BITARRAY_BIT(block_header, previous_index)) | |
1904 | return NULL; | |
1905 | if (BITARRAY_BIT(in_use, previous_index)) | |
1906 | return NULL; | |
1907 | if (get_tiny_free_size(previous_ptr) != previous_msize) | |
1908 | return NULL; | |
1909 | ||
1910 | // conservative check did match true check | |
1911 | *prev_msize = previous_msize; | |
1912 | return previous_ptr; | |
1913 | } | |
1914 | ||
1915 | /* | |
1916 | * Adds an item to the proper free list, and also marks the meta-header of the | |
1917 | * block properly. | |
1918 | * Assumes szone has been locked | |
1919 | */ | |
1920 | static void | |
1921 | tiny_free_list_add_ptr(szone_t *szone, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize) | |
1922 | { | |
1923 | grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1; | |
1924 | free_list_t *free_ptr = ptr; | |
1925 | free_list_t *free_head = tiny_mag_ptr->mag_free_list[slot]; | |
1926 | ||
1927 | #if DEBUG_MALLOC | |
1928 | if (LOG(szone,ptr)) { | |
1929 | malloc_printf("in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize); | |
1930 | } | |
1931 | if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) { | |
1932 | szone_error(szone, 1, "tiny_free_list_add_ptr: Unaligned ptr", ptr, NULL); | |
1933 | } | |
1934 | #endif | |
1935 | set_tiny_meta_header_free(ptr, msize); | |
1936 | if (free_head) { | |
1937 | #if DEBUG_MALLOC | |
1938 | if (free_list_unchecksum_ptr(szone, &free_head->previous)) { | |
1939 | szone_error(szone, 1, "tiny_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr, | |
1940 | "ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, (void *)free_head, free_head->previous.p); | |
1941 | } | |
1942 | if (! tiny_meta_header_is_free(free_head)) { | |
1943 | szone_error(szone, 1, "tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr, | |
1944 | "ptr=%p slot=%d free_head=%p\n", ptr, slot, (void *)free_head); | |
1945 | } | |
1946 | #endif | |
1947 | free_head->previous.u = free_list_checksum_ptr(szone, free_ptr); | |
1948 | } else { | |
1949 | BITMAPV_SET(tiny_mag_ptr->mag_bitmap, slot); | |
1950 | } | |
1951 | free_ptr->previous.u = free_list_checksum_ptr(szone, NULL); | |
1952 | free_ptr->next.u = free_list_checksum_ptr(szone, free_head); | |
1953 | ||
1954 | tiny_mag_ptr->mag_free_list[slot] = free_ptr; | |
1955 | } | |
1956 | ||
1957 | /* | |
1958 | * Removes the item pointed to by ptr from the proper free list. | |
1959 | * Assumes szone has been locked | |
1960 | */ | |
1961 | static void | |
1962 | tiny_free_list_remove_ptr(szone_t *szone, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize) | |
1963 | { | |
1964 | grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1; | |
1965 | free_list_t *free_ptr = ptr, *next, *previous; | |
1966 | ||
1967 | next = free_list_unchecksum_ptr(szone, &free_ptr->next); | |
1968 | previous = free_list_unchecksum_ptr(szone, &free_ptr->previous); | |
1969 | ||
1970 | #if DEBUG_MALLOC | |
1971 | if (LOG(szone,ptr)) { | |
1972 | malloc_printf("In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize); | |
1973 | } | |
1974 | #endif | |
1975 | if (!previous) { | |
1976 | // The block to remove is the head of the free list | |
1977 | #if DEBUG_MALLOC | |
1978 | if (tiny_mag_ptr->mag_free_list[slot] != ptr) { | |
1979 | szone_error(szone, 1, "tiny_free_list_remove_ptr: Internal invariant broken (tiny_mag_ptr->mag_free_list[slot])", ptr, | |
1980 | "ptr=%p slot=%d msize=%d tiny_mag_ptr->mag_free_list[slot]=%p\n", | |
1981 | ptr, slot, msize, (void *)tiny_mag_ptr->mag_free_list[slot]); | |
1982 | return; | |
1983 | } | |
1984 | #endif | |
1985 | tiny_mag_ptr->mag_free_list[slot] = next; | |
1986 | if (!next) BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot); | |
1987 | } else { | |
1988 | // We know free_ptr is already checksummed, so we don't need to do it | |
1989 | // again. | |
1990 | previous->next = free_ptr->next; | |
1991 | } | |
1992 | if (next) { | |
1993 | // We know free_ptr is already checksummed, so we don't need to do it | |
1994 | // again. | |
1995 | next->previous = free_ptr->previous; | |
1996 | } | |
1997 | } | |
1998 | ||
1999 | /* | |
2000 | * tiny_region_for_ptr_no_lock - Returns the tiny region containing the pointer, | |
2001 | * or NULL if not found. | |
2002 | */ | |
2003 | static INLINE region_t | |
2004 | tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr) | |
2005 | { | |
2006 | rgnhdl_t r = hash_lookup_region_no_lock(szone->tiny_region_generation->hashed_regions, | |
2007 | szone->tiny_region_generation->num_regions_allocated, | |
2008 | szone->tiny_region_generation->num_regions_allocated_shift, | |
2009 | TINY_REGION_FOR_PTR(ptr)); | |
2010 | return r ? *r : r; | |
2011 | } | |
2012 | ||
2013 | static void | |
2014 | tiny_finalize_region(szone_t *szone, magazine_t *tiny_mag_ptr) { | |
2015 | void *last_block, *previous_block; | |
2016 | uint32_t *last_header; | |
2017 | msize_t last_msize, previous_msize, last_index; | |
2018 | ||
1f2f436a A |
2019 | // It is possible that the block prior to the last block in the region has |
2020 | // been free'd, but was not coalesced with the free bytes at the end of the | |
2021 | // region, since we treat the bytes at the end of the region as "in use" in | |
2022 | // the meta headers. Attempt to coalesce the last block with the previous | |
2023 | // block, so we don't violate the "no consecutive free blocks" invariant. | |
2024 | // | |
2025 | // FIXME: Need to investigate how much work would be required to increase | |
2026 | // 'mag_bytes_free_at_end' when freeing the preceding block, rather | |
2027 | // than performing this workaround. | |
2028 | // | |
2029 | ||
2030 | if (tiny_mag_ptr->mag_bytes_free_at_end) { | |
34e8f829 A |
2031 | last_block = (void *) |
2032 | ((uintptr_t)TINY_REGION_END(tiny_mag_ptr->mag_last_region) - tiny_mag_ptr->mag_bytes_free_at_end); | |
2033 | last_msize = TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_end); | |
2034 | last_header = TINY_BLOCK_HEADER_FOR_PTR(last_block); | |
2035 | last_index = TINY_INDEX_FOR_PTR(last_block); | |
2036 | ||
2037 | // Before anything we transform any remaining mag_bytes_free_at_end into a | |
2038 | // regular free block. We take special care here to update the bitfield | |
2039 | // information, since we are bypassing the normal free codepath. If there | |
2040 | // is more than one quantum's worth of memory in mag_bytes_free_at_end, then | |
2041 | // there will be two block headers: | |
2042 | // 1) header for the free space at end, msize = 1 | |
2043 | // 2) header inserted by set_tiny_meta_header_in_use after block | |
2044 | // We must clear the second one so that when the free block's size is | |
2045 | // queried, we do not think the block is only 1 quantum in size because | |
2046 | // of the second set header bit. | |
2047 | if (last_index != (NUM_TINY_BLOCKS - 1)) | |
2048 | BITARRAY_CLR(last_header, (last_index + 1)); | |
2049 | ||
34e8f829 A |
2050 | previous_block = tiny_previous_preceding_free(last_block, &previous_msize); |
2051 | if (previous_block) { | |
2052 | set_tiny_meta_header_middle(last_block); | |
2053 | tiny_free_list_remove_ptr(szone, tiny_mag_ptr, previous_block, previous_msize); | |
2054 | last_block = previous_block; | |
2055 | last_msize += previous_msize; | |
2056 | } | |
2057 | ||
2058 | // splice last_block into the free list | |
2059 | tiny_free_list_add_ptr(szone, tiny_mag_ptr, last_block, last_msize); | |
2060 | tiny_mag_ptr->mag_bytes_free_at_end = 0; | |
1f2f436a A |
2061 | } |
2062 | ||
2063 | #if ASLR_INTERNAL | |
2064 | // Coalesce the big free block at start with any following free blocks | |
2065 | if (tiny_mag_ptr->mag_bytes_free_at_start) { | |
2066 | last_block = TINY_REGION_ADDRESS(tiny_mag_ptr->mag_last_region); | |
2067 | last_msize = TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_start); | |
2068 | ||
2069 | void *next_block = (void *) ((uintptr_t)last_block + tiny_mag_ptr->mag_bytes_free_at_start); | |
2070 | ||
2071 | // clear the in use bit we were using to mark the end of the big start block | |
2072 | set_tiny_meta_header_middle((void *)((uintptr_t)next_block - TINY_QUANTUM)); | |
2073 | ||
2074 | // Coalesce the big start block with any following free blocks | |
2075 | if (tiny_meta_header_is_free(next_block)) { | |
2076 | msize_t next_msize = get_tiny_free_size(next_block); | |
2077 | set_tiny_meta_header_middle(next_block); | |
2078 | tiny_free_list_remove_ptr(szone, tiny_mag_ptr, next_block, next_msize); | |
2079 | last_msize += next_msize; | |
2080 | } | |
2081 | ||
2082 | // splice last_block into the free list | |
2083 | tiny_free_list_add_ptr(szone, tiny_mag_ptr, last_block, last_msize); | |
2084 | tiny_mag_ptr->mag_bytes_free_at_start = 0; | |
2085 | } | |
2086 | #endif | |
2087 | ||
34e8f829 A |
2088 | tiny_mag_ptr->mag_last_region = NULL; |
2089 | } | |
2090 | ||
2091 | static int | |
2092 | tiny_free_detach_region(szone_t *szone, magazine_t *tiny_mag_ptr, region_t r) { | |
2093 | uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(r); | |
2094 | uintptr_t current = start; | |
2095 | uintptr_t limit = (uintptr_t)TINY_REGION_END(r); | |
2096 | boolean_t is_free; | |
2097 | msize_t msize; | |
2098 | int total_alloc = 0; | |
2099 | ||
2100 | while (current < limit) { | |
2101 | msize = get_tiny_meta_header((void *)current, &is_free); | |
2102 | if (is_free && !msize && (current == start)) { | |
2103 | // first block is all free | |
2104 | break; | |
2105 | } | |
2106 | if (!msize) { | |
2107 | #if DEBUG_MALLOC | |
2108 | malloc_printf("*** tiny_free_detach_region error with %p: msize=%d is_free =%d\n", | |
2109 | (void *)current, msize, is_free); | |
2110 | #endif | |
2111 | break; | |
2112 | } | |
2113 | if (is_free) { | |
2114 | tiny_free_list_remove_ptr(szone, tiny_mag_ptr, (void *)current, msize); | |
2115 | } else { | |
2116 | total_alloc++; | |
2117 | } | |
2118 | current += TINY_BYTES_FOR_MSIZE(msize); | |
2119 | } | |
2120 | return total_alloc; | |
2121 | } | |
2122 | ||
2123 | static size_t | |
2124 | tiny_free_reattach_region(szone_t *szone, magazine_t *tiny_mag_ptr, region_t r) { | |
2125 | uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(r); | |
2126 | uintptr_t current = start; | |
2127 | uintptr_t limit = (uintptr_t)TINY_REGION_END(r); | |
2128 | boolean_t is_free; | |
2129 | msize_t msize; | |
2130 | size_t total_alloc = 0; | |
2131 | ||
2132 | while (current < limit) { | |
2133 | msize = get_tiny_meta_header((void *)current, &is_free); | |
2134 | if (is_free && !msize && (current == start)) { | |
2135 | // first block is all free | |
2136 | break; | |
2137 | } | |
2138 | if (!msize) { | |
2139 | #if DEBUG_MALLOC | |
2140 | malloc_printf("*** tiny_free_reattach_region error with %p: msize=%d is_free =%d\n", | |
2141 | (void *)current, msize, is_free); | |
2142 | #endif | |
2143 | break; | |
2144 | } | |
2145 | if (is_free) { | |
2146 | tiny_free_list_add_ptr(szone, tiny_mag_ptr, (void *)current, msize); | |
2147 | } else { | |
2148 | total_alloc += TINY_BYTES_FOR_MSIZE(msize); | |
2149 | } | |
2150 | current += TINY_BYTES_FOR_MSIZE(msize); | |
2151 | } | |
2152 | return total_alloc; | |
2153 | } | |
2154 | ||
1f2f436a A |
2155 | typedef struct { |
2156 | uint8_t pnum, size; | |
2157 | } tiny_pg_pair_t; | |
2158 | ||
2159 | static void NOINLINE /* want private stack frame for automatic array */ | |
34e8f829 A |
2160 | tiny_free_scan_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r) { |
2161 | uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(r); | |
2162 | uintptr_t current = start; | |
2163 | uintptr_t limit = (uintptr_t)TINY_REGION_END(r); | |
2164 | boolean_t is_free; | |
2165 | msize_t msize; | |
1f2f436a A |
2166 | tiny_pg_pair_t advisory[((TINY_REGION_PAYLOAD_BYTES + vm_page_size - 1) >> vm_page_shift) >> 1]; // 256 bytes, stack allocated | |
2167 | int advisories = 0; | |
34e8f829 A |
2168 | |
2169 | // Scan the metadata identifying blocks which span one or more pages. Mark the pages MADV_FREE taking care to preserve free list | |
2170 | // management data. | |
2171 | while (current < limit) { | |
2172 | msize = get_tiny_meta_header((void *)current, &is_free); | |
2173 | if (is_free && !msize && (current == start)) { | |
2174 | // first block is all free | |
2175 | #if DEBUG_MALLOC | |
2176 | malloc_printf("*** tiny_free_scan_madvise_free first block is all free! %p: msize=%d is_free =%d\n", | |
2177 | (void *)current, msize, is_free); | |
2178 | #endif | |
2179 | uintptr_t pgLo = round_page(start + sizeof(free_list_t) + sizeof(msize_t)); | |
2180 | uintptr_t pgHi = trunc_page(start + TINY_REGION_SIZE - sizeof(msize_t)); | |
2181 | ||
2182 | if (pgLo < pgHi) { | |
1f2f436a A |
2183 | advisory[advisories].pnum = (pgLo - start) >> vm_page_shift; |
2184 | advisory[advisories].size = (pgHi - pgLo) >> vm_page_shift; | |
2185 | advisories++; | |
34e8f829 A |
2186 | } |
2187 | break; | |
2188 | } | |
2189 | if (!msize) { | |
2190 | #if DEBUG_MALLOC | |
2191 | malloc_printf("*** tiny_free_scan_madvise_free error with %p: msize=%d is_free =%d\n", | |
2192 | (void *)current, msize, is_free); | |
2193 | #endif | |
2194 | break; | |
2195 | } | |
2196 | if (is_free) { | |
2197 | uintptr_t pgLo = round_page(current + sizeof(free_list_t) + sizeof(msize_t)); | |
2198 | uintptr_t pgHi = trunc_page(current + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t)); | |
2199 | ||
2200 | if (pgLo < pgHi) { | |
1f2f436a A |
2201 | advisory[advisories].pnum = (pgLo - start) >> vm_page_shift; |
2202 | advisory[advisories].size = (pgHi - pgLo) >> vm_page_shift; | |
2203 | advisories++; | |
34e8f829 A |
2204 | } |
2205 | } | |
2206 | current += TINY_BYTES_FOR_MSIZE(msize); | |
2207 | } | |
2208 | ||
1f2f436a A |
2209 | if (advisories > 0) { |
2210 | int i; | |
2211 | ||
2212 | // So long as the following hold for this region: | |
2213 | // (1) No malloc()'s are ever performed from the depot (hence free pages remain free), | |
2214 | // (2) The region is not handed over to a per-CPU magazine (where malloc()'s could be performed), | |
2215 | // (3) The entire region is not munmap()'d (so the madvises are applied to the intended addresses), | |
2216 | // then the madvise opportunities collected just above can be applied outside all locks. | |
2217 | // (1) is ensured by design, (2) and (3) are ensured by bumping the globally visible counter node->pinned_to_depot. | |
2218 | ||
2219 | OSAtomicIncrement32Barrier(&(REGION_TRAILER_FOR_TINY_REGION(r)->pinned_to_depot)); | |
2220 | SZONE_MAGAZINE_PTR_UNLOCK(szone, depot_ptr); | |
2221 | for (i = 0; i < advisories; ++i) { | |
2222 | uintptr_t addr = (advisory[i].pnum << vm_page_shift) + start; | |
2223 | size_t size = advisory[i].size << vm_page_shift; | |
2224 | ||
2225 | #if TARGET_OS_EMBEDDED | |
2226 | madvise_free_range(szone, r, addr, addr + size, NULL); | |
2227 | #else | |
2228 | madvise_free_range(szone, r, addr, addr + size); | |
2229 | #endif | |
2230 | } | |
2231 | SZONE_MAGAZINE_PTR_LOCK(szone, depot_ptr); | |
2232 | OSAtomicDecrement32Barrier(&(REGION_TRAILER_FOR_TINY_REGION(r)->pinned_to_depot)); | |
34e8f829 A |
2233 | } |
2234 | } | |
2235 | ||
1f2f436a | 2236 | static region_t |
34e8f829 A |
2237 | tiny_free_try_depot_unmap_no_lock(szone_t *szone, magazine_t *depot_ptr, region_trailer_t *node) |
2238 | { | |
34e8f829 | 2239 | if (0 < node->bytes_used || |
1f2f436a | 2240 | 0 < node->pinned_to_depot || |
34e8f829 | 2241 | depot_ptr->recirculation_entries < (szone->num_tiny_magazines * 2)) { |
1f2f436a | 2242 | return NULL; |
34e8f829 A |
2243 | } |
2244 | ||
2245 | // disconnect node from Depot | |
2246 | recirc_list_extract(szone, depot_ptr, node); | |
2247 | ||
2248 | // Iterate the region pulling its free entries off the (locked) Depot's free list | |
2249 | region_t sparse_region = TINY_REGION_FOR_PTR(node); | |
2250 | int objects_in_use = tiny_free_detach_region(szone, depot_ptr, sparse_region); | |
2251 | ||
2252 | if (0 == objects_in_use) { | |
2253 | // Invalidate the hash table entry for this region with HASHRING_REGION_DEALLOCATED. | |
2254 | // Using HASHRING_REGION_DEALLOCATED preserves the collision chain, using HASHRING_OPEN_ENTRY (0) would not. | |
2255 | rgnhdl_t pSlot = hash_lookup_region_no_lock(szone->tiny_region_generation->hashed_regions, | |
2256 | szone->tiny_region_generation->num_regions_allocated, | |
2257 | szone->tiny_region_generation->num_regions_allocated_shift, sparse_region); | |
1f2f436a A |
2258 | if (NULL == pSlot) { |
2259 | szone_error(szone, 1, "tiny_free_try_depot_unmap_no_lock hash lookup failed:", NULL, "%p\n", sparse_region); | |
2260 | return NULL; | |
2261 | } | |
34e8f829 A |
2262 | *pSlot = HASHRING_REGION_DEALLOCATED; |
2263 | depot_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES; | |
34e8f829 | 2264 | __sync_fetch_and_add( &(szone->num_tiny_regions_dealloc), 1); // Atomically increment num_tiny_regions_dealloc |
34e8f829 | 2265 | |
1f2f436a A |
2266 | // Caller will transfer ownership of the region back to the OS with no locks held |
2267 | MAGMALLOC_DEALLOCREGION((void *)szone, (void *)sparse_region, TINY_REGION_SIZE); // DTrace USDT Probe | |
2268 | return sparse_region; | |
34e8f829 A |
2269 | } else { |
2270 | szone_error(szone, 1, "tiny_free_try_depot_unmap_no_lock objects_in_use not zero:", NULL, "%d\n", objects_in_use); | |
1f2f436a | 2271 | return NULL; |
34e8f829 A |
2272 | } |
2273 | } | |
2274 | ||
1f2f436a | 2275 | static boolean_t |
34e8f829 A |
2276 | tiny_free_do_recirc_to_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index) |
2277 | { | |
2278 | // The entire magazine crossed the "emptiness threshold". Transfer a region | |
2279 | // from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e. | |
2280 | // is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list. | |
2281 | region_trailer_t *node = tiny_mag_ptr->firstNode; | |
2282 | ||
2283 | while (node && !node->recirc_suitable) { | |
2284 | node = node->next; | |
2285 | } | |
2286 | ||
2287 | if (NULL == node) { | |
2288 | #if DEBUG_MALLOC | |
2289 | malloc_printf("*** tiny_free_do_recirc_to_depot end of list\n"); | |
2290 | #endif | |
1f2f436a | 2291 | return TRUE; // Caller must SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); |
34e8f829 A |
2292 | } |
2293 | ||
2294 | region_t sparse_region = TINY_REGION_FOR_PTR(node); | |
2295 | ||
1f2f436a A |
2296 | // Deal with unclaimed memory -- mag_bytes_free_at_end or mag_bytes_free_at_start |
2297 | if (sparse_region == tiny_mag_ptr->mag_last_region && (tiny_mag_ptr->mag_bytes_free_at_end || tiny_mag_ptr->mag_bytes_free_at_start)) { | |
34e8f829 A |
2298 | tiny_finalize_region(szone, tiny_mag_ptr); |
2299 | } | |
2300 | ||
2301 | // disconnect "suitable" node from magazine | |
2302 | recirc_list_extract(szone, tiny_mag_ptr, node); | |
2303 | ||
2304 | // Iterate the region pulling its free entries off its (locked) magazine's free list | |
2305 | int objects_in_use = tiny_free_detach_region(szone, tiny_mag_ptr, sparse_region); | |
2306 | magazine_t *depot_ptr = &(szone->tiny_magazines[DEPOT_MAGAZINE_INDEX]); | |
2307 | ||
2308 | // hand over the region to the (locked) Depot | |
2309 | SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr); | |
2310 | // this will cause tiny_free_list_add_ptr called by tiny_free_reattach_region to use | |
2311 | // the depot as its target magazine, rather than magazine formerly associated with sparse_region | |
2312 | MAGAZINE_INDEX_FOR_TINY_REGION(sparse_region) = DEPOT_MAGAZINE_INDEX; | |
1f2f436a | 2313 | node->pinned_to_depot = 0; |
34e8f829 A |
2314 | |
2315 | // Iterate the region putting its free entries on Depot's free list | |
2316 | size_t bytes_inplay = tiny_free_reattach_region(szone, depot_ptr, sparse_region); | |
2317 | ||
2318 | tiny_mag_ptr->mag_num_bytes_in_objects -= bytes_inplay; | |
2319 | tiny_mag_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES; | |
2320 | tiny_mag_ptr->mag_num_objects -= objects_in_use; | |
2321 | ||
1f2f436a A |
2322 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); // Unlock the originating magazine |
2323 | ||
34e8f829 A |
2324 | depot_ptr->mag_num_bytes_in_objects += bytes_inplay; |
2325 | depot_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES; | |
2326 | depot_ptr->mag_num_objects += objects_in_use; | |
2327 | ||
1f2f436a A |
2328 | // connect to Depot as last node |
2329 | recirc_list_splice_last(szone, depot_ptr, node); | |
34e8f829 | 2330 | |
1f2f436a A |
2331 | MAGMALLOC_RECIRCREGION((void *)szone, (int)mag_index, (void *)sparse_region, TINY_REGION_SIZE, |
2332 | (int)BYTES_USED_FOR_TINY_REGION(sparse_region)); // DTrace USDT Probe | |
34e8f829 A |
2333 | |
2334 | // Mark free'd dirty pages with MADV_FREE to reduce memory pressure | |
2335 | tiny_free_scan_madvise_free(szone, depot_ptr, sparse_region); | |
2336 | ||
1f2f436a A |
2337 | // If the region is entirely empty vm_deallocate() it outside the depot lock |
2338 | region_t r_dealloc = tiny_free_try_depot_unmap_no_lock(szone, depot_ptr, node); | |
34e8f829 | 2339 | SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr); |
1f2f436a A |
2340 | if (r_dealloc) |
2341 | deallocate_pages(szone, r_dealloc, TINY_REGION_SIZE, 0); | |
2342 | return FALSE; // Caller need not unlock the originating magazine | |
34e8f829 A |
2343 | } |
2344 | ||
7ba935f9 A |
2345 | static region_t |
2346 | tiny_find_msize_region(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize) | |
2347 | { | |
2348 | free_list_t *ptr; | |
2349 | grain_t slot = msize - 1; | |
2350 | free_list_t **free_list = tiny_mag_ptr->mag_free_list; | |
2351 | free_list_t **the_slot = free_list + slot; | |
2352 | free_list_t **limit; | |
2353 | #if defined(__LP64__) | |
2354 | uint64_t bitmap; | |
2355 | #else | |
2356 | uint32_t bitmap; | |
2357 | #endif | |
2358 | // Assumes we've locked the magazine | |
2359 | CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__); | |
2360 | ||
2361 | // Look for an exact match by checking the freelist for this msize. | |
2362 | ptr = *the_slot; | |
2363 | if (ptr) | |
2364 | return TINY_REGION_FOR_PTR(ptr); | |
2365 | ||
2366 | // Mask off the bits representing slots holding free blocks smaller than the | |
2367 | // size we need. If there are no larger free blocks, try allocating from | |
2368 | // the free space at the end of the tiny region. | |
2369 | #if defined(__LP64__) | |
2370 | bitmap = ((uint64_t *)(tiny_mag_ptr->mag_bitmap))[0] & ~ ((1ULL << slot) - 1); | |
2371 | #else | |
2372 | bitmap = tiny_mag_ptr->mag_bitmap[0] & ~ ((1 << slot) - 1); | |
2373 | #endif | |
2374 | if (!bitmap) | |
2375 | return NULL; | |
2376 | ||
2377 | slot = BITMAPV_CTZ(bitmap); | |
2378 | limit = free_list + NUM_TINY_SLOTS - 1; | |
2379 | free_list += slot; | |
2380 | ||
2381 | if (free_list < limit) { | |
2382 | ptr = *free_list; | |
2383 | if (ptr) | |
2384 | return TINY_REGION_FOR_PTR(ptr); | |
2385 | else { | |
2386 | /* Shouldn't happen. Fall through to look at last slot. */ | |
2387 | #if DEBUG_MALLOC | |
2388 | malloc_printf("in tiny_find_msize_region(), mag_bitmap out of sync, slot=%d\n",slot); | |
2389 | #endif | |
2390 | } | |
2391 | } | |
2392 | ||
2393 | // We are now looking at the last slot, which contains blocks equal to, or | |
2394 | // due to coalescing of free blocks, larger than (NUM_TINY_SLOTS - 1) * tiny quantum size. | |
2395 | ptr = *limit; | |
2396 | if (ptr) | |
2397 | return TINY_REGION_FOR_PTR(ptr); | |
2398 | ||
2399 | return NULL; | |
2400 | } | |
2401 | ||
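/*
 * Illustrative sketch (not part of the original source): the bitmap search used
 * by tiny_find_msize_region() above, as a standalone helper. Bit i set means
 * "free-list slot i is non-empty"; masking off the bits below the requested slot
 * and counting trailing zeros yields the smallest slot that can satisfy the
 * request, or -1 if no slot can.
 */
static int
example_smallest_usable_slot(uint32_t slot_bitmap, unsigned wanted_slot)
{
	uint32_t candidates = slot_bitmap & ~((1U << wanted_slot) - 1);

	if (!candidates)
		return -1;				// nothing free at wanted_slot or above
	return __builtin_ctz(candidates);		// index of the lowest set bit
}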
34e8f829 | 2402 | static boolean_t |
7ba935f9 | 2403 | tiny_get_region_from_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize) |
34e8f829 A |
2404 | { |
2405 | magazine_t *depot_ptr = &(szone->tiny_magazines[DEPOT_MAGAZINE_INDEX]); | |
2406 | ||
2407 | /* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */ | |
2408 | if (szone->num_tiny_magazines == 1) // Uniprocessor, single magazine, so no recirculation necessary | |
2409 | return 0; | |
2410 | ||
2411 | #if DEBUG_MALLOC | |
2412 | if (DEPOT_MAGAZINE_INDEX == mag_index) { | |
2413 | szone_error(szone, 1, "tiny_get_region_from_depot called for magazine index -1", NULL, NULL); | |
2414 | return 0; | |
2415 | } | |
2416 | #endif | |
2417 | ||
2418 | SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr); | |
2419 | ||
1f2f436a A |
2420 | // Appropriate a Depot'd region that can satisfy requested msize. |
2421 | region_trailer_t *node; | |
2422 | region_t sparse_region; | |
2423 | ||
2424 | while (1) { | |
2425 | sparse_region = tiny_find_msize_region(szone, depot_ptr, DEPOT_MAGAZINE_INDEX, msize); | |
2426 | if (NULL == sparse_region) { // Depot empty? | |
2427 | SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr); | |
34e8f829 | 2428 | return 0; |
1f2f436a | 2429 | } |
34e8f829 | 2430 | |
1f2f436a A |
2431 | node = REGION_TRAILER_FOR_TINY_REGION(sparse_region); |
2432 | if (0 >= node->pinned_to_depot) | |
2433 | break; | |
2434 | ||
2435 | SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr); | |
2436 | pthread_yield_np(); | |
2437 | SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr); | |
2438 | } | |
7ba935f9 | 2439 | |
1f2f436a | 2440 | // disconnect node from Depot |
34e8f829 A |
2441 | recirc_list_extract(szone, depot_ptr, node); |
2442 | ||
2443 | // Iterate the region pulling its free entries off the (locked) Depot's free list | |
34e8f829 A |
2444 | int objects_in_use = tiny_free_detach_region(szone, depot_ptr, sparse_region); |
2445 | ||
2446 | // Transfer ownership of the region | |
2447 | MAGAZINE_INDEX_FOR_TINY_REGION(sparse_region) = mag_index; | |
1f2f436a | 2448 | node->pinned_to_depot = 0; |
34e8f829 A |
2449 | |
2450 | // Iterate the region putting its free entries on its new (locked) magazine's free list | |
2451 | size_t bytes_inplay = tiny_free_reattach_region(szone, tiny_mag_ptr, sparse_region); | |
2452 | ||
2453 | depot_ptr->mag_num_bytes_in_objects -= bytes_inplay; | |
2454 | depot_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES; | |
2455 | depot_ptr->mag_num_objects -= objects_in_use; | |
2456 | ||
2457 | tiny_mag_ptr->mag_num_bytes_in_objects += bytes_inplay; | |
2458 | tiny_mag_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES; | |
2459 | tiny_mag_ptr->mag_num_objects += objects_in_use; | |
2460 | ||
1f2f436a | 2461 | // connect to magazine as first node |
34e8f829 A |
2462 | recirc_list_splice_first(szone, tiny_mag_ptr, node); |
2463 | ||
2464 | SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr); | |
2465 | ||
1f2f436a A |
2466 | // madvise() outside the Depot lock |
2467 | #if TARGET_OS_EMBEDDED | |
2468 | if (node->failedREUSE) { | |
2469 | #else | |
7ba935f9 A |
2470 | if (node->failedREUSE || |
2471 | -1 == madvise((void *)sparse_region, TINY_REGION_PAYLOAD_BYTES, MADV_FREE_REUSE)) { | |
1f2f436a | 2472 | #endif |
34e8f829 | 2473 | /* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */ |
7ba935f9 | 2474 | #if DEBUG_MADVISE |
1f2f436a A |
2475 | szone_error(szone, 0, "tiny_get_region_from_depot madvise(..., MADV_FREE_REUSE) failed", |
2476 | sparse_region, "length=%d\n", TINY_REGION_PAYLOAD_BYTES); | |
34e8f829 | 2477 | #endif |
7ba935f9 | 2478 | node->failedREUSE = TRUE; |
34e8f829 A |
2479 | } |
2480 | ||
1f2f436a A |
2481 | MAGMALLOC_DEPOTREGION((void *)szone, (int)mag_index, (void *)sparse_region, TINY_REGION_SIZE, |
2482 | (int)BYTES_USED_FOR_TINY_REGION(sparse_region)); // DTrace USDT Probe | |
7ba935f9 | 2483 | |
34e8f829 A |
2484 | return 1; |
2485 | } | |
2486 | ||
34e8f829 A |
2487 | #define K 1.5 // headroom measured in number of 1Mb regions |
2488 | #define DENSITY_THRESHOLD(a) \ | |
2489 | ((a) - ((a) >> 2)) // "Emptiness" f = 0.25, so "Density" is (1 - f)*a. Generally: ((a) - ((a) >> -log2(f))) | |
2490 | ||
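/*
 * A minimal sketch, not part of the original file: what the macros above work
 * out to numerically. With emptiness fraction f = 0.25, DENSITY_THRESHOLD(a)
 * is a - (a >> 2) = 0.75 * a, so a region (or a whole magazine) is treated as
 * sparse once less than 75% of its payload bytes are in use. K = 1.5 supplies
 * hysteresis: recirculation to the Depot is only considered once at least one
 * and a half regions' worth of free bytes have accumulated in the magazine.
 * The helper name below is hypothetical; the same test appears inline in
 * tiny_free_no_lock().
 */
#if 0	/* illustration only */
static boolean_t
tiny_magazine_should_recirc_sketch(size_t a /* bytes in magazine */, size_t u /* bytes in use */)
{
	return (a - u > (3 * TINY_REGION_PAYLOAD_BYTES) / 2) &&	/* K = 1.5 regions of headroom */
	       (u < DENSITY_THRESHOLD(a));				/* magazine is mostly empty    */
}
#endif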
1f2f436a | 2491 | static INLINE boolean_t |
34e8f829 A |
2492 | tiny_free_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, region_t region, void *ptr, |
2493 | msize_t msize) | |
2494 | { | |
2495 | void *original_ptr = ptr; | |
2496 | size_t original_size = TINY_BYTES_FOR_MSIZE(msize); | |
2497 | void *next_block = ((unsigned char *)ptr + original_size); | |
2498 | msize_t previous_msize, next_msize; | |
2499 | void *previous; | |
2500 | free_list_t *big_free_block; | |
2501 | free_list_t *after_next_block; | |
2502 | free_list_t *before_next_block; | |
34e8f829 A |
2503 | |
2504 | #if DEBUG_MALLOC | |
2505 | if (LOG(szone,ptr)) { | |
2506 | malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize); | |
2507 | } | |
1f2f436a | 2508 | if (!msize) { |
34e8f829 A |
2509 | szone_error(szone, 1, "trying to free tiny block that is too small", ptr, |
2510 | "in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize); | |
2511 | } | |
2512 | #endif | |
2513 | ||
2514 | // We try to coalesce this block with the preceding one | |
2515 | previous = tiny_previous_preceding_free(ptr, &previous_msize); | |
2516 | if (previous) { | |
2517 | #if DEBUG_MALLOC | |
2518 | if (LOG(szone, ptr) || LOG(szone,previous)) { | |
2519 | malloc_printf("in tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous); | |
2520 | } | |
2521 | #endif | |
34e8f829 A |
2522 | |
2523 | // clear the meta_header since this is no longer the start of a block | |
2524 | set_tiny_meta_header_middle(ptr); | |
2525 | tiny_free_list_remove_ptr(szone, tiny_mag_ptr, previous, previous_msize); | |
2526 | ptr = previous; | |
2527 | msize += previous_msize; | |
2528 | } | |
2529 | // We try to coalesce with the next block | |
2530 | if ((next_block < TINY_REGION_END(region)) && tiny_meta_header_is_free(next_block)) { | |
34e8f829 A |
2531 | next_msize = get_tiny_free_size(next_block); |
2532 | #if DEBUG_MALLOC | |
2533 | if (LOG(szone, ptr) || LOG(szone, next_block)) { | |
2534 | malloc_printf("in tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n", | |
2535 | ptr, msize, next_block, next_msize); | |
2536 | } | |
2537 | #endif | |
2538 | // If we are coalescing with the next block, and the next block is in | |
2539 | // the last slot of the free list, then we optimize this case here to | |
2540 | // avoid removing next_block from the slot (NUM_TINY_SLOTS - 1) and then adding ptr back | |
2541 | // to slot (NUM_TINY_SLOTS - 1). | |
2542 | if (next_msize >= NUM_TINY_SLOTS) { | |
2543 | msize += next_msize; | |
2544 | ||
2545 | big_free_block = (free_list_t *)next_block; | |
2546 | after_next_block = free_list_unchecksum_ptr(szone, &big_free_block->next); | |
2547 | before_next_block = free_list_unchecksum_ptr(szone, &big_free_block->previous); | |
2548 | ||
2549 | if (!before_next_block) { | |
2550 | tiny_mag_ptr->mag_free_list[NUM_TINY_SLOTS-1] = ptr; | |
2551 | } else { | |
2552 | before_next_block->next.u = free_list_checksum_ptr(szone, ptr); | |
2553 | } | |
2554 | ||
2555 | if (after_next_block) { | |
2556 | after_next_block->previous.u = free_list_checksum_ptr(szone, ptr); | |
2557 | } | |
2558 | ||
2559 | // we don't need to checksum these since they are already checksummed | |
2560 | ((free_list_t *)ptr)->previous = big_free_block->previous; | |
2561 | ((free_list_t *)ptr)->next = big_free_block->next; | |
2562 | ||
2563 | // clear the meta_header to enable coalescing backwards | |
2564 | set_tiny_meta_header_middle(big_free_block); | |
2565 | set_tiny_meta_header_free(ptr, msize); | |
2566 | ||
2567 | goto tiny_free_ending; | |
2568 | } | |
2569 | tiny_free_list_remove_ptr(szone, tiny_mag_ptr, next_block, next_msize); | |
2570 | set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards | |
2571 | msize += next_msize; | |
2572 | } | |
1f2f436a | 2573 | |
34e8f829 | 2574 | // The tiny cache already scribbles free blocks as they go through the |
1f2f436a A |
2575 | // cache whenever msize < TINY_QUANTUM, so we do not need to do it here. | |
2576 | if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize && (msize >= TINY_QUANTUM)) | |
34e8f829 A |
2577 | memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize)); |
2578 | ||
34e8f829 | 2579 | tiny_free_list_add_ptr(szone, tiny_mag_ptr, ptr, msize); |
1f2f436a | 2580 | |
34e8f829 | 2581 | tiny_free_ending: |
34e8f829 A |
2582 | |
2583 | tiny_mag_ptr->mag_num_objects--; | |
2584 | // we use original_size and not msize to avoid double counting the coalesced blocks | |
2585 | tiny_mag_ptr->mag_num_bytes_in_objects -= original_size; | |
2586 | ||
2587 | // Update this region's bytes in use count | |
2588 | region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(region); | |
2589 | size_t bytes_used = node->bytes_used - original_size; | |
2590 | node->bytes_used = bytes_used; | |
2591 | ||
511daa4c | 2592 | #if !TARGET_OS_EMBEDDED // Always madvise for embedded platforms |
34e8f829 A |
2593 | /* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */ |
2594 | if (szone->num_tiny_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary | |
2595 | /* NOTHING */ | |
2596 | } else if (DEPOT_MAGAZINE_INDEX != mag_index) { | |
2597 | // Emptiness discriminant | |
2598 | if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) { | |
2599 | /* Region has crossed threshold from density to sparsity. Mark it "suitable" on the | |
2600 | recirculation candidates list. */ | |
2601 | node->recirc_suitable = TRUE; | |
2602 | } else { | |
2603 | /* After this free, we've found the region is still dense, so it must have been even more so before | |
2604 | the free. That implies the region is already correctly marked. Do nothing. */ | |
2605 | } | |
2606 | ||
2607 | // Has the entire magazine crossed the "emptiness threshold"? If so, transfer a region | |
2608 | // from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e | |
2609 | // is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list. | |
2610 | size_t a = tiny_mag_ptr->num_bytes_in_magazine; // Total bytes allocated to this magazine | |
2611 | size_t u = tiny_mag_ptr->mag_num_bytes_in_objects; // In use (malloc'd) from this magazine | |
2612 | ||
1f2f436a A |
2613 | if (a - u > ((3 * TINY_REGION_PAYLOAD_BYTES) / 2) && u < DENSITY_THRESHOLD(a)) { |
2614 | return tiny_free_do_recirc_to_depot(szone, tiny_mag_ptr, mag_index); | |
2615 | } | |
34e8f829 A |
2616 | |
2617 | } else { | |
511daa4c | 2618 | #endif |
34e8f829 | 2619 | // Freed to Depot. N.B. Lock on tiny_magazines[DEPOT_MAGAZINE_INDEX] is already held |
1f2f436a | 2620 | // Calculate the first page in the coalesced block that would be safe to mark MADV_FREE | |
34e8f829 A |
2621 | uintptr_t safe_ptr = (uintptr_t)ptr + sizeof(free_list_t) + sizeof(msize_t); |
2622 | uintptr_t round_safe = round_page(safe_ptr); | |
2623 | ||
1f2f436a | 2624 | // Calculate the last page in the coalesced block that would be safe to mark MADV_FREE | |
34e8f829 A |
2625 | uintptr_t safe_extent = (uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t); |
2626 | uintptr_t trunc_extent = trunc_page(safe_extent); | |
2627 | ||
2628 | // The newly freed block may complete a span of bytes that cover a page. Mark it with MADV_FREE. | |
1f2f436a A |
2629 | if (round_safe < trunc_extent) { // Safe area covers a page (perhaps many) |
2630 | uintptr_t lo = trunc_page((uintptr_t)original_ptr); | |
2631 | uintptr_t hi = round_page((uintptr_t)original_ptr + original_size); | |
2632 | ||
2633 | OSAtomicIncrement32Barrier(&(node->pinned_to_depot)); | |
2634 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
511daa4c | 2635 | #if TARGET_OS_EMBEDDED |
1f2f436a | 2636 | madvise_free_range(szone, region, MAX(round_safe, lo), MIN(trunc_extent, hi), &szone->last_tiny_advise); |
511daa4c | 2637 | #else |
1f2f436a | 2638 | madvise_free_range(szone, region, MAX(round_safe, lo), MIN(trunc_extent, hi)); |
511daa4c | 2639 | #endif |
1f2f436a A |
2640 | SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr); |
2641 | OSAtomicDecrement32Barrier(&(node->pinned_to_depot)); | |
2642 | } | |
34e8f829 | 2643 | |
511daa4c | 2644 | #if !TARGET_OS_EMBEDDED |
1f2f436a | 2645 | if (0 < bytes_used || 0 < node->pinned_to_depot) { |
34e8f829 A |
2646 | /* Depot'd region is still live. Leave it in place on the Depot's recirculation list |
2647 | so as to avoid thrashing between the Depot's free list and a magazines's free list | |
2648 | with detach_region/reattach_region */ | |
2649 | } else { | |
2650 | /* Depot'd region is just now empty. Consider return to OS. */ | |
1f2f436a A |
2651 | region_t r_dealloc = tiny_free_try_depot_unmap_no_lock(szone, tiny_mag_ptr, node); |
2652 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
2653 | if (r_dealloc) | |
2654 | deallocate_pages(szone, r_dealloc, TINY_REGION_SIZE, 0); | |
2655 | return FALSE; // Caller need not unlock | |
34e8f829 A |
2656 | } |
2657 | } | |
511daa4c | 2658 | #endif |
1f2f436a A |
2659 | |
2660 | return TRUE; // Caller must do SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr) | |
34e8f829 A |
2661 | } |
2662 | ||
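/*
 * A minimal sketch, not part of the original file: the MADV_FREE window that
 * the Depot branch of tiny_free_no_lock() computes. A coalesced free block
 * still carries live metadata at both ends (a free_list_t plus msize_t at the
 * front, a trailing msize_t at the back), so only whole pages strictly inside
 * those bounds may be advised:
 *
 *     round_safe   = round_page(ptr + sizeof(free_list_t) + sizeof(msize_t));
 *     trunc_extent = trunc_page(ptr + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
 *
 * The range handed to madvise_free_range() is then clipped to the pages spanned
 * by the block just freed (lo/hi from original_ptr and original_size), which
 * keeps each free from re-advising pages its coalesced neighbors already
 * covered. pinned_to_depot is raised around the call so the region cannot be
 * recirculated or unmapped while the magazine lock is dropped.
 */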
2663 | // Allocates from the last region or a freshly allocated region | |
2664 | static void * | |
1f2f436a A |
2665 | tiny_malloc_from_region_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, |
2666 | msize_t msize, void * aligned_address) | |
34e8f829 | 2667 | { |
1f2f436a | 2668 | void *ptr; |
34e8f829 | 2669 | |
1f2f436a A |
2670 | // Deal with unclaimed memory -- mag_bytes_free_at_end or mag_bytes_free_at_start |
2671 | if (tiny_mag_ptr->mag_bytes_free_at_end || tiny_mag_ptr->mag_bytes_free_at_start) | |
34e8f829 A |
2672 | tiny_finalize_region(szone, tiny_mag_ptr); |
2673 | ||
34e8f829 A |
2674 | // We set the unused bits of the header in the last pair to be all ones, and those of the in-use word to zeroes. | |
2675 | ((tiny_region_t)aligned_address)->pairs[CEIL_NUM_TINY_BLOCKS_WORDS-1].header = | |
2676 | (NUM_TINY_BLOCKS & 31) ? (0xFFFFFFFFU << (NUM_TINY_BLOCKS & 31)) : 0; | |
2677 | ((tiny_region_t)aligned_address)->pairs[CEIL_NUM_TINY_BLOCKS_WORDS-1].inuse = 0; | |
2678 | ||
2679 | // This is the only place in tinyland that (infrequently) takes the tiny_regions_lock. | |
2680 | // Only one thread at a time should be permitted to assess the density of the hash | |
2681 | // ring and adjust if needed. | |
2682 | // Only one thread at a time should be permitted to insert its new region on | |
2683 | // the hash ring. | |
2684 | // It is safe for all other threads to read the hash ring (hashed_regions) and | |
2685 | // the associated sizes (num_regions_allocated and num_tiny_regions). | |
2686 | ||
2687 | LOCK(szone->tiny_regions_lock); | |
2688 | ||
2689 | // Check to see if the hash ring of tiny regions needs to grow. Try to | |
2690 | // avoid the hash ring becoming too dense. | |
2691 | if (szone->tiny_region_generation->num_regions_allocated < (2 * szone->num_tiny_regions)) { | |
2692 | region_t *new_regions; | |
2693 | size_t new_size; | |
2694 | size_t new_shift = szone->tiny_region_generation->num_regions_allocated_shift; // In/Out parameter | |
2695 | new_regions = hash_regions_grow_no_lock(szone, szone->tiny_region_generation->hashed_regions, | |
2696 | szone->tiny_region_generation->num_regions_allocated, | |
2697 | &new_shift, | |
2698 | &new_size); | |
2699 | // Do not deallocate the current hashed_regions allocation since someone may | |
2700 | // be iterating it. Instead, just leak it. | |
2701 | ||
2702 | // Prepare to advance to the "next generation" of the hash ring. | |
2703 | szone->tiny_region_generation->nextgen->hashed_regions = new_regions; | |
2704 | szone->tiny_region_generation->nextgen->num_regions_allocated = new_size; | |
2705 | szone->tiny_region_generation->nextgen->num_regions_allocated_shift = new_shift; | |
2706 | ||
2707 | // Throw the switch to atomically advance to the next generation. | |
2708 | szone->tiny_region_generation = szone->tiny_region_generation->nextgen; | |
2709 | // Ensure everyone sees the advance. | |
34e8f829 | 2710 | OSMemoryBarrier(); |
34e8f829 A |
2711 | } |
2712 | // Tag the region at "aligned_address" as belonging to us, | |
2713 | // and so put it under the protection of the magazine lock we are holding. | |
2714 | // Do this before advertising "aligned_address" on the hash ring(!) | |
2715 | MAGAZINE_INDEX_FOR_TINY_REGION(aligned_address) = mag_index; | |
2716 | ||
2717 | // Insert the new region into the hash ring, and update malloc statistics | |
2718 | hash_region_insert_no_lock(szone->tiny_region_generation->hashed_regions, | |
2719 | szone->tiny_region_generation->num_regions_allocated, | |
2720 | szone->tiny_region_generation->num_regions_allocated_shift, | |
2721 | aligned_address); | |
2722 | ||
2723 | szone->num_tiny_regions++; | |
2724 | UNLOCK(szone->tiny_regions_lock); | |
2725 | ||
2726 | tiny_mag_ptr->mag_last_region = aligned_address; | |
2727 | BYTES_USED_FOR_TINY_REGION(aligned_address) = TINY_BYTES_FOR_MSIZE(msize); | |
1f2f436a A |
2728 | #if ASLR_INTERNAL |
2729 | int offset_msize = malloc_entropy[0] & TINY_ENTROPY_MASK; | |
2730 | #if DEBUG_MALLOC | |
2731 | if (getenv("MallocASLRForce")) offset_msize = strtol(getenv("MallocASLRForce"), NULL, 0) & TINY_ENTROPY_MASK; | |
2732 | if (getenv("MallocASLRPrint")) malloc_printf("Region: %p offset: %d\n", aligned_address, offset_msize); | |
2733 | #endif | |
2734 | #else | |
2735 | int offset_msize = 0; | |
2736 | #endif | |
2737 | ptr = (void *)((uintptr_t) aligned_address + TINY_BYTES_FOR_MSIZE(offset_msize)); | |
34e8f829 A |
2738 | set_tiny_meta_header_in_use(ptr, msize); |
2739 | tiny_mag_ptr->mag_num_objects++; | |
2740 | tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(msize); | |
2741 | tiny_mag_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES; | |
2742 | ||
2743 | // We put a header on the last block so that it appears in use (for coalescing, etc...) | |
2744 | set_tiny_meta_header_in_use_1((void *)((uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(msize))); | |
1f2f436a A |
2745 | tiny_mag_ptr->mag_bytes_free_at_end = TINY_BYTES_FOR_MSIZE(NUM_TINY_BLOCKS - msize - offset_msize); |
2746 | ||
2747 | #if ASLR_INTERNAL | |
2748 | // Put a header on the previous block for same reason | |
2749 | tiny_mag_ptr->mag_bytes_free_at_start = TINY_BYTES_FOR_MSIZE(offset_msize); | |
2750 | if (offset_msize) { | |
2751 | set_tiny_meta_header_in_use_1((void *)((uintptr_t)ptr - TINY_QUANTUM)); | |
2752 | } | |
2753 | #else | |
2754 | tiny_mag_ptr->mag_bytes_free_at_start = 0; | |
2755 | #endif | |
34e8f829 | 2756 | |
1f2f436a A |
2757 | // connect to magazine as last node |
2758 | recirc_list_splice_last(szone, tiny_mag_ptr, REGION_TRAILER_FOR_TINY_REGION(aligned_address)); | |
34e8f829 A |
2759 | |
2760 | #if DEBUG_MALLOC | |
2761 | if (LOG(szone,ptr)) { | |
2762 | malloc_printf("in tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize); | |
2763 | } | |
2764 | #endif | |
2765 | return ptr; | |
2766 | } | |
2767 | ||
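/*
 * A minimal sketch, not part of the original file: the publication protocol
 * for the region hash ring used in tiny_malloc_from_region_no_lock() above.
 * The writer (holding tiny_regions_lock) fills in the "nextgen" generation
 * completely, then swings szone->tiny_region_generation to it and issues a
 * memory barrier; the superseded table is deliberately leaked because lock-free
 * readers may still be walking it. A reader therefore takes one snapshot of
 * the generation pointer and works entirely from that. The helper name below
 * is hypothetical.
 */
#if 0	/* illustration only */
static region_t *
tiny_hash_ring_snapshot_sketch(szone_t *szone, size_t *num_allocated)
{
	region_hash_generation_t *gen = szone->tiny_region_generation;	/* single load of the generation */
	*num_allocated = gen->num_regions_allocated;			/* sizes travel with the table   */
	return gen->hashed_regions;
}
#endif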
1f2f436a A |
2768 | static INLINE void * |
2769 | tiny_try_shrink_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_good_size) | |
2770 | { | |
2771 | msize_t new_msize = TINY_MSIZE_FOR_BYTES(new_good_size); | |
2772 | msize_t mshrinkage = TINY_MSIZE_FOR_BYTES(old_size) - new_msize; | |
2773 | ||
2774 | if (mshrinkage) { | |
2775 | void *q = (void *)((uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(new_msize)); | |
2776 | magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines, | |
2777 | REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)), | |
2778 | MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr))); | |
2779 | ||
2780 | // Mark q as block header and in-use, thus creating two blocks. | |
2781 | set_tiny_meta_header_in_use(q, mshrinkage); | |
2782 | tiny_mag_ptr->mag_num_objects++; | |
2783 | ||
2784 | SZONE_MAGAZINE_PTR_UNLOCK(szone,tiny_mag_ptr); | |
2785 | szone_free(szone, q); // avoid inlining free_tiny(szone, q, ...); | |
2786 | } | |
2787 | return ptr; | |
2788 | } | |
2789 | ||
34e8f829 A |
2790 | static INLINE boolean_t |
2791 | tiny_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) | |
2792 | { | |
2793 | // returns 1 on success | |
2794 | msize_t index; | |
2795 | msize_t old_msize; | |
2796 | unsigned next_index; | |
2797 | void *next_block; | |
2798 | boolean_t is_free; | |
2799 | msize_t next_msize, coalesced_msize, leftover_msize; | |
2800 | void *leftover; | |
2801 | ||
2802 | index = TINY_INDEX_FOR_PTR(ptr); | |
2803 | old_msize = TINY_MSIZE_FOR_BYTES(old_size); | |
2804 | next_index = index + old_msize; | |
2805 | ||
2806 | if (next_index >= NUM_TINY_BLOCKS) { | |
2807 | return 0; | |
2808 | } | |
2809 | next_block = (char *)ptr + old_size; | |
2810 | ||
2811 | magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines, | |
2812 | REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)), | |
2813 | MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr))); | |
2814 | ||
2815 | /* | |
2816 | * Look for a free block immediately afterwards. If it's large enough, we can consume (part of) | |
2817 | * it. | |
2818 | */ | |
2819 | is_free = tiny_meta_header_is_free(next_block); | |
2820 | if (!is_free) { | |
2821 | SZONE_MAGAZINE_PTR_UNLOCK(szone,tiny_mag_ptr); | |
2822 | return 0; // next_block is in use | |
2823 | } | |
2824 | next_msize = get_tiny_free_size(next_block); | |
2825 | if (old_size + TINY_BYTES_FOR_MSIZE(next_msize) < new_size) { | |
2826 | SZONE_MAGAZINE_PTR_UNLOCK(szone,tiny_mag_ptr); | |
2827 | return 0; // even with next block, not enough | |
2828 | } | |
2829 | /* | |
2830 | * The following block is big enough; pull it from its freelist and chop off enough to satisfy | |
2831 | * our needs. | |
2832 | */ | |
2833 | tiny_free_list_remove_ptr(szone, tiny_mag_ptr, next_block, next_msize); | |
2834 | set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards | |
2835 | coalesced_msize = TINY_MSIZE_FOR_BYTES(new_size - old_size + TINY_QUANTUM - 1); | |
2836 | leftover_msize = next_msize - coalesced_msize; | |
2837 | if (leftover_msize) { | |
2838 | /* there's some left, so put the remainder back */ | |
2839 | leftover = (void *)((uintptr_t)next_block + TINY_BYTES_FOR_MSIZE(coalesced_msize)); | |
2840 | ||
2841 | tiny_free_list_add_ptr(szone, tiny_mag_ptr, leftover, leftover_msize); | |
2842 | } | |
2843 | set_tiny_meta_header_in_use(ptr, old_msize + coalesced_msize); | |
2844 | #if DEBUG_MALLOC | |
2845 | if (LOG(szone,ptr)) { | |
2846 | malloc_printf("in tiny_try_realloc_in_place(), ptr=%p, msize=%d\n", ptr, old_msize + coalesced_msize); | |
2847 | } | |
2848 | #endif | |
2849 | tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(coalesced_msize); | |
2850 | ||
2851 | // Update this region's bytes in use count | |
2852 | region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)); | |
2853 | size_t bytes_used = node->bytes_used + TINY_BYTES_FOR_MSIZE(coalesced_msize); | |
2854 | node->bytes_used = bytes_used; | |
2855 | ||
2856 | // Emptiness discriminant | |
2857 | if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) { | |
2858 | /* After this reallocation the region is still sparse, so it must have been even more so before | |
2859 | the reallocation. That implies the region is already correctly marked. Do nothing. */ | |
2860 | } else { | |
2861 | /* Region has crossed threshold from sparsity to density. Mark it not "suitable" on the | |
2862 | recirculation candidates list. */ | |
2863 | node->recirc_suitable = FALSE; | |
2864 | } | |
2865 | ||
2866 | SZONE_MAGAZINE_PTR_UNLOCK(szone,tiny_mag_ptr); | |
2867 | CHECK(szone, __PRETTY_FUNCTION__); | |
2868 | return 1; | |
2869 | } | |
2870 | ||
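/*
 * A minimal sketch, not part of the original file: the arithmetic in
 * tiny_try_realloc_in_place(), assuming the 16-byte tiny quantum and that
 * TINY_MSIZE_FOR_BYTES() truncates (the "+ TINY_QUANTUM - 1" in the call
 * supplies the round-up). Growing a 48-byte block (old_msize = 3) to 70 bytes
 * when a 64-byte free block (next_msize = 4) immediately follows it:
 *
 *     coalesced_msize = TINY_MSIZE_FOR_BYTES(70 - 48 + 16 - 1) = 2    // 22 bytes rounded up to 2 quanta
 *     leftover_msize  = next_msize - coalesced_msize           = 2    // returned to the free list
 *     new block size  = old_msize + coalesced_msize            = 5    // 80 bytes, covers the 70 requested
 */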
2871 | static boolean_t | |
2872 | tiny_check_region(szone_t *szone, region_t region) | |
2873 | { | |
2874 | uintptr_t start, ptr, region_end; | |
2875 | boolean_t prev_free = 0; | |
2876 | boolean_t is_free; | |
2877 | msize_t msize; | |
2878 | free_list_t *free_head; | |
2879 | void *follower, *previous, *next; | |
2880 | mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(region); | |
2881 | magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]); | |
2882 | ||
2883 | // Assumes locked | |
2884 | CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__); | |
2885 | ||
2886 | /* establish region limits */ | |
2887 | start = (uintptr_t)TINY_REGION_ADDRESS(region); | |
2888 | ptr = start; | |
1f2f436a A |
2889 | if (region == tiny_mag_ptr->mag_last_region) { |
2890 | ptr += tiny_mag_ptr->mag_bytes_free_at_start; | |
2891 | ||
2892 | /* | |
2893 | * Check the leading block's integrity here also. | |
2894 | */ | |
2895 | if (tiny_mag_ptr->mag_bytes_free_at_start) { | |
2896 | msize = get_tiny_meta_header((void *)(ptr - TINY_QUANTUM), &is_free); | |
2897 | if (is_free || (msize != 1)) { | |
2898 | malloc_printf("*** invariant broken for leader block %p - %d %d\n", ptr - TINY_QUANTUM, msize, is_free); | |
2899 | } | |
2900 | } | |
2901 | } | |
34e8f829 A |
2902 | region_end = (uintptr_t)TINY_REGION_END(region); |
2903 | ||
2904 | /* | |
2905 | * The last region may have a trailing chunk which has not been converted into inuse/freelist | |
2906 | * blocks yet. | |
2907 | */ | |
2908 | if (region == tiny_mag_ptr->mag_last_region) | |
2909 | region_end -= tiny_mag_ptr->mag_bytes_free_at_end; | |
2910 | ||
2911 | /* | |
2912 | * Scan blocks within the region. | |
2913 | */ | |
2914 | while (ptr < region_end) { | |
2915 | /* | |
2916 | * If the first block is free, and its size is 65536 (msize = 0) then the entire region is | |
2917 | * free. | |
2918 | */ | |
2919 | msize = get_tiny_meta_header((void *)ptr, &is_free); | |
2920 | if (is_free && !msize && (ptr == start)) { | |
2921 | return 1; | |
2922 | } | |
2923 | ||
2924 | /* | |
2925 | * If the block's size is 65536 (msize = 0) then, since we're not the first entry, the size is | |
2926 | * corrupt. | |
2927 | */ | |
2928 | if (!msize) { | |
2929 | malloc_printf("*** invariant broken for tiny block %p this msize=%d - size is too small\n", | |
2930 | ptr, msize); | |
2931 | return 0; | |
2932 | } | |
2933 | ||
2934 | if (!is_free) { | |
2935 | /* | |
2936 | * In-use blocks cannot be more than (NUM_TINY_SLOTS - 1) quanta large. | |
2937 | */ | |
2938 | prev_free = 0; | |
2939 | if (msize > (NUM_TINY_SLOTS - 1)) { | |
2940 | malloc_printf("*** invariant broken for %p this tiny msize=%d - size is too large\n", | |
2941 | ptr, msize); | |
2942 | return 0; | |
2943 | } | |
2944 | /* move to next block */ | |
2945 | ptr += TINY_BYTES_FOR_MSIZE(msize); | |
2946 | } else { | |
2947 | /* | |
2948 | * Free blocks must have been coalesced, we cannot have a free block following another | |
2949 | * free block. | |
2950 | */ | |
2951 | if (prev_free) { | |
2952 | malloc_printf("*** invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n", | |
2953 | ptr, msize); | |
2954 | return 0; | |
2955 | } | |
2956 | prev_free = 1; | |
2957 | /* | |
2958 | * Check the integrity of this block's entry in its freelist. | |
2959 | */ | |
2960 | free_head = (free_list_t *)ptr; | |
2961 | previous = free_list_unchecksum_ptr(szone, &free_head->previous); | |
2962 | next = free_list_unchecksum_ptr(szone, &free_head->next); | |
2963 | if (previous && !tiny_meta_header_is_free(previous)) { | |
2964 | malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n", | |
2965 | ptr, previous); | |
2966 | return 0; | |
2967 | } | |
2968 | if (next && !tiny_meta_header_is_free(next)) { | |
2969 | malloc_printf("*** invariant broken for %p (next in free list %p is not a free pointer)\n", | |
2970 | ptr, next); | |
2971 | return 0; | |
2972 | } | |
2973 | /* | |
2974 | * Check the free block's trailing size value. | |
2975 | */ | |
2976 | follower = FOLLOWING_TINY_PTR(ptr, msize); | |
2977 | if (((uintptr_t)follower != region_end) && (get_tiny_previous_free_msize(follower) != msize)) { | |
2978 | malloc_printf("*** invariant broken for tiny free %p followed by %p in region [%p-%p] " | |
2979 | "(end marker incorrect) should be %d; in fact %d\n", | |
2980 | ptr, follower, TINY_REGION_ADDRESS(region), region_end, msize, get_tiny_previous_free_msize(follower)); | |
2981 | return 0; | |
2982 | } | |
2983 | /* move to next block */ | |
2984 | ptr = (uintptr_t)follower; | |
2985 | } | |
2986 | } | |
2987 | /* | |
2988 | * Ensure that we scanned the entire region | |
2989 | */ | |
2990 | if (ptr != region_end) { | |
2991 | malloc_printf("*** invariant broken for region end %p - %p\n", ptr, region_end); | |
2992 | return 0; | |
2993 | } | |
2994 | /* | |
2995 | * Check the trailing block's integrity. | |
2996 | */ | |
2997 | if (region == tiny_mag_ptr->mag_last_region) { | |
2998 | if (tiny_mag_ptr->mag_bytes_free_at_end) { | |
2999 | msize = get_tiny_meta_header((void *)ptr, &is_free); | |
3000 | if (is_free || (msize != 1)) { | |
3001 | malloc_printf("*** invariant broken for blocker block %p - %d %d\n", ptr, msize, is_free); | |
3002 | } | |
3003 | } | |
3004 | } | |
3005 | return 1; | |
3006 | } | |
3007 | ||
3008 | static kern_return_t | |
3009 | tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone, | |
3010 | memory_reader_t reader, vm_range_recorder_t recorder) | |
3011 | { | |
3012 | size_t num_regions; | |
3013 | size_t index; | |
3014 | region_t *regions; | |
3015 | vm_range_t buffer[MAX_RECORDER_BUFFER]; | |
3016 | unsigned count = 0; | |
3017 | kern_return_t err; | |
3018 | region_t region; | |
3019 | vm_range_t range; | |
3020 | vm_range_t admin_range; | |
3021 | vm_range_t ptr_range; | |
3022 | unsigned char *mapped_region; | |
3023 | uint32_t *block_header; | |
3024 | uint32_t *in_use; | |
3025 | unsigned block_index; | |
3026 | unsigned block_limit; | |
3027 | boolean_t is_free; | |
3028 | msize_t msize; | |
3029 | void *mapped_ptr; | |
3030 | unsigned bit; | |
1f2f436a A |
3031 | magazine_t *tiny_mag_base = NULL; |
3032 | ||
34e8f829 A |
3033 | region_hash_generation_t *trg_ptr; |
3034 | err = reader(task, (vm_address_t)szone->tiny_region_generation, sizeof(region_hash_generation_t), (void **)&trg_ptr); | |
3035 | if (err) return err; | |
3036 | ||
3037 | num_regions = trg_ptr->num_regions_allocated; | |
3038 | err = reader(task, (vm_address_t)trg_ptr->hashed_regions, sizeof(region_t) * num_regions, (void **)®ions); | |
3039 | if (err) return err; | |
3040 | ||
1f2f436a A |
3041 | if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { |
3042 | // Map in all active magazines. Do this outside the iteration over regions. | |
3043 | err = reader(task, (vm_address_t)(szone->tiny_magazines), | |
3044 | szone->num_tiny_magazines*sizeof(magazine_t),(void **)&tiny_mag_base); | |
3045 | if (err) return err; | |
3046 | } | |
3047 | ||
34e8f829 A |
3048 | for (index = 0; index < num_regions; ++index) { |
3049 | region = regions[index]; | |
3050 | if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) { | |
3051 | range.address = (vm_address_t)TINY_REGION_ADDRESS(region); | |
3052 | range.size = (vm_size_t)TINY_REGION_SIZE; | |
3053 | if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) { | |
3054 | admin_range.address = range.address + TINY_METADATA_START; | |
3055 | admin_range.size = TINY_METADATA_SIZE; | |
3056 | recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1); | |
3057 | } | |
3058 | if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) { | |
3059 | ptr_range.address = range.address; | |
3060 | ptr_range.size = NUM_TINY_BLOCKS * TINY_QUANTUM; | |
3061 | recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1); | |
3062 | } | |
3063 | if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { | |
1f2f436a A |
3064 | void *mag_last_free; |
3065 | vm_address_t mag_last_free_ptr = 0; | |
3066 | msize_t mag_last_free_msize = 0; | |
3067 | ||
34e8f829 A |
3068 | err = reader(task, range.address, range.size, (void **)&mapped_region); |
3069 | if (err) | |
3070 | return err; | |
3071 | ||
3072 | mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(mapped_region); | |
1f2f436a A |
3073 | magazine_t *tiny_mag_ptr = tiny_mag_base + mag_index; |
3074 | ||
3075 | if (DEPOT_MAGAZINE_INDEX != mag_index) { | |
3076 | mag_last_free = tiny_mag_ptr->mag_last_free; | |
3077 | if (mag_last_free) { | |
3078 | mag_last_free_ptr = (uintptr_t) mag_last_free & ~(TINY_QUANTUM - 1); | |
3079 | mag_last_free_msize = (uintptr_t) mag_last_free & (TINY_QUANTUM - 1); | |
3080 | } | |
3081 | } else { | |
3082 | for (mag_index = 0; mag_index < szone->num_tiny_magazines; mag_index++) { | |
3083 | if ((void *)range.address == (tiny_mag_base + mag_index)->mag_last_free_rgn) { | |
3084 | mag_last_free = (tiny_mag_base + mag_index)->mag_last_free; | |
3085 | if (mag_last_free) { | |
3086 | mag_last_free_ptr = (uintptr_t) mag_last_free & ~(TINY_QUANTUM - 1); | |
3087 | mag_last_free_msize = (uintptr_t) mag_last_free & (TINY_QUANTUM - 1); | |
3088 | } | |
3089 | } | |
3090 | } | |
34e8f829 A |
3091 | } |
3092 | ||
3093 | block_header = (uint32_t *)(mapped_region + TINY_METADATA_START + sizeof(region_trailer_t)); | |
3094 | in_use = TINY_INUSE_FOR_HEADER(block_header); | |
3095 | block_index = 0; | |
3096 | block_limit = NUM_TINY_BLOCKS; | |
1f2f436a A |
3097 | if (region == tiny_mag_ptr->mag_last_region) { |
3098 | block_index += TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_start); | |
34e8f829 | 3099 | block_limit -= TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_end); |
1f2f436a | 3100 | } |
34e8f829 A |
3101 | |
3102 | while (block_index < block_limit) { | |
3103 | vm_size_t block_offset = TINY_BYTES_FOR_MSIZE(block_index); | |
3104 | is_free = !BITARRAY_BIT(in_use, block_index); | |
3105 | if (is_free) { | |
3106 | mapped_ptr = mapped_region + block_offset; | |
3107 | ||
3108 | // mapped_region, the address at which 'range' in 'task' has been | |
3109 | // mapped into our process, is not necessarily aligned to | |
3110 | // TINY_BLOCKS_ALIGN. | |
3111 | // | |
3112 | // Since the code in get_tiny_free_size() assumes the pointer came | |
3113 | // from a properly aligned tiny region, and mapped_region is not | |
3114 | // necessarily aligned, do the size calculation directly. | |
3115 | // If the next bit is set in the header bitmap, then the size is one | |
3116 | // quantum. Otherwise, read the size field. | |
3117 | if (!BITARRAY_BIT(block_header, (block_index+1))) | |
3118 | msize = TINY_FREE_SIZE(mapped_ptr); | |
3119 | else | |
3120 | msize = 1; | |
3121 | ||
34e8f829 A |
3122 | } else if (range.address + block_offset != mag_last_free_ptr) { |
3123 | msize = 1; | |
3124 | bit = block_index + 1; | |
3125 | while (! BITARRAY_BIT(block_header, bit)) { | |
3126 | bit++; | |
3127 | msize ++; | |
3128 | } | |
3129 | buffer[count].address = range.address + block_offset; | |
3130 | buffer[count].size = TINY_BYTES_FOR_MSIZE(msize); | |
3131 | count++; | |
3132 | if (count >= MAX_RECORDER_BUFFER) { | |
3133 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); | |
3134 | count = 0; | |
3135 | } | |
3136 | } else { | |
3137 | // Block is not free but it matches mag_last_free_ptr so even | |
3138 | // though it is not marked free in the bitmap, we treat it as if | |
3139 | // it is and move on | |
3140 | msize = mag_last_free_msize; | |
3141 | } | |
7ba935f9 A |
3142 | |
3143 | if (!msize) | |
3144 | return KERN_FAILURE; // Somethings amiss. Avoid looping at this block_index. | |
3145 | ||
34e8f829 A |
3146 | block_index += msize; |
3147 | } | |
3148 | if (count) { | |
3149 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); | |
3150 | count = 0; | |
3151 | } | |
3152 | } | |
3153 | } | |
3154 | } | |
3155 | return 0; | |
3156 | } | |
3157 | ||
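/*
 * A minimal sketch, not part of the original file: how the enumerator above
 * sizes an in-use block without reading the remote task's payload. The header
 * bitmap has a bit set at the first quantum of every block, so a block's
 * length is the distance from its own header bit to the next one. The helper
 * name below is hypothetical; the same scan appears inline in the enumerator.
 */
#if 0	/* illustration only */
static msize_t
tiny_in_use_block_msize_sketch(uint32_t *block_header, unsigned block_index)
{
	msize_t msize = 1;
	unsigned bit = block_index + 1;
	while (!BITARRAY_BIT(block_header, bit)) {	/* next set header bit ends this block */
		bit++;
		msize++;
	}
	return msize;
}
#endif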
3158 | static void * | |
3159 | tiny_malloc_from_free_list(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize) | |
3160 | { | |
3161 | free_list_t *ptr; | |
3162 | msize_t this_msize; | |
3163 | grain_t slot = msize - 1; | |
3164 | free_list_t **free_list = tiny_mag_ptr->mag_free_list; | |
3165 | free_list_t **the_slot = free_list + slot; | |
3166 | free_list_t *next; | |
3167 | free_list_t **limit; | |
3168 | #if defined(__LP64__) | |
3169 | uint64_t bitmap; | |
3170 | #else | |
3171 | uint32_t bitmap; | |
3172 | #endif | |
3173 | msize_t leftover_msize; | |
3174 | free_list_t *leftover_ptr; | |
3175 | ||
3176 | // Assumes we've locked the region | |
3177 | CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__); | |
3178 | ||
3179 | // Look for an exact match by checking the freelist for this msize. | |
3180 | // | |
3181 | ptr = *the_slot; | |
3182 | if (ptr) { | |
3183 | next = free_list_unchecksum_ptr(szone, &ptr->next); | |
3184 | if (next) { | |
3185 | next->previous = ptr->previous; | |
3186 | } else { | |
3187 | BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot); | |
3188 | } | |
3189 | *the_slot = next; | |
3190 | this_msize = msize; | |
3191 | #if DEBUG_MALLOC | |
3192 | if (LOG(szone, ptr)) { | |
3193 | malloc_printf("in tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize); | |
3194 | } | |
3195 | #endif | |
3196 | goto return_tiny_alloc; | |
3197 | } | |
3198 | ||
3199 | // Mask off the bits representing slots holding free blocks smaller than the | |
3200 | // size we need. If there are no larger free blocks, try allocating from | |
3201 | // the free space at the end of the tiny region. | |
3202 | #if defined(__LP64__) | |
3203 | bitmap = ((uint64_t *)(tiny_mag_ptr->mag_bitmap))[0] & ~ ((1ULL << slot) - 1); | |
3204 | #else | |
3205 | bitmap = tiny_mag_ptr->mag_bitmap[0] & ~ ((1 << slot) - 1); | |
3206 | #endif | |
3207 | if (!bitmap) | |
3208 | goto try_tiny_malloc_from_end; | |
3209 | ||
3210 | slot = BITMAPV_CTZ(bitmap); | |
3211 | limit = free_list + NUM_TINY_SLOTS - 1; | |
3212 | free_list += slot; | |
3213 | ||
3214 | if (free_list < limit) { | |
3215 | ptr = *free_list; | |
3216 | if (ptr) { | |
3217 | next = free_list_unchecksum_ptr(szone, &ptr->next); | |
3218 | *free_list = next; | |
3219 | if (next) { | |
3220 | next->previous = ptr->previous; | |
3221 | } else { | |
3222 | BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot); | |
3223 | } | |
3224 | this_msize = get_tiny_free_size(ptr); | |
3225 | goto add_leftover_and_proceed; | |
3226 | } | |
3227 | #if DEBUG_MALLOC | |
3228 | malloc_printf("in tiny_malloc_from_free_list(), mag_bitmap out of sync, slot=%d\n",slot); | |
3229 | #endif | |
3230 | } | |
3231 | ||
3232 | // We are now looking at the last slot, which contains blocks equal to, or | |
3233 | // due to coalescing of free blocks, larger than (NUM_TINY_SLOTS - 1) * tiny quantum size. | |
3234 | // If the last freelist is not empty, and the head contains a block that is | |
3235 | // larger than our request, then the remainder is put back on the free list. | |
3236 | ptr = *limit; | |
3237 | if (ptr) { | |
3238 | this_msize = get_tiny_free_size(ptr); | |
3239 | next = free_list_unchecksum_ptr(szone, &ptr->next); | |
3240 | if (this_msize - msize >= NUM_TINY_SLOTS) { | |
3241 | // the leftover will go back to the free list, so we optimize by | |
3242 | // modifying the free list rather than a pop and push of the head | |
3243 | leftover_msize = this_msize - msize; | |
3244 | leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize)); | |
3245 | *limit = leftover_ptr; | |
3246 | if (next) { | |
3247 | next->previous.u = free_list_checksum_ptr(szone, leftover_ptr); | |
3248 | } | |
3249 | leftover_ptr->previous = ptr->previous; | |
3250 | leftover_ptr->next = ptr->next; | |
3251 | set_tiny_meta_header_free(leftover_ptr, leftover_msize); | |
3252 | #if DEBUG_MALLOC | |
3253 | if (LOG(szone,ptr)) { | |
3254 | malloc_printf("in tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", | |
3255 | ptr, msize, this_msize); | |
3256 | } | |
3257 | #endif | |
3258 | this_msize = msize; | |
3259 | goto return_tiny_alloc; | |
3260 | } | |
3261 | if (next) { | |
3262 | next->previous = ptr->previous; | |
3263 | } | |
3264 | *limit = next; | |
3265 | goto add_leftover_and_proceed; | |
3266 | /* NOTREACHED */ | |
3267 | } | |
3268 | ||
3269 | try_tiny_malloc_from_end: | |
3270 | // Let's see if we can use tiny_mag_ptr->mag_bytes_free_at_end | |
3271 | if (tiny_mag_ptr->mag_bytes_free_at_end >= TINY_BYTES_FOR_MSIZE(msize)) { | |
3272 | ptr = (free_list_t *)((uintptr_t)TINY_REGION_END(tiny_mag_ptr->mag_last_region) - | |
3273 | tiny_mag_ptr->mag_bytes_free_at_end); | |
3274 | tiny_mag_ptr->mag_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(msize); | |
3275 | if (tiny_mag_ptr->mag_bytes_free_at_end) { | |
3276 | // let's add an in use block after ptr to serve as boundary | |
3277 | set_tiny_meta_header_in_use_1((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize)); | |
3278 | } | |
3279 | this_msize = msize; | |
3280 | #if DEBUG_MALLOC | |
3281 | if (LOG(szone, ptr)) { | |
3282 | malloc_printf("in tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize); | |
3283 | } | |
3284 | #endif | |
3285 | goto return_tiny_alloc; | |
3286 | } | |
1f2f436a A |
3287 | #if ASLR_INTERNAL |
3288 | // Try from start if nothing left at end | |
3289 | if (tiny_mag_ptr->mag_bytes_free_at_start >= TINY_BYTES_FOR_MSIZE(msize)) { | |
3290 | ptr = (free_list_t *)(TINY_REGION_ADDRESS(tiny_mag_ptr->mag_last_region) + | |
3291 | tiny_mag_ptr->mag_bytes_free_at_start - TINY_BYTES_FOR_MSIZE(msize)); | |
3292 | tiny_mag_ptr->mag_bytes_free_at_start -= TINY_BYTES_FOR_MSIZE(msize); | |
3293 | if (tiny_mag_ptr->mag_bytes_free_at_start) { | |
3294 | // let's add an in use block before ptr to serve as boundary | |
3295 | set_tiny_meta_header_in_use_1((unsigned char *)ptr - TINY_QUANTUM); | |
3296 | } | |
3297 | this_msize = msize; | |
3298 | #if DEBUG_MALLOC | |
3299 | if (LOG(szone, ptr)) { | |
3300 | malloc_printf("in tiny_malloc_from_free_list(), from start ptr=%p, msize=%d\n", ptr, msize); | |
3301 | } | |
3302 | #endif | |
3303 | goto return_tiny_alloc; | |
3304 | } | |
3305 | #endif | |
34e8f829 A |
3306 | return NULL; |
3307 | ||
3308 | add_leftover_and_proceed: | |
3309 | if (!this_msize || (this_msize > msize)) { | |
3310 | leftover_msize = this_msize - msize; | |
3311 | leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize)); | |
3312 | #if DEBUG_MALLOC | |
3313 | if (LOG(szone,ptr)) { | |
3314 | malloc_printf("in tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize); | |
3315 | } | |
3316 | #endif | |
3317 | tiny_free_list_add_ptr(szone, tiny_mag_ptr, leftover_ptr, leftover_msize); | |
3318 | this_msize = msize; | |
3319 | } | |
3320 | ||
3321 | return_tiny_alloc: | |
3322 | tiny_mag_ptr->mag_num_objects++; | |
3323 | tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(this_msize); | |
3324 | ||
3325 | // Update this region's bytes in use count | |
3326 | region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)); | |
3327 | size_t bytes_used = node->bytes_used + TINY_BYTES_FOR_MSIZE(this_msize); | |
3328 | node->bytes_used = bytes_used; | |
3329 | ||
3330 | // Emptiness discriminant | |
3331 | if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) { | |
3332 | /* After this allocation the region is still sparse, so it must have been even more so before | |
3333 | the allocation. That implies the region is already correctly marked. Do nothing. */ | |
3334 | } else { | |
3335 | /* Region has crossed threshold from sparsity to density. Mark it not "suitable" on the | |
3336 | recirculation candidates list. */ | |
3337 | node->recirc_suitable = FALSE; | |
3338 | } | |
3339 | #if DEBUG_MALLOC | |
3340 | if (LOG(szone,ptr)) { | |
3341 | malloc_printf("in tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize); | |
3342 | } | |
3343 | #endif | |
3344 | if (this_msize > 1) | |
3345 | set_tiny_meta_header_in_use(ptr, this_msize); | |
3346 | else | |
3347 | set_tiny_meta_header_in_use_1(ptr); | |
3348 | return ptr; | |
3349 | } | |
3350 | #undef DENSITY_THRESHOLD | |
3351 | #undef K | |
3352 | ||
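/*
 * A minimal sketch, not part of the original file: the best-fit slot search
 * shared by tiny_malloc_from_free_list() above and tiny_find_msize_region().
 * mag_bitmap keeps one bit per free-list slot; masking off the slots that are
 * too small and counting trailing zeros on what remains picks the smallest
 * slot that can satisfy the request without walking the slots one by one:
 *
 *     bitmap = mag_bitmap[0] & ~((1ULL << slot) - 1);   // drop slots smaller than the request
 *     if (bitmap)
 *         slot = BITMAPV_CTZ(bitmap);                   // lowest surviving bit is the best fit
 *
 * When no bit survives, the allocation falls back to mag_bytes_free_at_end
 * (or, with ASLR_INTERNAL, mag_bytes_free_at_start) of the last region.
 */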
3353 | static INLINE void * | |
3354 | tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) | |
3355 | { | |
3356 | void *ptr; | |
3357 | mag_index_t mag_index = mag_get_thread_index(szone); | |
3358 | magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]); | |
3359 | ||
3360 | #if DEBUG_MALLOC | |
3361 | if (DEPOT_MAGAZINE_INDEX == mag_index) { | |
3362 | szone_error(szone, 1, "malloc called for magazine index -1", NULL, NULL); | |
3363 | return(NULL); | |
3364 | } | |
3365 | ||
3366 | if (!msize) { | |
3367 | szone_error(szone, 1, "invariant broken (!msize) in allocation (region)", NULL, NULL); | |
3368 | return(NULL); | |
3369 | } | |
3370 | #endif | |
3371 | ||
3372 | SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr); | |
3373 | ||
3374 | #if TINY_CACHE | |
3375 | ptr = tiny_mag_ptr->mag_last_free; | |
3376 | ||
3377 | if ((((uintptr_t)ptr) & (TINY_QUANTUM - 1)) == msize) { | |
3378 | // we have a winner | |
3379 | tiny_mag_ptr->mag_last_free = NULL; | |
1f2f436a | 3380 | tiny_mag_ptr->mag_last_free_rgn = NULL; |
34e8f829 A |
3381 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); |
3382 | CHECK(szone, __PRETTY_FUNCTION__); | |
3383 | ptr = (void *)((uintptr_t)ptr & ~ (TINY_QUANTUM - 1)); | |
3384 | if (cleared_requested) { | |
3385 | memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize)); | |
3386 | } | |
3387 | #if DEBUG_MALLOC | |
3388 | if (LOG(szone,ptr)) { | |
3389 | malloc_printf("in tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize); | |
3390 | } | |
3391 | #endif | |
3392 | return ptr; | |
3393 | } | |
3394 | #endif /* TINY_CACHE */ | |
3395 | ||
1f2f436a | 3396 | while (1) { |
34e8f829 A |
3397 | ptr = tiny_malloc_from_free_list(szone, tiny_mag_ptr, mag_index, msize); |
3398 | if (ptr) { | |
3399 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3400 | CHECK(szone, __PRETTY_FUNCTION__); | |
3401 | if (cleared_requested) { | |
3402 | memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize)); | |
3403 | } | |
3404 | return ptr; | |
3405 | } | |
34e8f829 | 3406 | |
1f2f436a A |
3407 | if (tiny_get_region_from_depot(szone, tiny_mag_ptr, mag_index, msize)) { |
3408 | ptr = tiny_malloc_from_free_list(szone, tiny_mag_ptr, mag_index, msize); | |
3409 | if (ptr) { | |
3410 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3411 | CHECK(szone, __PRETTY_FUNCTION__); | |
3412 | if (cleared_requested) { | |
3413 | memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize)); | |
3414 | } | |
3415 | return ptr; | |
3416 | } | |
3417 | } | |
3418 | ||
3419 | // The magazine is exhausted. A new region (heap) must be allocated to satisfy this call to malloc(). | |
3420 | // The allocation, an mmap() system call, will be performed outside the magazine spin locks by the first | |
3421 | // thread that suffers the exhaustion. That thread sets "alloc_underway" and enters a critical section. | |
3422 | // Threads arriving here later are excluded from the critical section, yield the CPU, and then retry the | |
3423 | // allocation. After some time the magazine is resupplied, the original thread leaves with its allocation, | |
3424 | // and retrying threads succeed in the code just above. | |
3425 | if (!tiny_mag_ptr->alloc_underway) { | |
3426 | void *fresh_region; | |
3427 | ||
3428 | // time to create a new region (do this outside the magazine lock) | |
3429 | tiny_mag_ptr->alloc_underway = TRUE; | |
3430 | OSMemoryBarrier(); | |
3431 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3432 | fresh_region = allocate_pages_securely(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, VM_MEMORY_MALLOC_TINY); | |
3433 | SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr); | |
3434 | ||
3435 | MAGMALLOC_ALLOCREGION((void *)szone, (int)mag_index, fresh_region, TINY_REGION_SIZE); // DTrace USDT Probe | |
3436 | ||
3437 | if (!fresh_region) { // out of memory! | |
3438 | tiny_mag_ptr->alloc_underway = FALSE; | |
3439 | OSMemoryBarrier(); | |
3440 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3441 | return NULL; | |
3442 | } | |
3443 | ||
3444 | ptr = tiny_malloc_from_region_no_lock(szone, tiny_mag_ptr, mag_index, msize, fresh_region); | |
3445 | ||
3446 | // we don't clear because this freshly allocated space is pristine | |
3447 | tiny_mag_ptr->alloc_underway = FALSE; | |
3448 | OSMemoryBarrier(); | |
3449 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3450 | CHECK(szone, __PRETTY_FUNCTION__); | |
3451 | return ptr; | |
3452 | } else { | |
3453 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3454 | pthread_yield_np(); | |
3455 | SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr); | |
3456 | } | |
3457 | } | |
3458 | /* NOTREACHED */ | |
34e8f829 A |
3459 | } |
3460 | ||
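/*
 * A minimal sketch, not part of the original file: the TINY_CACHE encoding
 * shared by tiny_malloc_should_clear() above and free_tiny() below. Tiny
 * blocks are quantum (16-byte) aligned, so the low 4 bits of a block address
 * are always zero; mag_last_free packs the most recently freed pointer and its
 * msize (which must be < TINY_QUANTUM to qualify for the cache) into one word.
 * The helper names below are hypothetical.
 */
#if 0	/* illustration only */
static void *
tiny_cache_pack_sketch(void *ptr, msize_t msize)
{
	return (void *)(((uintptr_t)ptr) | msize);			/* msize rides in the low bits */
}

static void
tiny_cache_unpack_sketch(void *packed, void **ptr, msize_t *msize)
{
	*msize = (uintptr_t)packed & (TINY_QUANTUM - 1);		/* low 4 bits hold the msize   */
	*ptr = (void *)((uintptr_t)packed & ~(TINY_QUANTUM - 1));	/* the rest is the block base  */
}
#endif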
3461 | static NOINLINE void | |
3462 | free_tiny_botch(szone_t *szone, free_list_t *ptr) | |
3463 | { | |
3464 | mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)); | |
3465 | magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]); | |
3466 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3467 | szone_error(szone, 1, "double free", ptr, NULL); | |
3468 | } | |
3469 | ||
3470 | static INLINE void | |
3471 | free_tiny(szone_t *szone, void *ptr, region_t tiny_region, size_t known_size) | |
3472 | { | |
3473 | msize_t msize; | |
3474 | boolean_t is_free; | |
3475 | mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region); | |
3476 | magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]); | |
3477 | ||
3478 | // ptr is known to be in tiny_region | |
3479 | if (known_size) { | |
3480 | msize = TINY_MSIZE_FOR_BYTES(known_size + TINY_QUANTUM - 1); | |
3481 | } else { | |
3482 | msize = get_tiny_meta_header(ptr, &is_free); | |
3483 | if (is_free) { | |
3484 | free_tiny_botch(szone, ptr); | |
3485 | return; | |
3486 | } | |
3487 | } | |
3488 | #if DEBUG_MALLOC | |
3489 | if (!msize) { | |
3490 | malloc_printf("*** free_tiny() block in use is too large: %p\n", ptr); | |
3491 | return; | |
3492 | } | |
3493 | #endif | |
3494 | ||
3495 | SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr); | |
3496 | ||
3497 | #if TINY_CACHE | |
3498 | // Depot does not participate in TINY_CACHE since it can't be directly malloc()'d | |
3499 | if (DEPOT_MAGAZINE_INDEX != mag_index) { | |
3500 | if (msize < TINY_QUANTUM) { // to see if the bits fit in the last 4 bits | |
3501 | void *ptr2 = tiny_mag_ptr->mag_last_free; // Might be NULL | |
3502 | region_t rgn2 = tiny_mag_ptr->mag_last_free_rgn; | |
3503 | ||
3504 | /* check that we don't already have this pointer in the cache */ | |
3505 | if (ptr == (void *)((uintptr_t)ptr2 & ~ (TINY_QUANTUM - 1))) { | |
3506 | free_tiny_botch(szone, ptr); | |
3507 | return; | |
3508 | } | |
3509 | ||
3510 | if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize) | |
3511 | memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize)); | |
3512 | ||
3513 | tiny_mag_ptr->mag_last_free = (void *)(((uintptr_t)ptr) | msize); | |
3514 | tiny_mag_ptr->mag_last_free_rgn = tiny_region; | |
3515 | ||
3516 | if (!ptr2) { | |
3517 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3518 | CHECK(szone, __PRETTY_FUNCTION__); | |
3519 | return; | |
3520 | } | |
3521 | ||
3522 | msize = (uintptr_t)ptr2 & (TINY_QUANTUM - 1); | |
3523 | ptr = (void *)(((uintptr_t)ptr2) & ~(TINY_QUANTUM - 1)); | |
3524 | tiny_region = rgn2; | |
3525 | } | |
3526 | } | |
3527 | #endif /* TINY_CACHE */ | |
3528 | ||
3529 | // Now in the time it took to acquire the lock, the region may have migrated | |
3530 | // from one magazine to another. I.e. trailer->mag_index is volatile. | |
3531 | // In which case the magazine lock we obtained (namely magazines[mag_index].mag_lock) | |
3532 | // is stale. If so, keep on tryin' ... | |
3533 | region_trailer_t *trailer = REGION_TRAILER_FOR_TINY_REGION(tiny_region); | |
3534 | mag_index_t refreshed_index; | |
3535 | ||
3536 | while (mag_index != (refreshed_index = trailer->mag_index)) { // Note assignment | |
3537 | ||
3538 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3539 | ||
3540 | mag_index = refreshed_index; | |
3541 | tiny_mag_ptr = &(szone->tiny_magazines[mag_index]); | |
3542 | SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr); | |
3543 | } | |
3544 | ||
1f2f436a | 3545 | if (tiny_free_no_lock(szone, tiny_mag_ptr, mag_index, tiny_region, ptr, msize)) |
34e8f829 | 3546 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); |
1f2f436a | 3547 | |
34e8f829 A |
3548 | CHECK(szone, __PRETTY_FUNCTION__); |
3549 | } | |
3550 | ||
3551 | static void | |
3552 | print_tiny_free_list(szone_t *szone) | |
3553 | { | |
3554 | free_list_t *ptr; | |
3555 | _SIMPLE_STRING b = _simple_salloc(); | |
3556 | mag_index_t mag_index; | |
3557 | ||
3558 | if (b) { | |
3559 | _simple_sappend(b, "tiny free sizes:\n"); | |
3560 | for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) { | |
3561 | grain_t slot = 0; | |
3562 | _simple_sprintf(b,"\tMagazine %d: ", mag_index); | |
3563 | while (slot < NUM_TINY_SLOTS) { | |
3564 | ptr = szone->tiny_magazines[mag_index].mag_free_list[slot]; | |
3565 | if (ptr) { | |
3566 | _simple_sprintf(b, "%s%y[%d]; ", (slot == NUM_TINY_SLOTS-1) ? ">=" : "", | |
3567 | (slot+1)*TINY_QUANTUM, free_list_count(szone, ptr)); | |
3568 | } | |
3569 | slot++; | |
3570 | } | |
3571 | _simple_sappend(b,"\n"); | |
3572 | } | |
3573 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b)); | |
3574 | _simple_sfree(b); | |
3575 | } | |
3576 | } | |
3577 | ||
3578 | static void | |
1f2f436a | 3579 | print_tiny_region(boolean_t verbose, region_t region, size_t bytes_at_start, size_t bytes_at_end) |
34e8f829 A |
3580 | { |
3581 | unsigned counts[1024]; | |
3582 | unsigned in_use = 0; | |
3583 | uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(region); | |
1f2f436a | 3584 | uintptr_t current = start + bytes_at_end; |
34e8f829 A |
3585 | uintptr_t limit = (uintptr_t)TINY_REGION_END(region) - bytes_at_end; |
3586 | boolean_t is_free; | |
3587 | msize_t msize; | |
3588 | unsigned ci; | |
3589 | _SIMPLE_STRING b; | |
3590 | uintptr_t pgTot = 0; | |
3591 | ||
3592 | if (region == HASHRING_REGION_DEALLOCATED) { | |
3593 | if ((b = _simple_salloc()) != NULL) { | |
3594 | _simple_sprintf(b, "Tiny region [unknown address] was returned to the OS\n"); | |
3595 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b)); | |
3596 | _simple_sfree(b); | |
3597 | } | |
3598 | return; | |
3599 | } | |
3600 | ||
3601 | memset(counts, 0, sizeof(counts)); | |
3602 | while (current < limit) { | |
3603 | msize = get_tiny_meta_header((void *)current, &is_free); | |
3604 | if (is_free && !msize && (current == start)) { | |
3605 | // first block is all free | |
3606 | uintptr_t pgLo = round_page(start + sizeof(free_list_t) + sizeof(msize_t)); | |
3607 | uintptr_t pgHi = trunc_page(start + TINY_REGION_SIZE - sizeof(msize_t)); | |
3608 | ||
3609 | if (pgLo < pgHi) { | |
3610 | pgTot += (pgHi - pgLo); | |
3611 | } | |
3612 | break; | |
3613 | } | |
3614 | if (!msize) { | |
3615 | malloc_printf("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize); | |
3616 | break; | |
3617 | } | |
3618 | if (!is_free) { | |
3619 | // block in use | |
3620 | if (msize > NUM_TINY_SLOTS) | |
3621 | malloc_printf("*** error at %p msize for in_use is %d\n", (void *)current, msize); | |
3622 | if (msize < 1024) | |
3623 | counts[msize]++; | |
3624 | in_use++; | |
3625 | } else { | |
3626 | uintptr_t pgLo = round_page(current + sizeof(free_list_t) + sizeof(msize_t)); | |
3627 | uintptr_t pgHi = trunc_page(current + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t)); | |
3628 | ||
3629 | if (pgLo < pgHi) { | |
3630 | pgTot += (pgHi - pgLo); | |
3631 | } | |
3632 | } | |
3633 | current += TINY_BYTES_FOR_MSIZE(msize); | |
3634 | } | |
3635 | if ((b = _simple_salloc()) != NULL) { | |
3636 | _simple_sprintf(b, "Tiny region [%p-%p, %y] \t", (void *)start, TINY_REGION_END(region), (int)TINY_REGION_SIZE); | |
3637 | _simple_sprintf(b, "Magazine=%d \t", MAGAZINE_INDEX_FOR_TINY_REGION(region)); | |
3638 | _simple_sprintf(b, "Allocations in use=%d \t Bytes in use=%ly \t", in_use, BYTES_USED_FOR_TINY_REGION(region)); | |
1f2f436a A |
3639 | if (bytes_at_end || bytes_at_start) |
3640 | _simple_sprintf(b, "Untouched=%ly ", bytes_at_end + bytes_at_start); | |
34e8f829 A |
3641 | if (DEPOT_MAGAZINE_INDEX == MAGAZINE_INDEX_FOR_TINY_REGION(region)) { |
3642 | _simple_sprintf(b, "Advised MADV_FREE=%ly", pgTot); | |
3643 | } else { | |
3644 | _simple_sprintf(b, "Fragments subject to reclamation=%ly", pgTot); | |
3645 | } | |
3646 | if (verbose && in_use) { | |
3647 | _simple_sappend(b, "\n\tSizes in use: "); | |
3648 | for (ci = 0; ci < 1024; ci++) | |
3649 | if (counts[ci]) | |
3650 | _simple_sprintf(b, "%d[%d] ", TINY_BYTES_FOR_MSIZE(ci), counts[ci]); | |
3651 | } | |
3652 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b)); | |
3653 | _simple_sfree(b); | |
3654 | } | |
3655 | } | |
3656 | ||
3657 | static boolean_t | |
3658 | tiny_free_list_check(szone_t *szone, grain_t slot) | |
3659 | { | |
3660 | mag_index_t mag_index; | |
3661 | ||
3662 | for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) { | |
3663 | magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]); | |
3664 | SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr); | |
3665 | ||
3666 | unsigned count = 0; | |
3667 | free_list_t *ptr = szone->tiny_magazines[mag_index].mag_free_list[slot]; | |
3668 | boolean_t is_free; | |
3669 | free_list_t *previous = NULL; | |
3670 | ||
3671 | while (ptr) { | |
3672 | is_free = tiny_meta_header_is_free(ptr); | |
3673 | if (! is_free) { | |
3674 | malloc_printf("*** in-use ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr); | |
3675 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3676 | return 0; | |
3677 | } | |
3678 | if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) { | |
3679 | malloc_printf("*** unaligned ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr); | |
3680 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3681 | return 0; | |
3682 | } | |
3683 | if (!tiny_region_for_ptr_no_lock(szone, ptr)) { | |
3684 | malloc_printf("*** ptr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr); | |
3685 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3686 | return 0; | |
3687 | } | |
3688 | if (free_list_unchecksum_ptr(szone, &ptr->previous) != previous) { | |
3689 | malloc_printf("*** previous incorrectly set slot=%d count=%d ptr=%p\n", slot, count, ptr); | |
3690 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3691 | return 0; | |
3692 | } | |
3693 | previous = ptr; | |
3694 | ptr = free_list_unchecksum_ptr(szone, &ptr->next); | |
3695 | count++; | |
3696 | } | |
3697 | ||
3698 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
3699 | } | |
3700 | return 1; | |
3701 | } | |
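/*
 * In short, the walk above verifies four invariants for every node on each
 * magazine's tiny free list (including the Depot at index -1): the block's
 * meta-header says it is free, the pointer is TINY_QUANTUM aligned, it lies
 * inside a region known to this szone, and its (checksummed) previous link
 * points back at the node we just came from.  Any violation is reported and
 * the check fails.
 */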
3702 | ||
3703 | /********************* SMALL FREE LIST UTILITIES ************************/ | |
3704 | ||
3705 | /* | |
3706 | * Mark a block as free. Only the first quantum of a block is marked this way; |
3707 | * the remaining quanta are marked "middle". |
3708 | */ | |
3709 | static INLINE void | |
3710 | small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize) | |
3711 | { | |
3712 | meta_headers[index] = msize | SMALL_IS_FREE; | |
3713 | } | |
3714 | ||
3715 | /* | |
3716 | * Mark a block as in use. Only the first quantum of a block is marked this way; |
3717 | * the remaining quanta are marked "middle". |
3718 | */ | |
3719 | static INLINE void | |
3720 | small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize) | |
3721 | { | |
3722 | meta_headers[index] = msize; | |
3723 | } | |
3724 | ||
3725 | /* | |
3726 | * Mark a quantum as being the second or later in a block. | |
3727 | */ | |
3728 | static INLINE void | |
3729 | small_meta_header_set_middle(msize_t *meta_headers, msize_t index) | |
3730 | { | |
3731 | meta_headers[index] = 0; | |
3732 | } | |
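/*
 * Sketch of the resulting meta-header encoding (illustrative values only;
 * the index "i" and the msize of 3 are made up):
 *
 *     in-use, 3 quanta:  meta_headers[i]   = 3;
 *                        meta_headers[i+1] = meta_headers[i+2] = 0;   // "middle"
 *     free,   3 quanta:  meta_headers[i]   = 3 | SMALL_IS_FREE;
 *                        meta_headers[i+1] = meta_headers[i+2] = 0;   // "middle"
 *
 * A free block additionally stores its msize in the trailing msize_t of the
 * block (see SMALL_PREVIOUS_MSIZE in small_free_list_add_ptr below), which is
 * what makes backward coalescing in small_free_no_lock possible.
 */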
3733 | ||
3734 | /* | |
3735 | * Adds an item to the proper free list, and also marks the meta-header of the | |
3736 | * block properly. | |
3737 | * Assumes szone has been locked | |
3738 | */ | |
3739 | static void | |
3740 | small_free_list_add_ptr(szone_t *szone, magazine_t *small_mag_ptr, void *ptr, msize_t msize) | |
3741 | { | |
3742 | grain_t slot = (msize <= szone->num_small_slots) ? msize - 1 : szone->num_small_slots - 1; | |
3743 | free_list_t *free_ptr = ptr; | |
3744 | free_list_t *free_head = small_mag_ptr->mag_free_list[slot]; | |
3745 | void *follower; | |
3746 | ||
3747 | #if DEBUG_MALLOC | |
3748 | if (LOG(szone,ptr)) { | |
3749 | malloc_printf("in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize); | |
3750 | } | |
3751 | if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) { | |
3752 | szone_error(szone, 1, "small_free_list_add_ptr: Unaligned ptr", ptr, NULL); | |
3753 | } | |
3754 | #endif | |
3755 | small_meta_header_set_is_free(SMALL_META_HEADER_FOR_PTR(ptr), SMALL_META_INDEX_FOR_PTR(ptr), msize); | |
3756 | ||
3757 | if (free_head) { | |
3758 | #if DEBUG_MALLOC | |
3759 | if (free_list_unchecksum_ptr(szone, &free_head->previous)) { | |
3760 | szone_error(szone, 1, "small_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr, | |
3761 | "ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, (void *)free_head, free_head->previous.p); | |
3762 | } | |
3763 | if (!SMALL_PTR_IS_FREE(free_head)) { | |
3764 | szone_error(szone, 1, "small_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr, | |
3765 | "ptr=%p slot=%d free_head=%p\n", ptr, slot, (void *)free_head); | |
3766 | } | |
3767 | #endif | |
3768 | free_head->previous.u = free_list_checksum_ptr(szone, free_ptr); | |
3769 | } else { | |
3770 | BITMAPN_SET(small_mag_ptr->mag_bitmap, slot); | |
3771 | } | |
3772 | free_ptr->previous.u = free_list_checksum_ptr(szone, NULL); | |
3773 | free_ptr->next.u = free_list_checksum_ptr(szone, free_head); | |
3774 | ||
3775 | small_mag_ptr->mag_free_list[slot] = free_ptr; | |
3776 | ||
3777 | // Store msize at the end of the block denoted by "ptr" (i.e. at a negative offset from "follower") | |
3778 | follower = (void *)((uintptr_t)ptr + SMALL_BYTES_FOR_MSIZE(msize)); | |
3779 | SMALL_PREVIOUS_MSIZE(follower) = msize; | |
3780 | } | |
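/*
 * The slot computation above maps block sizes onto free lists roughly as
 * follows (a sketch; num_small_slots is whatever the zone was configured with):
 *
 *     msize 1                  -> slot 0
 *     msize 2                  -> slot 1
 *     ...
 *     msize >= num_small_slots -> slot num_small_slots - 1 (catch-all)
 *
 * The last slot therefore holds a mixed-size list, which is why the lookup
 * paths (small_find_msize_region, small_malloc_from_free_list) treat it
 * specially.
 */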
3781 | ||
3782 | /* | |
3783 | * Removes the item pointed to by ptr in the proper free list. | |
3784 | * Assumes szone has been locked | |
3785 | */ | |
3786 | static void | |
3787 | small_free_list_remove_ptr(szone_t *szone, magazine_t *small_mag_ptr, void *ptr, msize_t msize) | |
3788 | { | |
3789 | grain_t slot = (msize <= szone->num_small_slots) ? msize - 1 : szone->num_small_slots - 1; | |
3790 | free_list_t *free_ptr = ptr, *next, *previous; | |
3791 | ||
3792 | next = free_list_unchecksum_ptr(szone, &free_ptr->next); | |
3793 | previous = free_list_unchecksum_ptr(szone, &free_ptr->previous); | |
3794 | ||
3795 | #if DEBUG_MALLOC | |
3796 | if (LOG(szone,ptr)) { | |
3797 | malloc_printf("In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize); | |
3798 | } | |
3799 | #endif | |
3800 | ||
3801 | if (!previous) { | |
3802 | // The block to remove is the head of the free list | |
3803 | #if DEBUG_MALLOC | |
3804 | if (small_mag_ptr->mag_free_list[slot] != ptr) { | |
3805 | szone_error(szone, 1, "small_free_list_remove_ptr: Internal invariant broken (small_mag_ptr->mag_free_list[slot])", ptr, | |
3806 | "ptr=%p slot=%d msize=%d small_mag_ptr->mag_free_list[slot]=%p\n", | |
3807 | ptr, slot, msize, (void *)small_mag_ptr->mag_free_list[slot]); | |
3808 | return; | |
3809 | } | |
3810 | #endif | |
3811 | small_mag_ptr->mag_free_list[slot] = next; | |
3812 | if (!next) BITMAPN_CLR(small_mag_ptr->mag_bitmap, slot); | |
3813 | } else { | |
3814 | // We know free_ptr is already checksummed, so we don't need to do it | |
3815 | // again. | |
3816 | previous->next = free_ptr->next; | |
3817 | } | |
3818 | if (next) { | |
3819 | // We know free_ptr is already checksummed, so we don't need to do it | |
3820 | // again. | |
3821 | next->previous = free_ptr->previous; | |
3822 | } | |
3823 | } | |
3824 | ||
3825 | /* | |
3826 | * small_region_for_ptr_no_lock - Returns the small region containing the pointer, | |
3827 | * or NULL if not found. | |
3828 | */ | |
3829 | static INLINE region_t | |
3830 | small_region_for_ptr_no_lock(szone_t *szone, const void *ptr) | |
3831 | { | |
3832 | rgnhdl_t r = hash_lookup_region_no_lock(szone->small_region_generation->hashed_regions, | |
3833 | szone->small_region_generation->num_regions_allocated, | |
3834 | szone->small_region_generation->num_regions_allocated_shift, | |
3835 | SMALL_REGION_FOR_PTR(ptr)); | |
3836 | return r ? *r : r; | |
3837 | } | |
3838 | ||
3839 | static void | |
3840 | small_finalize_region(szone_t *szone, magazine_t *small_mag_ptr) { | |
3841 | void *last_block, *previous_block; | |
3842 | msize_t last_msize, previous_msize, last_index; | |
3843 | ||
34e8f829 A |
3844 | // It is possible that the block prior to the last block in the region has |
3845 | // been freed, but was not coalesced with the free bytes at the end of the |
3846 | // region, since we treat the bytes at the end of the region as "in use" in |
3847 | // the meta headers. Attempt to coalesce the last block with the previous | |
3848 | // block, so we don't violate the "no consecutive free blocks" invariant. | |
3849 | // | |
3850 | // FIXME: If we could calculate the previous small free size in the same | |
3851 | // manner as tiny_previous_preceding_free, it would eliminate the | |
3852 | // index & previous msize checks, which are a guard against reading | |
3853 | // bogus data out of in-use or written-on-freed memory. | |
3854 | // | |
3855 | // FIXME: Need to investigate how much work would be required to increase | |
3856 | // 'mag_bytes_free_at_end' when freeing the preceding block, rather | |
3857 | // than performing this workaround. | |
3858 | // | |
1f2f436a A |
3859 | if (small_mag_ptr->mag_bytes_free_at_end) { |
3860 | last_block = SMALL_REGION_END(small_mag_ptr->mag_last_region) - small_mag_ptr->mag_bytes_free_at_end; | |
3861 | last_msize = SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end); | |
3862 | ||
34e8f829 A |
3863 | last_index = SMALL_META_INDEX_FOR_PTR(last_block); |
3864 | previous_msize = SMALL_PREVIOUS_MSIZE(last_block); | |
3865 | ||
3866 | if (last_index && (previous_msize <= last_index)) { | |
3867 | previous_block = (void *)((uintptr_t)last_block - SMALL_BYTES_FOR_MSIZE(previous_msize)); | |
3868 | if (*SMALL_METADATA_FOR_PTR(previous_block) == (previous_msize | SMALL_IS_FREE)) { | |
3869 | msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(last_block); | |
3870 | ||
3871 | small_meta_header_set_middle(meta_headers, last_index); | |
3872 | small_free_list_remove_ptr(szone, small_mag_ptr, previous_block, previous_msize); | |
3873 | last_block = (void *)((uintptr_t)last_block - SMALL_BYTES_FOR_MSIZE(previous_msize)); | |
3874 | last_msize += previous_msize; | |
3875 | } | |
3876 | } | |
3877 | ||
3878 | // splice last_block into the free list | |
3879 | small_free_list_add_ptr(szone, small_mag_ptr, last_block, last_msize); | |
3880 | small_mag_ptr->mag_bytes_free_at_end = 0; | |
1f2f436a A |
3881 | } |
3882 | ||
3883 | #if ASLR_INTERNAL | |
3884 | if (small_mag_ptr->mag_bytes_free_at_start) { | |
3885 | last_block = SMALL_REGION_ADDRESS(small_mag_ptr->mag_last_region); | |
3886 | last_msize = SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_start); | |
3887 | ||
3888 | void *next_block = (void *) ((uintptr_t)last_block + small_mag_ptr->mag_bytes_free_at_start); | |
3889 | if (SMALL_PTR_IS_FREE(next_block)) { | |
3890 | msize_t next_msize = SMALL_PTR_SIZE(next_block); | |
3891 | ||
3892 | small_meta_header_set_middle(SMALL_META_HEADER_FOR_PTR(next_block), SMALL_META_INDEX_FOR_PTR(next_block)); | |
3893 | small_free_list_remove_ptr(szone, small_mag_ptr, next_block, next_msize); | |
3894 | last_msize += next_msize; | |
3895 | } | |
3896 | ||
3897 | // splice last_block into the free list | |
3898 | small_free_list_add_ptr(szone, small_mag_ptr, last_block, last_msize); | |
3899 | small_mag_ptr->mag_bytes_free_at_start = 0; | |
3900 | } | |
3901 | #endif | |
3902 | ||
3903 | // TODO: Will we ever need to coalesce the blocks at the beginning and end when we finalize? | |
3904 | ||
34e8f829 A |
3905 | small_mag_ptr->mag_last_region = NULL; |
3906 | } | |
3907 | ||
3908 | static int | |
3909 | small_free_detach_region(szone_t *szone, magazine_t *small_mag_ptr, region_t r) { | |
3910 | unsigned char *ptr = SMALL_REGION_ADDRESS(r); | |
3911 | msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); | |
3912 | uintptr_t start = (uintptr_t)SMALL_REGION_ADDRESS(r); | |
3913 | uintptr_t current = start; | |
3914 | uintptr_t limit = (uintptr_t)SMALL_REGION_END(r); | |
3915 | int total_alloc = 0; | |
3916 | ||
3917 | while (current < limit) { | |
3918 | unsigned index = SMALL_META_INDEX_FOR_PTR(current); | |
3919 | msize_t msize_and_free = meta_headers[index]; | |
3920 | boolean_t is_free = msize_and_free & SMALL_IS_FREE; | |
3921 | msize_t msize = msize_and_free & ~ SMALL_IS_FREE; | |
3922 | ||
3923 | if (!msize) { | |
3924 | #if DEBUG_MALLOC | |
3925 | malloc_printf("*** small_free_detach_region error with %p: msize=%d is_free =%d\n", | |
3926 | (void *)current, msize, is_free); | |
3927 | #endif | |
3928 | break; | |
3929 | } | |
3930 | if (is_free) { | |
3931 | small_free_list_remove_ptr(szone, small_mag_ptr, (void *)current, msize); | |
3932 | } else { | |
3933 | total_alloc++; | |
3934 | } | |
3935 | current += SMALL_BYTES_FOR_MSIZE(msize); | |
3936 | } | |
3937 | return total_alloc; | |
3938 | } | |
3939 | ||
3940 | static size_t | |
3941 | small_free_reattach_region(szone_t *szone, magazine_t *small_mag_ptr, region_t r) { | |
3942 | unsigned char *ptr = SMALL_REGION_ADDRESS(r); | |
3943 | msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); | |
3944 | uintptr_t start = (uintptr_t)SMALL_REGION_ADDRESS(r); | |
3945 | uintptr_t current = start; | |
3946 | uintptr_t limit = (uintptr_t)SMALL_REGION_END(r); | |
3947 | size_t total_alloc = 0; | |
3948 | ||
3949 | while (current < limit) { | |
3950 | unsigned index = SMALL_META_INDEX_FOR_PTR(current); | |
3951 | msize_t msize_and_free = meta_headers[index]; | |
3952 | boolean_t is_free = msize_and_free & SMALL_IS_FREE; | |
3953 | msize_t msize = msize_and_free & ~ SMALL_IS_FREE; | |
3954 | ||
3955 | if (!msize) { | |
3956 | #if DEBUG_MALLOC | |
3957 | malloc_printf("*** small_free_reattach_region error with %p: msize=%d is_free =%d\n", | |
3958 | (void *)current, msize, is_free); | |
3959 | #endif | |
3960 | break; | |
3961 | } | |
3962 | if (is_free) { | |
3963 | small_free_list_add_ptr(szone, small_mag_ptr, (void *)current, msize); | |
3964 | } else { | |
3965 | total_alloc += SMALL_BYTES_FOR_MSIZE(msize); | |
3966 | } | |
3967 | current += SMALL_BYTES_FOR_MSIZE(msize); | |
3968 | } | |
3969 | return total_alloc; | |
3970 | } | |
3971 | ||
1f2f436a A |
3972 | typedef struct { |
3973 | uint16_t pnum, size; | |
3974 | } small_pg_pair_t; | |
3975 | ||
3976 | static void NOINLINE /* want private stack frame for automatic array */ | |
3977 | small_free_scan_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r) { | |
34e8f829 A |
3978 | uintptr_t start = (uintptr_t)SMALL_REGION_ADDRESS(r); |
3979 | uintptr_t current = start; | |
3980 | uintptr_t limit = (uintptr_t)SMALL_REGION_END(r); | |
3981 | msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(start); | |
1f2f436a A |
3982 | small_pg_pair_t advisory[((SMALL_REGION_PAYLOAD_BYTES + vm_page_size - 1) >> vm_page_shift) >> 1]; // 4096 bytes, stack allocated |
3983 | int advisories = 0; | |
34e8f829 A |
3984 | |
3985 | // Scan the metadata, identifying blocks which span one or more pages. Mark those pages MADV_FREE, taking care to preserve free list |
3986 | // management data. |
3987 | while (current < limit) { | |
3988 | unsigned index = SMALL_META_INDEX_FOR_PTR(current); | |
3989 | msize_t msize_and_free = meta_headers[index]; | |
3990 | boolean_t is_free = msize_and_free & SMALL_IS_FREE; | |
3991 | msize_t msize = msize_and_free & ~ SMALL_IS_FREE; | |
3992 | ||
3993 | if (is_free && !msize && (current == start)) { | |
3994 | #if DEBUG_MALLOC | |
3995 | // first block is all free | |
1f2f436a | 3996 | malloc_printf("*** small_free_scan_madvise_free first block is all free! %p: msize=%d is_free =%d\n", |
34e8f829 A |
3997 | (void *)current, msize, is_free); |
3998 | #endif | |
3999 | uintptr_t pgLo = round_page(start + sizeof(free_list_t) + sizeof(msize_t)); | |
4000 | uintptr_t pgHi = trunc_page(start + SMALL_REGION_SIZE - sizeof(msize_t)); | |
4001 | ||
4002 | if (pgLo < pgHi) { | |
1f2f436a A |
4003 | advisory[advisories].pnum = (pgLo - start) >> vm_page_shift; |
4004 | advisory[advisories].size = (pgHi - pgLo) >> vm_page_shift; | |
4005 | advisories++; | |
34e8f829 A |
4006 | } |
4007 | break; | |
4008 | } | |
4009 | if (!msize) { | |
4010 | #if DEBUG_MALLOC | |
1f2f436a | 4011 | malloc_printf("*** small_free_scan_madvise_free error with %p: msize=%d is_free =%d\n", |
34e8f829 A |
4012 | (void *)current, msize, is_free); |
4013 | #endif | |
4014 | break; | |
4015 | } | |
4016 | if (is_free) { | |
4017 | uintptr_t pgLo = round_page(current + sizeof(free_list_t) + sizeof(msize_t)); | |
4018 | uintptr_t pgHi = trunc_page(current + SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t)); | |
4019 | ||
4020 | if (pgLo < pgHi) { | |
1f2f436a A |
4021 | advisory[advisories].pnum = (pgLo - start) >> vm_page_shift; |
4022 | advisory[advisories].size = (pgHi - pgLo) >> vm_page_shift; | |
4023 | advisories++; | |
34e8f829 A |
4024 | } |
4025 | } | |
4026 | current += SMALL_BYTES_FOR_MSIZE(msize); | |
4027 | } | |
4028 | ||
1f2f436a A |
4029 | if (advisories > 0) { |
4030 | int i; | |
4031 | ||
4032 | OSAtomicIncrement32Barrier(&(REGION_TRAILER_FOR_SMALL_REGION(r)->pinned_to_depot)); | |
4033 | SZONE_MAGAZINE_PTR_UNLOCK(szone, depot_ptr); | |
4034 | for (i = 0; i < advisories; ++i) { | |
4035 | uintptr_t addr = (advisory[i].pnum << vm_page_shift) + start; | |
4036 | size_t size = advisory[i].size << vm_page_shift; | |
4037 | ||
4038 | #if TARGET_OS_EMBEDDED | |
4039 | madvise_free_range(szone, r, addr, addr + size, NULL); | |
4040 | #else | |
4041 | madvise_free_range(szone, r, addr, addr + size); | |
4042 | #endif | |
4043 | } | |
4044 | SZONE_MAGAZINE_PTR_LOCK(szone, depot_ptr); | |
4045 | OSAtomicDecrement32Barrier(&(REGION_TRAILER_FOR_SMALL_REGION(r)->pinned_to_depot)); | |
34e8f829 A |
4046 | } |
4047 | } | |
4048 | ||
1f2f436a | 4049 | static region_t |
34e8f829 A |
4050 | small_free_try_depot_unmap_no_lock(szone_t *szone, magazine_t *depot_ptr, region_trailer_t *node) |
4051 | { | |
34e8f829 | 4052 | if (0 < node->bytes_used || |
1f2f436a | 4053 | 0 < node->pinned_to_depot || |
34e8f829 | 4054 | depot_ptr->recirculation_entries < (szone->num_small_magazines * 2)) { |
1f2f436a | 4055 | return NULL; |
34e8f829 A |
4056 | } |
4057 | ||
1f2f436a | 4058 | // disconnect first node from Depot |
34e8f829 A |
4059 | recirc_list_extract(szone, depot_ptr, node); |
4060 | ||
4061 | // Iterate the region pulling its free entries off the (locked) Depot's free list | |
4062 | region_t sparse_region = SMALL_REGION_FOR_PTR(node); | |
4063 | int objects_in_use = small_free_detach_region(szone, depot_ptr, sparse_region); | |
4064 | ||
4065 | if (0 == objects_in_use) { | |
4066 | // Invalidate the hash table entry for this region with HASHRING_REGION_DEALLOCATED. | |
4067 | // Using HASHRING_REGION_DEALLOCATED preserves the collision chain, using HASHRING_OPEN_ENTRY (0) would not. | |
4068 | rgnhdl_t pSlot = hash_lookup_region_no_lock(szone->small_region_generation->hashed_regions, | |
4069 | szone->small_region_generation->num_regions_allocated, | |
4070 | szone->small_region_generation->num_regions_allocated_shift, sparse_region); | |
1f2f436a A |
4071 | if (NULL == pSlot) { |
4072 | szone_error(szone, 1, "small_free_try_depot_unmap_no_lock hash lookup failed:", NULL, "%p\n", sparse_region); | |
4073 | return NULL; | |
4074 | } | |
34e8f829 A |
4075 | *pSlot = HASHRING_REGION_DEALLOCATED; |
4076 | depot_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES; | |
34e8f829 | 4077 | __sync_fetch_and_add( &(szone->num_small_regions_dealloc), 1); // Atomically increment num_small_regions_dealloc |
34e8f829 | 4078 | |
1f2f436a A |
4079 | // Caller will transfer ownership of the region back to the OS with no locks held |
4080 | MAGMALLOC_DEALLOCREGION((void *)szone, (void *)sparse_region, SMALL_REGION_SIZE); // DTrace USDT Probe | |
4081 | return sparse_region; | |
34e8f829 A |
4082 | |
4083 | } else { | |
4084 | szone_error(szone, 1, "small_free_try_depot_unmap_no_lock objects_in_use not zero:", NULL, "%d\n", objects_in_use); | |
1f2f436a | 4085 | return NULL; |
34e8f829 A |
4086 | } |
4087 | } | |
4088 | ||
1f2f436a | 4089 | static boolean_t |
34e8f829 A |
4090 | small_free_do_recirc_to_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index) |
4091 | { | |
4092 | // The entire magazine crossed the "emptiness threshold". Transfer a region | |
4093 | // from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e., |
4094 | // is at least fraction "f" empty). Such a region will be marked "suitable" on the recirculation list. |
4095 | region_trailer_t *node = small_mag_ptr->firstNode; | |
4096 | ||
4097 | while (node && !node->recirc_suitable) { | |
4098 | node = node->next; | |
4099 | } | |
4100 | ||
4101 | if (NULL == node) { | |
4102 | #if DEBUG_MALLOC | |
4103 | malloc_printf("*** small_free_do_recirc_to_depot end of list\n"); | |
4104 | #endif | |
1f2f436a | 4105 | return TRUE; // Caller must SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); |
34e8f829 A |
4106 | } |
4107 | ||
4108 | region_t sparse_region = SMALL_REGION_FOR_PTR(node); | |
4109 | ||
1f2f436a A |
4110 | // Deal with unclaimed memory -- mag_bytes_free_at_end or mag_bytes_free_at start |
4111 | if (sparse_region == small_mag_ptr->mag_last_region && (small_mag_ptr->mag_bytes_free_at_end || small_mag_ptr->mag_bytes_free_at_start)) { | |
34e8f829 A |
4112 | small_finalize_region(szone, small_mag_ptr); |
4113 | } | |
4114 | ||
1f2f436a | 4115 | // disconnect "suitable" node from magazine |
34e8f829 A |
4116 | recirc_list_extract(szone, small_mag_ptr, node); |
4117 | ||
4118 | // Iterate the region pulling its free entries off its (locked) magazine's free list | |
4119 | int objects_in_use = small_free_detach_region(szone, small_mag_ptr, sparse_region); | |
4120 | magazine_t *depot_ptr = &(szone->small_magazines[DEPOT_MAGAZINE_INDEX]); | |
4121 | ||
4122 | // hand over the region to the (locked) Depot | |
4123 | SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr); | |
4124 | // this will cause small_free_list_add_ptr called by small_free_reattach_region to use | |
4125 | // the depot as its target magazine, rather than magazine formerly associated with sparse_region | |
4126 | MAGAZINE_INDEX_FOR_SMALL_REGION(sparse_region) = DEPOT_MAGAZINE_INDEX; | |
1f2f436a | 4127 | node->pinned_to_depot = 0; |
34e8f829 A |
4128 | |
4129 | // Iterate the region putting its free entries on Depot's free list | |
4130 | size_t bytes_inplay = small_free_reattach_region(szone, depot_ptr, sparse_region); | |
4131 | ||
4132 | small_mag_ptr->mag_num_bytes_in_objects -= bytes_inplay; | |
4133 | small_mag_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES; | |
4134 | small_mag_ptr->mag_num_objects -= objects_in_use; | |
4135 | ||
1f2f436a A |
4136 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); // Unlock the originating magazine |
4137 | ||
34e8f829 A |
4138 | depot_ptr->mag_num_bytes_in_objects += bytes_inplay; |
4139 | depot_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES; | |
4140 | depot_ptr->mag_num_objects += objects_in_use; | |
4141 | ||
1f2f436a A |
4142 | // connect to Depot as last node |
4143 | recirc_list_splice_last(szone, depot_ptr, node); | |
34e8f829 | 4144 | |
1f2f436a A |
4145 | MAGMALLOC_RECIRCREGION((void *)szone, (int)mag_index, (void *)sparse_region, SMALL_REGION_SIZE, |
4146 | (int)BYTES_USED_FOR_SMALL_REGION(sparse_region)); // DTrace USDT Probe | |
34e8f829 A |
4147 | |
4148 | // Mark free'd dirty pages with MADV_FREE to reduce memory pressure | |
1f2f436a | 4149 | small_free_scan_madvise_free(szone, depot_ptr, sparse_region); |
34e8f829 | 4150 | |
1f2f436a A |
4151 | // If the region is entirely empty vm_deallocate() it outside the depot lock |
4152 | region_t r_dealloc = small_free_try_depot_unmap_no_lock(szone, depot_ptr, node); | |
34e8f829 | 4153 | SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr); |
1f2f436a A |
4154 | if (r_dealloc) |
4155 | deallocate_pages(szone, r_dealloc, SMALL_REGION_SIZE, 0); | |
4156 | return FALSE; // Caller need not unlock the originating magazine | |
34e8f829 A |
4157 | } |
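/*
 * Locking summary for the recirculation path above (descriptive note): the
 * caller enters with the originating magazine locked; the Depot is then
 * locked, the region's free blocks are re-attached to it, and the originating
 * magazine is unlocked once its counters have been adjusted.  The madvise
 * scan drops the Depot lock while issuing the madvise() calls, bumping the
 * region trailer's pinned_to_depot count so the region cannot be unmapped in
 * the meantime, and a fully empty region is handed back to the OS only after
 * the Depot lock is released.  The FALSE return tells the caller that its
 * magazine lock has already been released.
 */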
4158 | ||
7ba935f9 A |
4159 | static region_t |
4160 | small_find_msize_region(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize) | |
4161 | { | |
4162 | free_list_t *ptr; | |
4163 | grain_t slot = (msize <= szone->num_small_slots) ? msize - 1 : szone->num_small_slots - 1; | |
4164 | free_list_t **free_list = small_mag_ptr->mag_free_list; | |
4165 | free_list_t **the_slot = free_list + slot; | |
4166 | free_list_t **limit; | |
4167 | unsigned bitmap; | |
4168 | ||
4169 | // Assumes we've locked the magazine | |
4170 | CHECK_MAGAZINE_PTR_LOCKED(szone, small_mag_ptr, __PRETTY_FUNCTION__); | |
4171 | ||
4172 | // Look for an exact match by checking the freelist for this msize. | |
4173 | ptr = *the_slot; | |
4174 | if (ptr) | |
4175 | return SMALL_REGION_FOR_PTR(ptr); | |
4176 | ||
4177 | // Mask off the bits representing slots holding free blocks smaller than | |
4178 | // the size we need. | |
4179 | if (szone->is_largemem) { | |
4180 | // BITMAPN_CTZ implementation | |
4181 | unsigned idx = slot >> 5; | |
4182 | bitmap = 0; | |
4183 | unsigned mask = ~ ((1 << (slot & 31)) - 1); | |
4184 | for ( ; idx < SMALL_BITMAP_WORDS; ++idx ) { | |
4185 | bitmap = small_mag_ptr->mag_bitmap[idx] & mask; | |
4186 | if (bitmap != 0) | |
4187 | break; | |
4188 | mask = ~0U; | |
4189 | } | |
4190 | // Check for fallthrough: No bits set in bitmap | |
4191 | if ((bitmap == 0) && (idx == SMALL_BITMAP_WORDS)) | |
4192 | return NULL; | |
4193 | ||
4194 | // Start looking at the first set bit, plus 32 bits for every word of | |
4195 | // zeroes or entries that were too small. | |
4196 | slot = BITMAP32_CTZ((&bitmap)) + (idx * 32); | |
4197 | } else { | |
4198 | bitmap = small_mag_ptr->mag_bitmap[0] & ~ ((1 << slot) - 1); | |
4199 | if (!bitmap) | |
4200 | return NULL; | |
4201 | ||
4202 | slot = BITMAP32_CTZ((&bitmap)); | |
4203 | } | |
4204 | limit = free_list + szone->num_small_slots - 1; | |
4205 | free_list += slot; | |
4206 | ||
4207 | if (free_list < limit) { | |
4208 | ptr = *free_list; | |
4209 | if (ptr) | |
4210 | return SMALL_REGION_FOR_PTR(ptr); | |
4211 | else { | |
4212 | /* Shouldn't happen. Fall through to look at last slot. */ | |
4213 | #if DEBUG_MALLOC | |
4214 | malloc_printf("in small_malloc_from_free_list(), mag_bitmap out of sync, slot=%d\n",slot); | |
4215 | #endif | |
4216 | } | |
4217 | } | |
4218 | ||
4219 | // We are now looking at the last slot, which contains blocks equal to, or | |
4220 | // due to coalescing of free blocks, larger than (num_small_slots - 1) * (small quantum size). | |
4221 | ptr = *limit; | |
4222 | if (ptr) | |
4223 | return SMALL_REGION_FOR_PTR(ptr); | |
4224 | ||
4225 | return NULL; | |
4226 | } | |
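/*
 * Worked example of the bitmap scan above (all numbers invented for
 * illustration): with a request that maps to slot 10 and
 * mag_bitmap[0] == 0x00049000 (bits 12, 15 and 18 set), the mask
 * ~((1 << 10) - 1) clears bits 0-9, the masked value is still 0x00049000,
 * and BITMAP32_CTZ() yields 12 -- the smallest slot whose free list can
 * satisfy the request.
 */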
4227 | ||
34e8f829 | 4228 | static boolean_t |
7ba935f9 | 4229 | small_get_region_from_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize) |
34e8f829 A |
4230 | { |
4231 | magazine_t *depot_ptr = &(szone->small_magazines[DEPOT_MAGAZINE_INDEX]); | |
4232 | ||
4233 | /* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */ | |
4234 | if (szone->num_small_magazines == 1) // Uniprocessor, single magazine, so no recirculation necessary | |
4235 | return 0; | |
4236 | ||
4237 | #if DEBUG_MALLOC | |
4238 | if (DEPOT_MAGAZINE_INDEX == mag_index) { | |
4239 | szone_error(szone, 1, "small_get_region_from_depot called for magazine index -1", NULL, NULL); | |
4240 | return 0; | |
4241 | } | |
4242 | #endif | |
4243 | ||
4244 | SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr); | |
4245 | ||
1f2f436a A |
4246 | // Appropriate a Depot'd region that can satisfy requested msize. |
4247 | region_trailer_t *node; | |
4248 | region_t sparse_region; | |
4249 | ||
4250 | while (1) { | |
4251 | sparse_region = small_find_msize_region(szone, depot_ptr, DEPOT_MAGAZINE_INDEX, msize); | |
4252 | if (NULL == sparse_region) { // Depot empty? | |
4253 | SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr); | |
4254 | return 0; | |
4255 | } | |
4256 | ||
4257 | node = REGION_TRAILER_FOR_SMALL_REGION(sparse_region); | |
4258 | if (0 >= node->pinned_to_depot) | |
4259 | break; | |
4260 | ||
7ba935f9 | 4261 | SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr); |
1f2f436a A |
4262 | pthread_yield_np(); |
4263 | SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr); | |
34e8f829 | 4264 | } |
7ba935f9 | 4265 | |
7ba935f9 | 4266 | // disconnect node from Depot |
34e8f829 A |
4267 | recirc_list_extract(szone, depot_ptr, node); |
4268 | ||
4269 | // Iterate the region pulling its free entries off the (locked) Depot's free list | |
34e8f829 A |
4270 | int objects_in_use = small_free_detach_region(szone, depot_ptr, sparse_region); |
4271 | ||
4272 | // Transfer ownership of the region | |
4273 | MAGAZINE_INDEX_FOR_SMALL_REGION(sparse_region) = mag_index; | |
1f2f436a | 4274 | node->pinned_to_depot = 0; |
34e8f829 A |
4275 | |
4276 | // Iterate the region putting its free entries on its new (locked) magazine's free list | |
4277 | size_t bytes_inplay = small_free_reattach_region(szone, small_mag_ptr, sparse_region); | |
4278 | ||
4279 | depot_ptr->mag_num_bytes_in_objects -= bytes_inplay; | |
4280 | depot_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES; | |
4281 | depot_ptr->mag_num_objects -= objects_in_use; | |
4282 | ||
4283 | small_mag_ptr->mag_num_bytes_in_objects += bytes_inplay; | |
4284 | small_mag_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES; | |
4285 | small_mag_ptr->mag_num_objects += objects_in_use; | |
4286 | ||
1f2f436a | 4287 | // connect to magazine as first node |
34e8f829 A |
4288 | recirc_list_splice_first(szone, small_mag_ptr, node); |
4289 | ||
4290 | SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr); | |
4291 | ||
1f2f436a A |
4292 | // madvise() outside the Depot lock |
4293 | #if TARGET_OS_EMBEDDED | |
4294 | if (node->failedREUSE) { | |
4295 | #else | |
7ba935f9 A |
4296 | if (node->failedREUSE || |
4297 | -1 == madvise((void *)sparse_region, SMALL_REGION_PAYLOAD_BYTES, MADV_FREE_REUSE)) { | |
1f2f436a | 4298 | #endif |
34e8f829 | 4299 | /* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */ |
7ba935f9 | 4300 | #if DEBUG_MADVISE |
1f2f436a A |
4301 | szone_error(szone, 0, "small_get_region_from_depot madvise(..., MADV_FREE_REUSE) failed", |
4302 | sparse_region, "length=%d\n", SMALL_REGION_PAYLOAD_BYTES); | |
34e8f829 | 4303 | #endif |
7ba935f9 | 4304 | node->failedREUSE = TRUE; |
34e8f829 A |
4305 | } |
4306 | ||
1f2f436a A |
4307 | MAGMALLOC_DEPOTREGION((void *)szone, (int)mag_index, (void *)sparse_region, SMALL_REGION_SIZE, |
4308 | (int)BYTES_USED_FOR_SMALL_REGION(sparse_region)); // DTrace USDT Probe | |
7ba935f9 | 4309 | |
34e8f829 A |
4310 | return 1; |
4311 | } | |
4312 | ||
34e8f829 A |
4313 | #define K 1.5 // headroom measured in number of 8MB regions |
4314 | #define DENSITY_THRESHOLD(a) \ | |
4315 | ((a) - ((a) >> 2)) // "Emptiness" f = 0.25, so "Density" is (1 - f)*a. Generally: ((a) - ((a) >> -log2(f))) | |
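/*
 * In concrete terms: with f = 0.25, DENSITY_THRESHOLD(a) = a - a/4 = 0.75 * a,
 * i.e. a magazine or region counts as "dense" while more than three quarters
 * of its payload bytes are in use.  The K = 1.5 regions of headroom appears
 * below as the "(3 * SMALL_REGION_PAYLOAD_BYTES) / 2" term in the
 * recirculation test in small_free_no_lock().
 */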
4316 | ||
1f2f436a | 4317 | static INLINE boolean_t |
34e8f829 A |
4318 | small_free_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, region_t region, void *ptr, msize_t msize) |
4319 | { | |
4320 | msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); | |
4321 | unsigned index = SMALL_META_INDEX_FOR_PTR(ptr); | |
4322 | void *original_ptr = ptr; | |
4323 | size_t original_size = SMALL_BYTES_FOR_MSIZE(msize); | |
4324 | unsigned char *next_block = ((unsigned char *)ptr + original_size); | |
4325 | msize_t next_index = index + msize; | |
4326 | msize_t previous_msize, next_msize; | |
4327 | void *previous; | |
34e8f829 A |
4328 | |
4329 | #if DEBUG_MALLOC | |
4330 | if (LOG(szone,ptr)) { | |
4331 | malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize); | |
4332 | } | |
1f2f436a | 4333 | if (!msize) { |
34e8f829 A |
4334 | szone_error(szone, 1, "trying to free small block that is too small", ptr, |
4335 | "in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize); | |
4336 | } | |
4337 | #endif | |
4338 | ||
4339 | // We try to coalesce this block with the preceding one |
4340 | if (index && (SMALL_PREVIOUS_MSIZE(ptr) <= index)) { | |
4341 | previous_msize = SMALL_PREVIOUS_MSIZE(ptr); | |
4342 | if (meta_headers[index - previous_msize] == (previous_msize | SMALL_IS_FREE)) { | |
4343 | previous = (void *)((uintptr_t)ptr - SMALL_BYTES_FOR_MSIZE(previous_msize)); | |
4344 | // previous is really to be coalesced | |
34e8f829 A |
4345 | #if DEBUG_MALLOC |
4346 | if (LOG(szone, ptr) || LOG(szone,previous)) { | |
4347 | malloc_printf("in small_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous); | |
4348 | } | |
4349 | #endif | |
4350 | small_free_list_remove_ptr(szone, small_mag_ptr, previous, previous_msize); | |
4351 | small_meta_header_set_middle(meta_headers, index); | |
4352 | ptr = previous; | |
4353 | msize += previous_msize; | |
4354 | index -= previous_msize; | |
4355 | } | |
4356 | } | |
4357 | // We try to coalesce with the next block | |
4358 | if ((next_block < SMALL_REGION_END(region)) && (meta_headers[next_index] & SMALL_IS_FREE)) { | |
4359 | // next block is free, we coalesce | |
34e8f829 A |
4360 | next_msize = meta_headers[next_index] & ~ SMALL_IS_FREE; |
4361 | #if DEBUG_MALLOC | |
4362 | if (LOG(szone,ptr)) | |
4363 | malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n", | |
4364 | ptr, msize, next_block, next_msize); | |
4365 | #endif | |
4366 | small_free_list_remove_ptr(szone, small_mag_ptr, next_block, next_msize); | |
4367 | small_meta_header_set_middle(meta_headers, next_index); | |
4368 | msize += next_msize; | |
4369 | } | |
4370 | if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) { | |
4371 | if (!msize) { | |
4372 | szone_error(szone, 1, "incorrect size information - block header was damaged", ptr, NULL); | |
4373 | } else { | |
4374 | memset(ptr, 0x55, SMALL_BYTES_FOR_MSIZE(msize)); | |
4375 | } | |
4376 | } | |
4377 | small_free_list_add_ptr(szone, small_mag_ptr, ptr, msize); | |
4378 | small_mag_ptr->mag_num_objects--; | |
4379 | // we use original_size and not msize to avoid double counting the coalesced blocks | |
4380 | small_mag_ptr->mag_num_bytes_in_objects -= original_size; | |
4381 | ||
4382 | // Update this region's bytes in use count | |
4383 | region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(region); | |
4384 | size_t bytes_used = node->bytes_used - original_size; | |
4385 | node->bytes_used = bytes_used; | |
4386 | ||
511daa4c | 4387 | #if !TARGET_OS_EMBEDDED // Always madvise for embedded platforms |
34e8f829 A |
4388 | /* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */ |
4389 | if (szone->num_small_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary | |
4390 | /* NOTHING */ | |
4391 | } else if (DEPOT_MAGAZINE_INDEX != mag_index) { | |
4392 | // Emptiness discriminant | |
4393 | if (bytes_used < DENSITY_THRESHOLD(SMALL_REGION_PAYLOAD_BYTES)) { | |
4394 | /* Region has crossed threshold from density to sparsity. Mark it "suitable" on the | |
4395 | recirculation candidates list. */ | |
4396 | node->recirc_suitable = TRUE; | |
4397 | } else { | |
4398 | /* After this free, we've found the region is still dense, so it must have been even more so before | |
4399 | the free. That implies the region is already correctly marked. Do nothing. */ | |
4400 | } | |
4401 | ||
4402 | // Has the entire magazine crossed the "emptiness threshold"? If so, transfer a region | |
4403 | // from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e., |
4404 | // is at least fraction "f" empty). Such a region will be marked "suitable" on the recirculation list. |
4405 | ||
4406 | size_t a = small_mag_ptr->num_bytes_in_magazine; // Total bytes allocated to this magazine | |
4407 | size_t u = small_mag_ptr->mag_num_bytes_in_objects; // In use (malloc'd) from this magazine |
4408 | ||
1f2f436a A |
4409 | if (a - u > ((3 * SMALL_REGION_PAYLOAD_BYTES) / 2) && u < DENSITY_THRESHOLD(a)) { |
4410 | return small_free_do_recirc_to_depot(szone, small_mag_ptr, mag_index); | |
4411 | } | |
34e8f829 A |
4412 | |
4413 | } else { | |
511daa4c | 4414 | #endif |
34e8f829 | 4415 | // Freed to Depot. N.B. Lock on small_magazines[DEPOT_MAGAZINE_INDEX] is already held |
1f2f436a | 4416 | // Calculate the first page in the coalesced block that would be safe to mark MADV_FREE |
34e8f829 A |
4417 | uintptr_t safe_ptr = (uintptr_t)ptr + sizeof(free_list_t) + sizeof(msize_t); |
4418 | uintptr_t round_safe = round_page(safe_ptr); | |
4419 | ||
1f2f436a | 4420 | // Calculate the last page in the coalesced block that would be safe to mark MADV_FREE |
34e8f829 A |
4421 | uintptr_t safe_extent = (uintptr_t)ptr + SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t); |
4422 | uintptr_t trunc_extent = trunc_page(safe_extent); | |
4423 | ||
1f2f436a | 4424 | // The newly freed block may complete a span of bytes that covers one or more pages. Mark the span with MADV_FREE. |
34e8f829 | 4425 | if (round_safe < trunc_extent) { // Safe area covers a page (perhaps many) |
1f2f436a A |
4426 | uintptr_t lo = trunc_page((uintptr_t)original_ptr); |
4427 | uintptr_t hi = round_page((uintptr_t)original_ptr + original_size); | |
4428 | ||
4429 | OSAtomicIncrement32Barrier(&(node->pinned_to_depot)); | |
4430 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
511daa4c | 4431 | #if TARGET_OS_EMBEDDED |
1f2f436a | 4432 | madvise_free_range(szone, region, MAX(round_safe, lo), MIN(trunc_extent, hi), &szone->last_small_advise); |
511daa4c | 4433 | #else |
1f2f436a | 4434 | madvise_free_range(szone, region, MAX(round_safe, lo), MIN(trunc_extent, hi)); |
511daa4c | 4435 | #endif |
1f2f436a A |
4436 | SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr); |
4437 | OSAtomicDecrement32Barrier(&(node->pinned_to_depot)); | |
4438 | } | |
34e8f829 | 4439 | |
511daa4c | 4440 | #if !TARGET_OS_EMBEDDED |
1f2f436a | 4441 | if (0 < bytes_used || 0 < node->pinned_to_depot) { |
34e8f829 A |
4442 | /* Depot'd region is still live. Leave it in place on the Depot's recirculation list |
4443 | so as to avoid thrashing between the Depot's free list and a magazines's free list | |
4444 | with detach_region/reattach_region */ | |
4445 | } else { | |
4446 | /* Depot'd region is just now empty. Consider return to OS. */ | |
1f2f436a A |
4447 | region_t r_dealloc = small_free_try_depot_unmap_no_lock(szone, small_mag_ptr, node); |
4448 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
4449 | if (r_dealloc) | |
4450 | deallocate_pages(szone, r_dealloc, SMALL_REGION_SIZE, 0); | |
4451 | return FALSE; // Caller need not unlock | |
34e8f829 A |
4452 | } |
4453 | } | |
511daa4c | 4454 | #endif |
1f2f436a A |
4455 | |
4456 | return TRUE; // Caller must do SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr) | |
34e8f829 A |
4457 | } |
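/*
 * Note on the MADV_FREE range computed in the Depot branch above (a sketch;
 * the clipping rationale is inferred from the code): the first
 * sizeof(free_list_t) + sizeof(msize_t) bytes of the coalesced block carry
 * free-list linkage and the trailing msize_t carries the size used for
 * backward coalescing, so only
 *
 *     [ round_page(ptr + sizeof(free_list_t) + sizeof(msize_t)),
 *       trunc_page(ptr + SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t)) )
 *
 * is advisable, further intersected with the page range of the block being
 * freed right now (MAX(round_safe, lo) .. MIN(trunc_extent, hi)), presumably
 * so pages belonging to previously freed -- and previously advised --
 * neighbors are not advised again.
 */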
4458 | ||
4459 | // Allocates from the last region or a freshly allocated region | |
4460 | static void * | |
1f2f436a A |
4461 | small_malloc_from_region_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, |
4462 | msize_t msize, void *aligned_address) | |
34e8f829 | 4463 | { |
1f2f436a | 4464 | void *ptr; |
34e8f829 | 4465 | |
1f2f436a | 4466 | // Before anything else, we transform the mag_bytes_free_at_end or mag_bytes_free_at_start - if any - into a regular free block |
34e8f829 | 4467 | /* FIXME: last_block needs to be coalesced with previous entry if free, <rdar://5462322> */ |
1f2f436a | 4468 | if (small_mag_ptr->mag_bytes_free_at_end || small_mag_ptr->mag_bytes_free_at_start) |
34e8f829 A |
4469 | small_finalize_region(szone, small_mag_ptr); |
4470 | ||
34e8f829 A |
4471 | // Here find the only place in smallville that (infrequently) takes the small_regions_lock. |
4472 | // Only one thread at a time should be permitted to assess the density of the hash | |
4473 | // ring and adjust if needed. | |
4474 | // Only one thread at a time should be permitted to insert its new region on | |
4475 | // the hash ring. | |
4476 | // It is safe for all other threads to read the hash ring (hashed_regions) and | |
4477 | // the associated sizes (num_regions_allocated and num_small_regions). | |
4478 | ||
4479 | LOCK(szone->small_regions_lock); | |
4480 | // Check to see if the hash ring of small regions needs to grow. Try to | |
4481 | // avoid the hash ring becoming too dense. | |
4482 | if (szone->small_region_generation->num_regions_allocated < (2 * szone->num_small_regions)) { | |
4483 | region_t *new_regions; | |
4484 | size_t new_size; | |
4485 | size_t new_shift = szone->small_region_generation->num_regions_allocated_shift; // In/Out parameter | |
4486 | new_regions = hash_regions_grow_no_lock(szone, szone->small_region_generation->hashed_regions, | |
4487 | szone->small_region_generation->num_regions_allocated, | |
4488 | &new_shift, | |
4489 | &new_size); | |
4490 | // Do not deallocate the current hashed_regions allocation since someone | |
4491 | // may be iterating it. Instead, just leak it. | |
4492 | ||
4493 | // Prepare to advance to the "next generation" of the hash ring. | |
4494 | szone->small_region_generation->nextgen->hashed_regions = new_regions; | |
4495 | szone->small_region_generation->nextgen->num_regions_allocated = new_size; | |
4496 | szone->small_region_generation->nextgen->num_regions_allocated_shift = new_shift; | |
4497 | ||
4498 | // Throw the switch to atomically advance to the next generation. | |
4499 | szone->small_region_generation = szone->small_region_generation->nextgen; | |
4500 | // Ensure everyone sees the advance. | |
34e8f829 | 4501 | OSMemoryBarrier(); |
34e8f829 A |
4502 | } |
4503 | // Tag the region at "aligned_address" as belonging to us, | |
4504 | // and so put it under the protection of the magazine lock we are holding. | |
4505 | // Do this before advertising "aligned_address" on the hash ring(!) | |
4506 | MAGAZINE_INDEX_FOR_SMALL_REGION(aligned_address) = mag_index; | |
4507 | ||
4508 | // Insert the new region into the hash ring, and update malloc statistics | |
4509 | hash_region_insert_no_lock(szone->small_region_generation->hashed_regions, | |
4510 | szone->small_region_generation->num_regions_allocated, | |
4511 | szone->small_region_generation->num_regions_allocated_shift, | |
4512 | aligned_address); | |
4513 | ||
4514 | szone->num_small_regions++; | |
4515 | ||
4516 | UNLOCK(szone->small_regions_lock); | |
4517 | ||
4518 | small_mag_ptr->mag_last_region = aligned_address; | |
4519 | BYTES_USED_FOR_SMALL_REGION(aligned_address) = SMALL_BYTES_FOR_MSIZE(msize); | |
1f2f436a A |
4520 | #if ASLR_INTERNAL |
4521 | int offset_msize = malloc_entropy[1] & SMALL_ENTROPY_MASK; | |
4522 | #if DEBUG_MALLOC | |
4523 | if (getenv("MallocASLRForce")) offset_msize = strtol(getenv("MallocASLRForce"), NULL, 0) & SMALL_ENTROPY_MASK; | |
4524 | if (getenv("MallocASLRPrint")) malloc_printf("Region: %p offset: %d\n", aligned_address, offset_msize); | |
4525 | #endif | |
4526 | #else | |
4527 | int offset_msize = 0; | |
4528 | #endif | |
4529 | ptr = (void *)((uintptr_t) aligned_address + SMALL_BYTES_FOR_MSIZE(offset_msize)); | |
4530 | small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), offset_msize, msize); | |
34e8f829 A |
4531 | small_mag_ptr->mag_num_objects++; |
4532 | small_mag_ptr->mag_num_bytes_in_objects += SMALL_BYTES_FOR_MSIZE(msize); | |
4533 | small_mag_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES; | |
4534 | ||
1f2f436a A |
4535 | // add a big free block at the end |
4536 | small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), offset_msize + msize, NUM_SMALL_BLOCKS - msize - offset_msize); | |
4537 | small_mag_ptr->mag_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(NUM_SMALL_BLOCKS - msize - offset_msize); | |
4538 | ||
4539 | #if ASLR_INTERNAL | |
4540 | // add a big free block at the start | |
4541 | small_mag_ptr->mag_bytes_free_at_start = SMALL_BYTES_FOR_MSIZE(offset_msize); | |
4542 | if (offset_msize) { | |
4543 | small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), 0, offset_msize); | |
4544 | } | |
4545 | #else | |
4546 | small_mag_ptr->mag_bytes_free_at_start = 0; | |
4547 | #endif | |
4548 | ||
4549 | // connect to magazine as last node | |
4550 | recirc_list_splice_last(szone, small_mag_ptr, REGION_TRAILER_FOR_SMALL_REGION(aligned_address)); | |
4551 | ||
4552 | return ptr; | |
4553 | } | |
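/*
 * Resulting layout of a freshly adopted region (sketch; offset_msize is the
 * entropy-derived slide chosen above and is 0 when ASLR_INTERNAL is off):
 *
 *     | mag_bytes_free_at_start | first allocation (msize) | mag_bytes_free_at_end |
 *
 * The leading and trailing runs are tracked in the magazine rather than on
 * the free lists until small_finalize_region() turns them into ordinary free
 * blocks.
 */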
4554 | ||
4555 | static INLINE void * | |
4556 | small_try_shrink_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_good_size) | |
4557 | { | |
4558 | msize_t new_msize = SMALL_MSIZE_FOR_BYTES(new_good_size); | |
4559 | msize_t mshrinkage = SMALL_MSIZE_FOR_BYTES(old_size) - new_msize; | |
4560 | ||
4561 | if (mshrinkage) { | |
4562 | void *q = (void *)((uintptr_t)ptr + SMALL_BYTES_FOR_MSIZE(new_msize)); | |
4563 | magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines, | |
4564 | REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)), | |
4565 | MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr))); | |
4566 | ||
4567 | // Mark q as block header and in-use, thus creating two blocks. | |
4568 | small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), SMALL_META_INDEX_FOR_PTR(ptr), new_msize); | |
4569 | small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(q), SMALL_META_INDEX_FOR_PTR(q), mshrinkage); | |
4570 | small_mag_ptr->mag_num_objects++; | |
34e8f829 | 4571 | |
1f2f436a A |
4572 | SZONE_MAGAZINE_PTR_UNLOCK(szone,small_mag_ptr); |
4573 | szone_free(szone, q); // avoid inlining free_small(szone, q, ...); | |
4574 | } | |
34e8f829 A |
4575 | |
4576 | return ptr; | |
4577 | } | |
4578 | ||
4579 | static INLINE boolean_t | |
4580 | small_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) | |
4581 | { | |
4582 | // returns 1 on success | |
4583 | msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); | |
4584 | unsigned index; | |
4585 | msize_t old_msize, new_msize; | |
4586 | unsigned next_index; | |
4587 | void *next_block; | |
4588 | msize_t next_msize_and_free; | |
4589 | boolean_t is_free; | |
4590 | msize_t next_msize, leftover_msize; | |
4591 | void *leftover; | |
4592 | ||
4593 | index = SMALL_META_INDEX_FOR_PTR(ptr); | |
4594 | old_msize = SMALL_MSIZE_FOR_BYTES(old_size); | |
4595 | new_msize = SMALL_MSIZE_FOR_BYTES(new_size + SMALL_QUANTUM - 1); | |
4596 | next_index = index + old_msize; | |
4597 | ||
4598 | if (next_index >= NUM_SMALL_BLOCKS) { | |
4599 | return 0; | |
4600 | } | |
4601 | next_block = (char *)ptr + old_size; | |
4602 | ||
4603 | #if DEBUG_MALLOC | |
4604 | if ((uintptr_t)next_block & (SMALL_QUANTUM - 1)) { | |
4605 | szone_error(szone, 1, "internal invariant broken in realloc(next_block)", next_block, NULL); | |
4606 | } | |
4607 | if (meta_headers[index] != old_msize) | |
4608 | malloc_printf("*** small_try_realloc_in_place incorrect old %d %d\n", | |
4609 | meta_headers[index], old_msize); | |
4610 | #endif | |
4611 | ||
4612 | magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines, | |
4613 | REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)), | |
4614 | MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr))); | |
4615 | ||
4616 | /* | |
4617 | * Look for a free block immediately afterwards. If it's large enough, we can consume (part of) | |
4618 | * it. | |
4619 | */ | |
4620 | next_msize_and_free = meta_headers[next_index]; | |
4621 | is_free = next_msize_and_free & SMALL_IS_FREE; | |
4622 | if (!is_free) { | |
4623 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
4624 | return 0; // next_block is in use; | |
4625 | } | |
4626 | next_msize = next_msize_and_free & ~ SMALL_IS_FREE; | |
4627 | if (old_msize + next_msize < new_msize) { | |
4628 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
4629 | return 0; // even with next block, not enough | |
4630 | } | |
4631 | /* | |
4632 | * The following block is big enough; pull it from its freelist and chop off enough to satisfy | |
4633 | * our needs. | |
4634 | */ | |
4635 | small_free_list_remove_ptr(szone, small_mag_ptr, next_block, next_msize); | |
4636 | small_meta_header_set_middle(meta_headers, next_index); | |
4637 | leftover_msize = old_msize + next_msize - new_msize; | |
4638 | if (leftover_msize) { | |
4639 | /* there's some left, so put the remainder back */ | |
4640 | leftover = (unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(new_msize); | |
4641 | ||
4642 | small_free_list_add_ptr(szone, small_mag_ptr, leftover, leftover_msize); | |
4643 | } | |
4644 | #if DEBUG_MALLOC | |
4645 | if (SMALL_BYTES_FOR_MSIZE(new_msize) > szone->large_threshold) { | |
4646 | malloc_printf("*** realloc in place for %p exceeded msize=%d\n", ptr, new_msize); |
4647 | } | |
4648 | #endif | |
4649 | small_meta_header_set_in_use(meta_headers, index, new_msize); | |
4650 | #if DEBUG_MALLOC | |
4651 | if (LOG(szone,ptr)) { | |
1f2f436a | 4652 | malloc_printf("in small_try_realloc_in_place(), ptr=%p, msize=%d\n", ptr, *SMALL_METADATA_FOR_PTR(ptr)); |
34e8f829 A |
4653 | } |
4654 | #endif | |
4655 | small_mag_ptr->mag_num_bytes_in_objects += SMALL_BYTES_FOR_MSIZE(new_msize - old_msize); | |
4656 | ||
4657 | // Update this region's bytes in use count | |
4658 | region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)); | |
4659 | size_t bytes_used = node->bytes_used + SMALL_BYTES_FOR_MSIZE(new_msize - old_msize); | |
4660 | node->bytes_used = bytes_used; | |
4661 | ||
4662 | // Emptiness discriminant | |
4663 | if (bytes_used < DENSITY_THRESHOLD(SMALL_REGION_PAYLOAD_BYTES)) { | |
4664 | /* After this reallocation the region is still sparse, so it must have been even more so before | |
4665 | the reallocation. That implies the region is already correctly marked. Do nothing. */ | |
4666 | } else { | |
4667 | /* Region has crossed threshold from sparsity to density. Mark it not "suitable" on the | |
4668 | recirculation candidates list. */ | |
4669 | node->recirc_suitable = FALSE; | |
4670 | } | |
4671 | ||
4672 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
4673 | CHECK(szone, __PRETTY_FUNCTION__); | |
4674 | return 1; | |
4675 | } | |
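/*
 * Worked example for the in-place growth above (all numbers invented for
 * illustration): growing a 4-quantum block whose 5-quantum successor is free
 * to new_msize = 6 pulls the successor off its free list, marks its old
 * header "middle", re-posts the leftover 4 + 5 - 6 = 3 quanta as a new free
 * block at ptr + SMALL_BYTES_FOR_MSIZE(6), and finally rewrites
 * meta_headers[index] to 6.
 */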
4676 | ||
4677 | static boolean_t | |
4678 | small_check_region(szone_t *szone, region_t region) | |
4679 | { | |
4680 | unsigned char *ptr = SMALL_REGION_ADDRESS(region); | |
4681 | msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr); | |
4682 | unsigned char *region_end = SMALL_REGION_END(region); | |
4683 | msize_t prev_free = 0; | |
4684 | unsigned index; | |
4685 | msize_t msize_and_free; | |
4686 | msize_t msize; | |
4687 | free_list_t *free_head; | |
4688 | void *previous, *next; | |
4689 | msize_t *follower; | |
4690 | mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)); | |
4691 | magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]); | |
4692 | ||
4693 | // Assumes locked | |
4694 | CHECK_MAGAZINE_PTR_LOCKED(szone, small_mag_ptr, __PRETTY_FUNCTION__); | |
4695 | ||
1f2f436a A |
4696 | if (region == small_mag_ptr->mag_last_region) { |
4697 | ptr += small_mag_ptr->mag_bytes_free_at_start; | |
34e8f829 | 4698 | region_end -= small_mag_ptr->mag_bytes_free_at_end; |
1f2f436a | 4699 | } |
34e8f829 A |
4700 | |
4701 | while (ptr < region_end) { | |
4702 | index = SMALL_META_INDEX_FOR_PTR(ptr); | |
4703 | msize_and_free = meta_headers[index]; | |
4704 | if (!(msize_and_free & SMALL_IS_FREE)) { | |
4705 | // block is in use | |
4706 | msize = msize_and_free; | |
4707 | if (!msize) { | |
4708 | malloc_printf("*** invariant broken: null msize ptr=%p num_small_regions=%d end=%p\n", | |
4709 | ptr, szone->num_small_regions, region_end); | |
4710 | return 0; | |
4711 | } | |
4712 | if (SMALL_BYTES_FOR_MSIZE(msize) > szone->large_threshold) { | |
4713 | malloc_printf("*** invariant broken for %p this small msize=%d - size is too large\n", | |
4714 | ptr, msize_and_free); | |
4715 | return 0; | |
4716 | } | |
4717 | ptr += SMALL_BYTES_FOR_MSIZE(msize); | |
4718 | prev_free = 0; | |
4719 | } else { | |
4720 | // free pointer | |
4721 | msize = msize_and_free & ~ SMALL_IS_FREE; | |
4722 | free_head = (free_list_t *)ptr; | |
4723 | follower = (msize_t *)FOLLOWING_SMALL_PTR(ptr, msize); | |
4724 | if (!msize) { | |
4725 | malloc_printf("*** invariant broken for free block %p this msize=%d\n", ptr, msize); | |
4726 | return 0; | |
4727 | } | |
4728 | if (prev_free) { | |
4729 | malloc_printf("*** invariant broken for %p (2 free in a row)\n", ptr); | |
4730 | return 0; | |
4731 | } | |
4732 | previous = free_list_unchecksum_ptr(szone, &free_head->previous); | |
4733 | next = free_list_unchecksum_ptr(szone, &free_head->next); | |
4734 | if (previous && !SMALL_PTR_IS_FREE(previous)) { | |
4735 | malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n", | |
4736 | ptr, free_head->previous); | |
4737 | return 0; | |
4738 | } | |
4739 | if (next && !SMALL_PTR_IS_FREE(next)) { | |
4740 | malloc_printf("*** invariant broken for %p (next is not a free pointer)\n", ptr); | |
4741 | return 0; | |
4742 | } | |
4743 | if (SMALL_PREVIOUS_MSIZE(follower) != msize) { | |
4744 | malloc_printf("*** invariant broken for small free %p followed by %p in region [%p-%p] " | |
4745 | "(end marker incorrect) should be %d; in fact %d\n", | |
4746 | ptr, follower, SMALL_REGION_ADDRESS(region), region_end, msize, SMALL_PREVIOUS_MSIZE(follower)); | |
4747 | return 0; | |
4748 | } | |
4749 | ptr = (unsigned char *)follower; | |
4750 | prev_free = SMALL_IS_FREE; | |
4751 | } | |
4752 | } | |
4753 | return 1; | |
4754 | } | |
4755 | ||
4756 | static kern_return_t | |
4757 | small_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone, | |
4758 | memory_reader_t reader, vm_range_recorder_t recorder) | |
4759 | { | |
4760 | size_t num_regions; | |
4761 | size_t index; | |
4762 | region_t *regions; | |
4763 | vm_range_t buffer[MAX_RECORDER_BUFFER]; | |
4764 | unsigned count = 0; | |
4765 | kern_return_t err; | |
4766 | region_t region; | |
4767 | vm_range_t range; | |
4768 | vm_range_t admin_range; | |
4769 | vm_range_t ptr_range; | |
4770 | unsigned char *mapped_region; | |
4771 | msize_t *block_header; | |
4772 | unsigned block_index; | |
4773 | unsigned block_limit; | |
4774 | msize_t msize_and_free; | |
4775 | msize_t msize; | |
1f2f436a A |
4776 | magazine_t *small_mag_base = NULL; |
4777 | ||
34e8f829 A |
4778 | region_hash_generation_t *srg_ptr; |
4779 | err = reader(task, (vm_address_t)szone->small_region_generation, sizeof(region_hash_generation_t), (void **)&srg_ptr); | |
4780 | if (err) return err; | |
4781 | ||
4782 | num_regions = srg_ptr->num_regions_allocated; | |
4783 | err = reader(task, (vm_address_t)srg_ptr->hashed_regions, sizeof(region_t) * num_regions, (void **)®ions); | |
4784 | if (err) return err; | |
4785 | ||
1f2f436a A |
4786 | if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { |
4787 | // Map in all active magazines. Do this outside the iteration over regions. | |
4788 | err = reader(task, (vm_address_t)(szone->small_magazines), | |
4789 | szone->num_small_magazines*sizeof(magazine_t),(void **)&small_mag_base); | |
4790 | if (err) return err; | |
4791 | } | |
4792 | ||
34e8f829 A |
4793 | for (index = 0; index < num_regions; ++index) { |
4794 | region = regions[index]; | |
4795 | if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) { | |
4796 | range.address = (vm_address_t)SMALL_REGION_ADDRESS(region); | |
4797 | range.size = SMALL_REGION_SIZE; | |
4798 | if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) { | |
4799 | admin_range.address = range.address + SMALL_METADATA_START; | |
4800 | admin_range.size = SMALL_METADATA_SIZE; | |
4801 | recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1); | |
4802 | } | |
4803 | if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) { | |
4804 | ptr_range.address = range.address; | |
4805 | ptr_range.size = NUM_SMALL_BLOCKS * SMALL_QUANTUM; | |
4806 | recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1); | |
4807 | } | |
4808 | if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) { | |
1f2f436a A |
4809 | void *mag_last_free; |
4810 | vm_address_t mag_last_free_ptr = 0; | |
4811 | msize_t mag_last_free_msize = 0; | |
4812 | ||
34e8f829 A |
4813 | err = reader(task, range.address, range.size, (void **)&mapped_region); |
4814 | if (err) | |
4815 | return err; | |
4816 | ||
4817 | mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(mapped_region); | |
1f2f436a A |
4818 | magazine_t *small_mag_ptr = small_mag_base + mag_index; |
4819 | ||
4820 | if (DEPOT_MAGAZINE_INDEX != mag_index) { | |
4821 | mag_last_free = small_mag_ptr->mag_last_free; | |
4822 | if (mag_last_free) { | |
4823 | mag_last_free_ptr = (uintptr_t) mag_last_free & ~(SMALL_QUANTUM - 1); | |
4824 | mag_last_free_msize = (uintptr_t) mag_last_free & (SMALL_QUANTUM - 1); | |
4825 | } | |
4826 | } else { | |
4827 | for (mag_index = 0; mag_index < szone->num_small_magazines; mag_index++) { | |
4828 | if ((void *)range.address == (small_mag_base + mag_index)->mag_last_free_rgn) { | |
4829 | mag_last_free = (small_mag_base + mag_index)->mag_last_free; | |
4830 | if (mag_last_free) { | |
4831 | mag_last_free_ptr = (uintptr_t) mag_last_free & ~(SMALL_QUANTUM - 1); | |
4832 | mag_last_free_msize = (uintptr_t) mag_last_free & (SMALL_QUANTUM - 1); | |
4833 | } | |
4834 | } | |
4835 | } | |
34e8f829 A |
4836 | } |
4837 | ||
4838 | block_header = (msize_t *)(mapped_region + SMALL_METADATA_START + sizeof(region_trailer_t)); | |
4839 | block_index = 0; | |
4840 | block_limit = NUM_SMALL_BLOCKS; | |
1f2f436a A |
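// The most recently used region may still have uncarved free space at its start
// (mag_bytes_free_at_start; cf. the ASLR_INTERNAL path in small_malloc_from_free_list)
// and at its end (mag_bytes_free_at_end). Neither span has been carved into blocks yet,
// so the walk below skips it.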
4841 | if (region == small_mag_ptr->mag_last_region) { |
4842 | block_index += SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_start); | |
34e8f829 | 4843 | block_limit -= SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end); |
1f2f436a | 4844 | } |
34e8f829 A |
4845 | while (block_index < block_limit) { |
4846 | msize_and_free = block_header[block_index]; | |
4847 | msize = msize_and_free & ~ SMALL_IS_FREE; | |
4848 | if (! (msize_and_free & SMALL_IS_FREE) && | |
4849 | range.address + SMALL_BYTES_FOR_MSIZE(block_index) != mag_last_free_ptr) { | |
4850 | // Block in use | |
4851 | buffer[count].address = range.address + SMALL_BYTES_FOR_MSIZE(block_index); | |
4852 | buffer[count].size = SMALL_BYTES_FOR_MSIZE(msize); | |
4853 | count++; | |
4854 | if (count >= MAX_RECORDER_BUFFER) { | |
4855 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); | |
4856 | count = 0; | |
4857 | } | |
4858 | } | |
7ba935f9 A |
4859 | |
4860 | if (!msize) | |
4861 | return KERN_FAILURE; // Something's amiss. Avoid looping at this block_index. | |
4862 | ||
34e8f829 A |
4863 | block_index += msize; |
4864 | } | |
4865 | if (count) { | |
4866 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count); | |
4867 | count = 0; | |
4868 | } | |
4869 | } | |
4870 | } | |
4871 | } | |
4872 | return 0; | |
4873 | } | |
4874 | ||
4875 | static void * | |
4876 | small_malloc_from_free_list(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize) | |
4877 | { | |
4878 | free_list_t *ptr; | |
4879 | msize_t this_msize; | |
4880 | grain_t slot = (msize <= szone->num_small_slots) ? msize - 1 : szone->num_small_slots - 1; | |
4881 | free_list_t **free_list = small_mag_ptr->mag_free_list; | |
4882 | free_list_t **the_slot = free_list + slot; | |
4883 | free_list_t *next; | |
4884 | free_list_t **limit; | |
4885 | unsigned bitmap; | |
4886 | msize_t leftover_msize; | |
4887 | free_list_t *leftover_ptr; | |
4888 | ||
4889 | // Assumes we've locked the region | |
4890 | CHECK_MAGAZINE_PTR_LOCKED(szone, small_mag_ptr, __PRETTY_FUNCTION__); | |
4891 | ||
4892 | // Look for an exact match by checking the freelist for this msize. | |
4893 | // | |
4894 | ptr = *the_slot; | |
4895 | if (ptr) { | |
4896 | next = free_list_unchecksum_ptr(szone, &ptr->next); | |
4897 | if (next) { | |
4898 | next->previous = ptr->previous; | |
4899 | } else { | |
4900 | BITMAPN_CLR(small_mag_ptr->mag_bitmap, slot); | |
4901 | } | |
4902 | *the_slot = next; | |
4903 | this_msize = msize; | |
4904 | goto return_small_alloc; | |
4905 | } | |
4906 | ||
4907 | // Mask off the bits representing slots holding free blocks smaller than | |
4908 | // the size we need. If there are no larger free blocks, try allocating | |
4909 | // from the free space at the end of the small region. | |
4910 | if (szone->is_largemem) { | |
4911 | // BITMAPN_CTZ implementation | |
4912 | unsigned idx = slot >> 5; | |
4913 | bitmap = 0; | |
4914 | unsigned mask = ~ ((1 << (slot & 31)) - 1); | |
4915 | for ( ; idx < SMALL_BITMAP_WORDS; ++idx ) { | |
4916 | bitmap = small_mag_ptr->mag_bitmap[idx] & mask; | |
4917 | if (bitmap != 0) | |
4918 | break; | |
4919 | mask = ~0U; | |
4920 | } | |
4921 | // Check for fallthrough: No bits set in bitmap | |
4922 | if ((bitmap == 0) && (idx == SMALL_BITMAP_WORDS)) | |
4923 | goto try_small_from_end; | |
4924 | ||
4925 | // Start looking at the first set bit, plus 32 bits for every word of | |
4926 | // zeroes or entries that were too small. | |
4927 | slot = BITMAP32_CTZ((&bitmap)) + (idx * 32); | |
4928 | } else { | |
4929 | bitmap = small_mag_ptr->mag_bitmap[0] & ~ ((1 << slot) - 1); | |
4930 | if (!bitmap) | |
4931 | goto try_small_from_end; | |
4932 | ||
4933 | slot = BITMAP32_CTZ((&bitmap)); | |
4934 | } | |
4935 | // FIXME: Explain use of - 1 here, last slot has special meaning | |
4936 | limit = free_list + szone->num_small_slots - 1; | |
4937 | free_list += slot; | |
4938 | ||
4939 | if (free_list < limit) { | |
4940 | ptr = *free_list; | |
4941 | if (ptr) { | |
4942 | ||
4943 | next = free_list_unchecksum_ptr(szone, &ptr->next); | |
4944 | *free_list = next; | |
4945 | if (next) { | |
4946 | next->previous = ptr->previous; | |
4947 | } else { | |
4948 | BITMAPN_CLR(small_mag_ptr->mag_bitmap, slot); | |
4949 | } | |
4950 | this_msize = SMALL_PTR_SIZE(ptr); | |
4951 | goto add_leftover_and_proceed; | |
4952 | } | |
4953 | #if DEBUG_MALLOC | |
4954 | malloc_printf("in small_malloc_from_free_list(), mag_bitmap out of sync, slot=%d\n",slot); | |
4955 | #endif | |
4956 | } | |
4957 | ||
4958 | // We are now looking at the last slot, which contains blocks equal to | |
4959 | // (num_small_slots - 1) * (small quantum size), or larger due to coalescing of free blocks. | |
4960 | // If the last freelist is not empty, and its head contains a block that is | |
4961 | // larger than our request, then the remainder is put back on the free list. | |
4962 | // | |
4963 | ptr = *limit; | |
4964 | if (ptr) { | |
4965 | this_msize = SMALL_PTR_SIZE(ptr); | |
4966 | next = free_list_unchecksum_ptr(szone, &ptr->next); | |
4967 | if (this_msize - msize >= szone->num_small_slots) { | |
4968 | // the leftover will go back to the free list, so we optimize by | |
4969 | // modifying the free list rather than a pop and push of the head | |
4970 | leftover_msize = this_msize - msize; | |
4971 | leftover_ptr = (free_list_t *)((unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(msize)); | |
4972 | *limit = leftover_ptr; | |
4973 | if (next) { | |
4974 | next->previous.u = free_list_checksum_ptr(szone, leftover_ptr); | |
4975 | } | |
4976 | leftover_ptr->previous = ptr->previous; | |
4977 | leftover_ptr->next = ptr->next; | |
4978 | small_meta_header_set_is_free(SMALL_META_HEADER_FOR_PTR(leftover_ptr), | |
4979 | SMALL_META_INDEX_FOR_PTR(leftover_ptr), leftover_msize); | |
4980 | // Store msize at the end of the block denoted by "leftover_ptr" (i.e. at a negative offset from follower) | |
4981 | SMALL_PREVIOUS_MSIZE(FOLLOWING_SMALL_PTR(leftover_ptr, leftover_msize)) = leftover_msize; // Access is safe | |
4982 | #if DEBUG_MALLOC | |
4983 | if (LOG(szone,ptr)) { | |
4984 | malloc_printf("in small_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize); | |
4985 | } | |
4986 | #endif | |
4987 | this_msize = msize; | |
4988 | goto return_small_alloc; | |
4989 | } | |
4990 | if (next) { | |
4991 | next->previous = ptr->previous; | |
4992 | } | |
4993 | *limit = next; | |
4994 | goto add_leftover_and_proceed; | |
4995 | } | |
4996 | ||
4997 | try_small_from_end: | |
4998 | // Let's see if we can use small_mag_ptr->mag_bytes_free_at_end | |
4999 | if (small_mag_ptr->mag_bytes_free_at_end >= SMALL_BYTES_FOR_MSIZE(msize)) { | |
5000 | ptr = (free_list_t *)(SMALL_REGION_END(small_mag_ptr->mag_last_region) - | |
5001 | small_mag_ptr->mag_bytes_free_at_end); | |
5002 | small_mag_ptr->mag_bytes_free_at_end -= SMALL_BYTES_FOR_MSIZE(msize); | |
5003 | if (small_mag_ptr->mag_bytes_free_at_end) { | |
5004 | // let's mark this block as in use to serve as a boundary | |
5005 | small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), | |
5006 | SMALL_META_INDEX_FOR_PTR((unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(msize)), | |
5007 | SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end)); | |
5008 | } | |
5009 | this_msize = msize; | |
5010 | goto return_small_alloc; | |
5011 | } | |
1f2f436a A |
5012 | #if ASLR_INTERNAL |
5013 | // Try from start if nothing left at end | |
5014 | if (small_mag_ptr->mag_bytes_free_at_start >= SMALL_BYTES_FOR_MSIZE(msize)) { | |
5015 | ptr = (free_list_t *)(SMALL_REGION_ADDRESS(small_mag_ptr->mag_last_region) + | |
5016 | small_mag_ptr->mag_bytes_free_at_start - SMALL_BYTES_FOR_MSIZE(msize)); | |
5017 | small_mag_ptr->mag_bytes_free_at_start -= SMALL_BYTES_FOR_MSIZE(msize); | |
5018 | if (small_mag_ptr->mag_bytes_free_at_start) { | |
5019 | // let's mark this block as in use to serve as a boundary | |
5020 | small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), 0, SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_start)); | |
5021 | } | |
5022 | this_msize = msize; | |
5023 | goto return_small_alloc; | |
5024 | } | |
5025 | #endif | |
34e8f829 A |
5026 | return NULL; |
5027 | ||
5028 | add_leftover_and_proceed: | |
5029 | if (this_msize > msize) { | |
5030 | leftover_msize = this_msize - msize; | |
5031 | leftover_ptr = (free_list_t *)((unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(msize)); | |
5032 | #if DEBUG_MALLOC | |
5033 | if (LOG(szone,ptr)) { | |
5034 | malloc_printf("in small_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize); | |
5035 | } | |
5036 | #endif | |
5037 | small_free_list_add_ptr(szone, small_mag_ptr, leftover_ptr, leftover_msize); | |
5038 | this_msize = msize; | |
5039 | } | |
5040 | ||
5041 | return_small_alloc: | |
5042 | small_mag_ptr->mag_num_objects++; | |
5043 | small_mag_ptr->mag_num_bytes_in_objects += SMALL_BYTES_FOR_MSIZE(this_msize); | |
5044 | ||
5045 | // Update this region's bytes in use count | |
5046 | region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)); | |
5047 | size_t bytes_used = node->bytes_used + SMALL_BYTES_FOR_MSIZE(this_msize); | |
5048 | node->bytes_used = bytes_used; | |
5049 | ||
5050 | // Emptiness discriminant | |
5051 | if (bytes_used < DENSITY_THRESHOLD(SMALL_REGION_PAYLOAD_BYTES)) { | |
5052 | /* After this allocation the region is still sparse, so it must have been even more so before | |
5053 | the allocation. That implies the region is already correctly marked. Do nothing. */ | |
5054 | } else { | |
5055 | /* Region has crossed the threshold from sparsity to density. Mark it as not "suitable" on the | |
5056 | recirculation candidates list. */ | |
5057 | node->recirc_suitable = FALSE; | |
5058 | } | |
5059 | #if DEBUG_MALLOC | |
5060 | if (LOG(szone,ptr)) { | |
5061 | malloc_printf("in small_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize); | |
5062 | } | |
5063 | #endif | |
5064 | small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), SMALL_META_INDEX_FOR_PTR(ptr), this_msize); | |
5065 | return ptr; | |
5066 | } | |
5067 | #undef DENSITY_THRESHOLD | |
5068 | #undef K | |
5069 | ||
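/*
 * Illustrative sketch of the slot search above for the non-largemem case, i.e. a single
 * 32-bit bitmap word. The names num_slots and free_bitmap are placeholders, and
 * __builtin_ctz stands in for BITMAP32_CTZ:
 *
 *	grain_t slot = (msize <= num_slots) ? msize - 1 : num_slots - 1;   // exact-fit slot
 *	uint32_t candidates = free_bitmap & ~((1U << slot) - 1);           // slots holding blocks >= msize
 *	if (candidates)
 *		slot = __builtin_ctz(candidates);   // smallest slot that can satisfy the request
 *	else
 *		;                                   // fall back to mag_bytes_free_at_end, then a fresh region
 */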
5070 | static INLINE void * | |
5071 | small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) | |
5072 | { | |
5073 | void *ptr; | |
5074 | mag_index_t mag_index = mag_get_thread_index(szone); | |
5075 | magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]); | |
5076 | ||
5077 | SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr); | |
5078 | ||
5079 | #if SMALL_CACHE | |
5080 | ptr = (void *)small_mag_ptr->mag_last_free; | |
5081 | ||
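	// One-deep per-magazine cache of the most recently freed block. Its msize is packed
	// into the low bits of the pointer (see free_small below), so a simple equality test
	// against the requested msize is an exact-size hit that bypasses the free lists entirely.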
5082 | if ((((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) == msize) { | |
5083 | // we have a winner | |
5084 | small_mag_ptr->mag_last_free = NULL; | |
1f2f436a | 5085 | small_mag_ptr->mag_last_free_rgn = NULL; |
34e8f829 A |
5086 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); |
5087 | CHECK(szone, __PRETTY_FUNCTION__); | |
5088 | ptr = (void *)((uintptr_t)ptr & ~ (SMALL_QUANTUM - 1)); | |
5089 | if (cleared_requested) { | |
5090 | memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); | |
5091 | } | |
5092 | return ptr; | |
5093 | } | |
5094 | #endif /* SMALL_CACHE */ | |
5095 | ||
1f2f436a | 5096 | while(1) { |
34e8f829 A |
5097 | ptr = small_malloc_from_free_list(szone, small_mag_ptr, mag_index, msize); |
5098 | if (ptr) { | |
5099 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5100 | CHECK(szone, __PRETTY_FUNCTION__); | |
5101 | if (cleared_requested) { | |
5102 | memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); | |
5103 | } | |
5104 | return ptr; | |
5105 | } | |
34e8f829 | 5106 | |
1f2f436a A |
5107 | if (small_get_region_from_depot(szone, small_mag_ptr, mag_index, msize)) { |
5108 | ptr = small_malloc_from_free_list(szone, small_mag_ptr, mag_index, msize); | |
5109 | if (ptr) { | |
5110 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5111 | CHECK(szone, __PRETTY_FUNCTION__); | |
5112 | if (cleared_requested) { | |
5113 | memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize)); | |
5114 | } | |
5115 | return ptr; | |
5116 | } | |
5117 | } | |
5118 | ||
5119 | // The magazine is exhausted. A new region (heap) must be allocated to satisfy this call to malloc(). | |
5120 | // The allocation, an mmap() system call, will be performed outside the magazine spin locks by the first | |
5121 | // thread that suffers the exhaustion. That thread sets "alloc_underway" and enters a critical section. | |
5122 | // Threads arriving here later are excluded from the critical section, yield the CPU, and then retry the | |
5123 | // allocation. After some time the magazine is resupplied, the original thread leaves with its allocation, | |
5124 | // and retrying threads succeed in the code just above. | |
5125 | if (!small_mag_ptr->alloc_underway) { | |
5126 | void *fresh_region; | |
5127 | ||
5128 | // time to create a new region (do this outside the magazine lock) | |
5129 | small_mag_ptr->alloc_underway = TRUE; | |
5130 | OSMemoryBarrier(); | |
5131 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5132 | fresh_region = allocate_pages_securely(szone, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, VM_MEMORY_MALLOC_SMALL); | |
5133 | SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr); | |
5134 | ||
5135 | MAGMALLOC_ALLOCREGION((void *)szone, (int)mag_index, fresh_region, SMALL_REGION_SIZE); // DTrace USDT Probe | |
5136 | ||
5137 | if (!fresh_region) { // out of memory! | |
5138 | small_mag_ptr->alloc_underway = FALSE; | |
5139 | OSMemoryBarrier(); | |
5140 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5141 | return NULL; | |
5142 | } | |
5143 | ||
5144 | ptr = small_malloc_from_region_no_lock(szone, small_mag_ptr, mag_index, msize, fresh_region); | |
5145 | ||
5146 | // we don't clear because this freshly allocated space is pristine | |
5147 | small_mag_ptr->alloc_underway = FALSE; | |
5148 | OSMemoryBarrier(); | |
5149 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5150 | CHECK(szone, __PRETTY_FUNCTION__); | |
5151 | return ptr; | |
5152 | } else { | |
5153 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5154 | pthread_yield_np(); | |
5155 | SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr); | |
5156 | } | |
5157 | } | |
5158 | /* NOTREACHED */ | |
34e8f829 A |
5159 | } |
5160 | ||
5161 | static NOINLINE void | |
5162 | free_small_botch(szone_t *szone, free_list_t *ptr) | |
5163 | { | |
5164 | mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)); | |
5165 | magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]); | |
5166 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5167 | szone_error(szone, 1, "double free", ptr, NULL); | |
5168 | } | |
5169 | ||
5170 | static INLINE void | |
5171 | free_small(szone_t *szone, void *ptr, region_t small_region, size_t known_size) | |
5172 | { | |
5173 | msize_t msize; | |
5174 | mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)); | |
5175 | magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]); | |
5176 | ||
5177 | // ptr is known to be in small_region | |
5178 | if (known_size) { | |
5179 | msize = SMALL_MSIZE_FOR_BYTES(known_size + SMALL_QUANTUM - 1); | |
5180 | } else { | |
5181 | msize = SMALL_PTR_SIZE(ptr); | |
5182 | if (SMALL_PTR_IS_FREE(ptr)) { | |
5183 | free_small_botch(szone, ptr); | |
5184 | return; | |
5185 | } | |
5186 | } | |
5187 | ||
5188 | SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr); | |
5189 | ||
5190 | #if SMALL_CACHE | |
5191 | // Depot does not participate in SMALL_CACHE since it can't be directly malloc()'d | |
5192 | if (DEPOT_MAGAZINE_INDEX != mag_index) { | |
5193 | ||
5194 | void *ptr2 = small_mag_ptr->mag_last_free; // Might be NULL | |
5195 | region_t rgn2 = small_mag_ptr->mag_last_free_rgn; | |
5196 | ||
5197 | /* check that we don't already have this pointer in the cache */ | |
5198 | if (ptr == (void *)((uintptr_t)ptr2 & ~ (SMALL_QUANTUM - 1))) { | |
5199 | free_small_botch(szone, ptr); | |
5200 | return; | |
5201 | } | |
5202 | ||
5203 | if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize) | |
5204 | memset(ptr, 0x55, SMALL_BYTES_FOR_MSIZE(msize)); | |
5205 | ||
5206 | small_mag_ptr->mag_last_free = (void *)(((uintptr_t)ptr) | msize); | |
5207 | small_mag_ptr->mag_last_free_rgn = small_region; | |
5208 | ||
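		// The block being freed now occupies the one-deep cache. If a block was already
		// cached (ptr2), evict it: unpack its msize and region below and fall through to
		// really free that older block instead. The deferral lets the fast path in
		// small_malloc_should_clear recycle the most recent free without touching the free lists.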
5209 | if (!ptr2) { | |
5210 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5211 | CHECK(szone, __PRETTY_FUNCTION__); | |
5212 | return; | |
5213 | } | |
5214 | ||
5215 | msize = (uintptr_t)ptr2 & (SMALL_QUANTUM - 1); | |
5216 | ptr = (void *)(((uintptr_t)ptr2) & ~(SMALL_QUANTUM - 1)); | |
5217 | small_region = rgn2; | |
5218 | } | |
5219 | #endif /* SMALL_CACHE */ | |
5220 | ||
5221 | // Now in the time it took to acquire the lock, the region may have migrated | |
5222 | // from one magazine to another. I.e. trailer->mag_index is volatile. | |
5223 | // In which case the magazine lock we obtained (namely magazines[mag_index].mag_lock) | |
5224 | // is stale. If so, keep on tryin' ... | |
5225 | region_trailer_t *trailer = REGION_TRAILER_FOR_SMALL_REGION(small_region); | |
5226 | mag_index_t refreshed_index; | |
5227 | ||
5228 | while (mag_index != (refreshed_index = trailer->mag_index)) { // Note assignment | |
5229 | ||
5230 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5231 | ||
5232 | mag_index = refreshed_index; | |
5233 | small_mag_ptr = &(szone->small_magazines[mag_index]); | |
5234 | SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr); | |
5235 | } | |
5236 | ||
1f2f436a | 5237 | if (small_free_no_lock(szone, small_mag_ptr, mag_index, small_region, ptr, msize)) |
34e8f829 | 5238 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); |
1f2f436a | 5239 | |
34e8f829 A |
5240 | CHECK(szone, __PRETTY_FUNCTION__); |
5241 | } | |
5242 | ||
5243 | static void | |
5244 | print_small_free_list(szone_t *szone) | |
5245 | { | |
5246 | free_list_t *ptr; | |
5247 | _SIMPLE_STRING b = _simple_salloc(); | |
5248 | mag_index_t mag_index; | |
5249 | ||
5250 | if (b) { | |
5251 | _simple_sappend(b, "small free sizes:\n"); | |
5252 | for (mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) { | |
5253 | grain_t slot = 0; | |
5254 | _simple_sprintf(b,"\tMagazine %d: ", mag_index); | |
5255 | while (slot < szone->num_small_slots) { | |
5256 | ptr = szone->small_magazines[mag_index].mag_free_list[slot]; | |
5257 | if (ptr) { | |
5258 | _simple_sprintf(b, "%s%y[%d]; ", (slot == szone->num_small_slots-1) ? ">=" : "", | |
5259 | (slot + 1) * SMALL_QUANTUM, free_list_count(szone, ptr)); | |
5260 | } | |
5261 | slot++; | |
5262 | } | |
5263 | _simple_sappend(b,"\n"); | |
5264 | } | |
5265 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b)); | |
5266 | _simple_sfree(b); | |
5267 | } | |
5268 | } | |
5269 | ||
5270 | static void | |
1f2f436a | 5271 | print_small_region(szone_t *szone, boolean_t verbose, region_t region, size_t bytes_at_start, size_t bytes_at_end) |
34e8f829 A |
5272 | { |
5273 | unsigned counts[1024]; | |
5274 | unsigned in_use = 0; | |
5275 | uintptr_t start = (uintptr_t)SMALL_REGION_ADDRESS(region); | |
1f2f436a | 5276 | uintptr_t current = start + bytes_at_start; |
34e8f829 A |
5277 | uintptr_t limit = (uintptr_t)SMALL_REGION_END(region) - bytes_at_end; |
5278 | msize_t msize_and_free; | |
5279 | msize_t msize; | |
5280 | unsigned ci; | |
5281 | _SIMPLE_STRING b; | |
5282 | uintptr_t pgTot = 0; | |
5283 | ||
5284 | if (region == HASHRING_REGION_DEALLOCATED) { | |
5285 | if ((b = _simple_salloc()) != NULL) { | |
5286 | _simple_sprintf(b, "Small region [unknown address] was returned to the OS\n"); | |
5287 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b)); | |
5288 | _simple_sfree(b); | |
5289 | } | |
5290 | return; | |
5291 | } | |
5292 | ||
5293 | memset(counts, 0, sizeof(counts)); | |
5294 | while (current < limit) { | |
5295 | msize_and_free = *SMALL_METADATA_FOR_PTR(current); | |
5296 | msize = msize_and_free & ~ SMALL_IS_FREE; | |
5297 | if (!msize) { | |
5298 | malloc_printf("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize); | |
5299 | break; | |
5300 | } | |
5301 | if (!(msize_and_free & SMALL_IS_FREE)) { | |
5302 | // block in use | |
5303 | if (msize < 1024) | |
5304 | counts[msize]++; | |
5305 | in_use++; | |
5306 | } else { | |
5307 | uintptr_t pgLo = round_page(current + sizeof(free_list_t) + sizeof(msize_t)); | |
1f2f436a | 5308 | uintptr_t pgHi = trunc_page(current + SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t)); |
34e8f829 A |
5309 | |
5310 | if (pgLo < pgHi) { | |
5311 | pgTot += (pgHi - pgLo); | |
5312 | } | |
5313 | } | |
5314 | current += SMALL_BYTES_FOR_MSIZE(msize); | |
5315 | } | |
5316 | if ((b = _simple_salloc()) != NULL) { | |
5317 | _simple_sprintf(b, "Small region [%p-%p, %y] \t", (void *)start, SMALL_REGION_END(region), (int)SMALL_REGION_SIZE); | |
5318 | _simple_sprintf(b, "Magazine=%d \t", MAGAZINE_INDEX_FOR_SMALL_REGION(region)); | |
5319 | _simple_sprintf(b, "Allocations in use=%d \t Bytes in use=%ly \t", in_use, BYTES_USED_FOR_SMALL_REGION(region)); | |
1f2f436a A |
5320 | if (bytes_at_end || bytes_at_start) |
5321 | _simple_sprintf(b, "Untouched=%ly ", bytes_at_end + bytes_at_start); | |
34e8f829 A |
5322 | if (DEPOT_MAGAZINE_INDEX == MAGAZINE_INDEX_FOR_SMALL_REGION(region)) { |
5323 | _simple_sprintf(b, "Advised MADV_FREE=%ly", pgTot); | |
5324 | } else { | |
5325 | _simple_sprintf(b, "Fragments subject to reclamation=%ly", pgTot); | |
5326 | } | |
5327 | if (verbose && in_use) { | |
5328 | _simple_sappend(b, "\n\tSizes in use: "); | |
5329 | for (ci = 0; ci < 1024; ci++) | |
5330 | if (counts[ci]) | |
5331 | _simple_sprintf(b, "%d[%d] ", SMALL_BYTES_FOR_MSIZE(ci), counts[ci]); | |
5332 | } | |
5333 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b)); | |
5334 | _simple_sfree(b); | |
5335 | } | |
5336 | } | |
5337 | ||
5338 | static boolean_t | |
5339 | small_free_list_check(szone_t *szone, grain_t slot) | |
5340 | { | |
5341 | mag_index_t mag_index; | |
5342 | ||
5343 | for (mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) { | |
5344 | magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]); | |
5345 | SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr); | |
5346 | ||
5347 | unsigned count = 0; | |
5348 | free_list_t *ptr = szone->small_magazines[mag_index].mag_free_list[slot]; | |
5349 | msize_t msize_and_free; | |
5350 | free_list_t *previous = NULL; | |
5351 | ||
5352 | while (ptr) { | |
5353 | msize_and_free = *SMALL_METADATA_FOR_PTR(ptr); | |
5354 | if (!(msize_and_free & SMALL_IS_FREE)) { | |
5355 | malloc_printf("*** in-use ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr); | |
5356 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5357 | return 0; | |
5358 | } | |
5359 | if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) { | |
5360 | malloc_printf("*** unaligned ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr); | |
5361 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5362 | return 0; | |
5363 | } | |
5364 | if (!small_region_for_ptr_no_lock(szone, ptr)) { | |
5365 | malloc_printf("*** ptr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr); | |
5366 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5367 | return 0; | |
5368 | } | |
5369 | if (free_list_unchecksum_ptr(szone, &ptr->previous) != previous) { | |
5370 | malloc_printf("*** previous incorrectly set slot=%d count=%d ptr=%p\n", slot, count, ptr); | |
5371 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5372 | return 0; | |
5373 | } | |
5374 | previous = ptr; | |
5375 | ptr = free_list_unchecksum_ptr(szone, &ptr->next); | |
5376 | count++; | |
5377 | } | |
5378 | ||
5379 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
5380 | } | |
5381 | return 1; | |
5382 | } | |
5383 | ||
5384 | /******************************************************************************* | |
5385 | * Large allocator implementation | |
5386 | ******************************************************************************/ | |
5387 | #pragma mark large allocator | |
5388 | ||
5389 | #if DEBUG_MALLOC | |
5390 | ||
5391 | static void | |
5392 | large_debug_print(szone_t *szone) | |
5393 | { | |
5394 | unsigned index; | |
5395 | large_entry_t *range; | |
5396 | _SIMPLE_STRING b = _simple_salloc(); | |
5397 | ||
5398 | if (b) { | |
5399 | for (index = 0, range = szone->large_entries; index < szone->num_large_entries; index++, range++) | |
5400 | if (range->address) | |
5401 | _simple_sprintf(b, "%d: %p(%y); ", index, range->address, range->size); | |
5402 | ||
5403 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b)); | |
5404 | _simple_sfree(b); | |
5405 | } | |
5406 | } | |
5407 | #endif | |
5408 | ||
5409 | /* | |
5410 | * Scan the hash ring looking for an entry for the given pointer. | |
5411 | */ | |
5412 | static large_entry_t * | |
5413 | large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr) | |
5414 | { | |
5415 | // result only valid with lock held | |
5416 | unsigned num_large_entries = szone->num_large_entries; | |
5417 | unsigned hash_index; | |
5418 | unsigned index; | |
5419 | large_entry_t *range; | |
5420 | ||
5421 | if (!num_large_entries) | |
5422 | return NULL; | |
5423 | ||
5424 | hash_index = ((uintptr_t)ptr >> vm_page_shift) % num_large_entries; | |
5425 | index = hash_index; | |
5426 | ||
5427 | do { | |
5428 | range = szone->large_entries + index; | |
5429 | if (range->address == (vm_address_t)ptr) | |
5430 | return range; | |
5431 | if (0 == range->address) | |
5432 | return NULL; // end of chain | |
5433 | index++; | |
5434 | if (index == num_large_entries) | |
5435 | index = 0; | |
5436 | } while (index != hash_index); | |
5437 | ||
5438 | return NULL; | |
5439 | } | |
5440 | ||
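/*
 * The large-entry table is an open-addressing hash ring: entries hash by page number
 * ((address >> vm_page_shift) % num_large_entries) and collisions probe linearly with
 * wraparound. An address of 0 marks an empty slot and terminates a probe, which is why
 * large_entry_free_no_lock must call large_entries_rehash_after_entry_no_lock() to
 * reinsert the cluster that follows a deleted entry.
 */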
5441 | static void | |
5442 | large_entry_insert_no_lock(szone_t *szone, large_entry_t range) | |
5443 | { | |
5444 | unsigned num_large_entries = szone->num_large_entries; | |
5445 | unsigned hash_index = (((uintptr_t)(range.address)) >> vm_page_shift) % num_large_entries; | |
5446 | unsigned index = hash_index; | |
5447 | large_entry_t *entry; | |
5448 | ||
5449 | // assert(szone->num_large_objects_in_use < szone->num_large_entries); /* must be called with room to spare */ | |
5450 | ||
5451 | do { | |
5452 | entry = szone->large_entries + index; | |
5453 | if (0 == entry->address) { | |
5454 | *entry = range; | |
5455 | return; // end of chain | |
5456 | } | |
5457 | index++; | |
5458 | if (index == num_large_entries) | |
5459 | index = 0; | |
5460 | } while (index != hash_index); | |
5461 | ||
5462 | // assert(0); /* must not fallthrough! */ | |
5463 | } | |
5464 | ||
5465 | // FIXME: can't we simply swap the (now empty) entry with the last entry on the collision chain for this hash slot? | |
5466 | static INLINE void | |
5467 | large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) | |
5468 | { | |
5469 | unsigned num_large_entries = szone->num_large_entries; | |
5470 | unsigned hash_index = entry - szone->large_entries; | |
5471 | unsigned index = hash_index; | |
5472 | large_entry_t range; | |
5473 | ||
5474 | // assert(entry->address == 0) /* caller must have cleared *entry */ | |
5475 | ||
5476 | do { | |
5477 | index++; | |
5478 | if (index == num_large_entries) | |
5479 | index = 0; | |
5480 | range = szone->large_entries[index]; | |
5481 | if (0 == range.address) | |
5482 | return; | |
5483 | szone->large_entries[index].address = (vm_address_t)0; | |
5484 | szone->large_entries[index].size = 0; | |
5485 | szone->large_entries[index].did_madvise_reusable = FALSE; | |
5486 | large_entry_insert_no_lock(szone, range); // this will reinsert in the | |
5487 | // proper place | |
5488 | } while (index != hash_index); | |
5489 | ||
5490 | // assert(0); /* since entry->address == 0, must not fallthrough! */ | |
5491 | } | |
5492 | ||
5493 | // FIXME: num should probably be a size_t, since you can theoretically allocate | |
5494 | // more than 2^32-1 large_threshold objects in 64 bit. | |
5495 | static INLINE large_entry_t * | |
5496 | large_entries_alloc_no_lock(szone_t *szone, unsigned num) | |
5497 | { | |
5498 | size_t size = num * sizeof(large_entry_t); | |
5499 | ||
5500 | // Note that we allocate memory (via a system call) while holding a spin lock. | |
5501 | // That is certainly evil; however, it is very rare in the lifetime of a process, | |
5502 | // and the alternative would slow down the normal case. | |
5503 | return allocate_pages(szone, round_page(size), 0, 0, VM_MEMORY_MALLOC_LARGE); | |
5504 | } | |
5505 | ||
5506 | static void | |
5507 | large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate) | |
5508 | { | |
5509 | size_t size = num * sizeof(large_entry_t); | |
5510 | ||
5511 | range_to_deallocate->address = (vm_address_t)entries; | |
5512 | range_to_deallocate->size = round_page(size); | |
5513 | } | |
5514 | ||
5515 | static large_entry_t * | |
5516 | large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate) | |
5517 | { | |
5518 | // sets range_to_deallocate | |
5519 | unsigned old_num_entries = szone->num_large_entries; | |
5520 | large_entry_t *old_entries = szone->large_entries; | |
5521 | // always an odd number for good hashing | |
5522 | unsigned new_num_entries = (old_num_entries) ? old_num_entries * 2 + 1 : | |
5523 | ((vm_page_size / sizeof(large_entry_t)) - 1); | |
5524 | large_entry_t *new_entries = large_entries_alloc_no_lock(szone, new_num_entries); | |
5525 | unsigned index = old_num_entries; | |
5526 | large_entry_t oldRange; | |
5527 | ||
5528 | // if the allocation of new entries failed, bail | |
5529 | if (new_entries == NULL) | |
5530 | return NULL; | |
5531 | ||
5532 | szone->num_large_entries = new_num_entries; | |
5533 | szone->large_entries = new_entries; | |
5534 | ||
5535 | /* rehash entries into the new list */ | |
5536 | while (index--) { | |
5537 | oldRange = old_entries[index]; | |
5538 | if (oldRange.address) { | |
5539 | large_entry_insert_no_lock(szone, oldRange); | |
5540 | } | |
5541 | } | |
5542 | ||
5543 | if (old_entries) { | |
5544 | large_entries_free_no_lock(szone, old_entries, old_num_entries, range_to_deallocate); | |
5545 | } else { | |
5546 | range_to_deallocate->address = (vm_address_t)0; | |
5547 | range_to_deallocate->size = 0; | |
5548 | } | |
5549 | ||
5550 | return new_entries; | |
5551 | } | |
5552 | ||
5553 | // frees the specific entry in the size table | |
5554 | // returns a range to truly deallocate | |
5555 | static vm_range_t | |
5556 | large_entry_free_no_lock(szone_t *szone, large_entry_t *entry) | |
5557 | { | |
5558 | vm_range_t range; | |
5559 | ||
5560 | range.address = entry->address; | |
5561 | range.size = entry->size; | |
5562 | ||
5563 | if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) { | |
1f2f436a | 5564 | protect((void *)range.address, range.size, PROT_READ | PROT_WRITE, szone->debug_flags); |
34e8f829 A |
5565 | range.address -= vm_page_size; |
5566 | range.size += 2 * vm_page_size; | |
5567 | } | |
5568 | ||
5569 | entry->address = 0; | |
5570 | entry->size = 0; | |
5571 | entry->did_madvise_reusable = FALSE; | |
5572 | large_entries_rehash_after_entry_no_lock(szone, entry); | |
5573 | ||
5574 | #if DEBUG_MALLOC | |
5575 | if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) { | |
5576 | malloc_printf("*** freed entry %p still in use; num_large_entries=%d\n", | |
5577 | range.address, szone->num_large_entries); | |
5578 | large_debug_print(szone); | |
5579 | szone_sleep(); | |
5580 | } | |
5581 | #endif | |
5582 | return range; | |
5583 | } | |
5584 | ||
5585 | static NOINLINE kern_return_t | |
5586 | large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address, | |
5587 | unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder) | |
5588 | { | |
5589 | unsigned index = 0; | |
5590 | vm_range_t buffer[MAX_RECORDER_BUFFER]; | |
5591 | unsigned count = 0; | |
5592 | large_entry_t *entries; | |
5593 | kern_return_t err; | |
5594 | vm_range_t range; | |
5595 | large_entry_t entry; | |
5596 | ||
5597 | err = reader(task, large_entries_address, sizeof(large_entry_t) * num_entries, (void **)&entries); | |
5598 | if (err) | |
5599 | return err; | |
5600 | ||
5601 | index = num_entries; | |
5602 | if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) { | |
5603 | range.address = large_entries_address; | |
5604 | range.size = round_page(num_entries * sizeof(large_entry_t)); | |
5605 | recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &range, 1); | |
5606 | } | |
5607 | if (type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE)) { | |
5608 | while (index--) { | |
5609 | entry = entries[index]; | |
5610 | if (entry.address) { | |
5611 | range.address = entry.address; | |
5612 | range.size = entry.size; | |
5613 | buffer[count++] = range; | |
5614 | if (count >= MAX_RECORDER_BUFFER) { | |
5615 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, | |
5616 | buffer, count); | |
5617 | count = 0; | |
5618 | } | |
5619 | } | |
5620 | } | |
5621 | } | |
5622 | if (count) { | |
5623 | recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE, | |
5624 | buffer, count); | |
5625 | } | |
5626 | return 0; | |
5627 | } | |
5628 | ||
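/*
 * LARGE_CACHE keeps a small "death-row" ring of recently freed large allocations that stay
 * mapped for immediate reuse. large_malloc() scans the ring newest-to-oldest for a best fit
 * (accepting at most 50% internal fragmentation); free_large() appends to it, issuing
 * madvise(..., MADV_FREE_REUSABLE) once the un-madvised hoard would exceed
 * large_entry_cache_reserve_limit.
 */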
5629 | static void * | |
5630 | large_malloc(szone_t *szone, size_t num_pages, unsigned char alignment, | |
5631 | boolean_t cleared_requested) | |
5632 | { | |
5633 | void *addr; | |
5634 | vm_range_t range_to_deallocate; | |
5635 | size_t size; | |
5636 | large_entry_t large_entry; | |
5637 | ||
5638 | if (!num_pages) | |
5639 | num_pages = 1; // minimal allocation size for this szone | |
5640 | size = (size_t)num_pages << vm_page_shift; | |
5641 | range_to_deallocate.size = 0; | |
5642 | range_to_deallocate.address = 0; | |
5643 | ||
5644 | #if LARGE_CACHE | |
5645 | if (size < LARGE_CACHE_SIZE_ENTRY_LIMIT) { // Look for a large_entry_t on the death-row cache? | |
5646 | SZONE_LOCK(szone); | |
5647 | ||
5648 | int i, best = -1, idx = szone->large_entry_cache_newest, stop_idx = szone->large_entry_cache_oldest; | |
5649 | size_t best_size = SIZE_T_MAX; | |
5650 | ||
5651 | while (1) { // Scan large_entry_cache for best fit, starting with most recent entry | |
5652 | size_t this_size = szone->large_entry_cache[idx].size; | |
1f2f436a A |
5653 | addr = (void *)szone->large_entry_cache[idx].address; |
5654 | ||
5655 | if (0 == alignment || 0 == (((uintptr_t) addr) & (((uintptr_t) 1 << alignment) - 1))) { | |
5656 | if (size == this_size) { // size match! | |
5657 | best = idx; | |
5658 | best_size = this_size; | |
5659 | break; | |
5660 | } | |
34e8f829 | 5661 | |
1f2f436a A |
5662 | if (size <= this_size && this_size < best_size) { // improved fit? |
5663 | best = idx; | |
5664 | best_size = this_size; | |
5665 | } | |
34e8f829 A |
5666 | } |
5667 | ||
5668 | if (idx == stop_idx) // exhausted live ring? | |
5669 | break; | |
5670 | ||
5671 | if (idx) | |
5672 | idx--; // bump idx down | |
5673 | else | |
5674 | idx = LARGE_ENTRY_CACHE_SIZE - 1; // wrap idx | |
5675 | } | |
5676 | ||
5677 | if (best > -1 && (best_size - size) < size) { // limit fragmentation to 50% | |
5678 | addr = (void *)szone->large_entry_cache[best].address; | |
5679 | boolean_t was_madvised_reusable = szone->large_entry_cache[best].did_madvise_reusable; | |
5680 | ||
5681 | // Compact live ring to fill entry now vacated at large_entry_cache[best] | |
5682 | // while preserving time-order | |
5683 | if (szone->large_entry_cache_oldest < szone->large_entry_cache_newest) { | |
5684 | ||
5685 | // Ring hasn't wrapped. Fill in from right. | |
5686 | for (i = best; i < szone->large_entry_cache_newest; ++i) | |
5687 | szone->large_entry_cache[i] = szone->large_entry_cache[i + 1]; | |
5688 | ||
5689 | szone->large_entry_cache_newest--; // Pull in right endpoint. | |
5690 | ||
5691 | } else if (szone->large_entry_cache_newest < szone->large_entry_cache_oldest) { | |
5692 | ||
5693 | // Ring has wrapped. Arrange to fill in from the contiguous side. | |
5694 | if (best <= szone->large_entry_cache_newest) { | |
5695 | // Fill from right. | |
5696 | for (i = best; i < szone->large_entry_cache_newest; ++i) | |
5697 | szone->large_entry_cache[i] = szone->large_entry_cache[i + 1]; | |
5698 | ||
5699 | if (0 < szone->large_entry_cache_newest) | |
5700 | szone->large_entry_cache_newest--; | |
5701 | else | |
5702 | szone->large_entry_cache_newest = LARGE_ENTRY_CACHE_SIZE - 1; | |
5703 | } else { | |
5704 | // Fill from left. | |
5705 | for ( i = best; i > szone->large_entry_cache_oldest; --i) | |
5706 | szone->large_entry_cache[i] = szone->large_entry_cache[i - 1]; | |
5707 | ||
5708 | if (szone->large_entry_cache_oldest < LARGE_ENTRY_CACHE_SIZE - 1) | |
5709 | szone->large_entry_cache_oldest++; | |
5710 | else | |
5711 | szone->large_entry_cache_oldest = 0; | |
5712 | } | |
5713 | ||
5714 | } else { | |
5715 | // By trichotomy, large_entry_cache_newest == large_entry_cache_oldest. | |
5716 | // That implies best == large_entry_cache_newest == large_entry_cache_oldest | |
5717 | // and the ring is now empty. | |
5718 | szone->large_entry_cache[best].address = 0; | |
5719 | szone->large_entry_cache[best].size = 0; | |
5720 | szone->large_entry_cache[best].did_madvise_reusable = FALSE; | |
5721 | } | |
5722 | ||
5723 | if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) { | |
5724 | // density of hash table too high; grow table | |
5725 | // we do that under lock to avoid a race | |
5726 | large_entry_t *entries = large_entries_grow_no_lock(szone, &range_to_deallocate); | |
5727 | if (entries == NULL) { | |
5728 | SZONE_UNLOCK(szone); | |
5729 | return NULL; | |
5730 | } | |
5731 | } | |
5732 | ||
5733 | large_entry.address = (vm_address_t)addr; | |
5734 | large_entry.size = best_size; | |
5735 | large_entry.did_madvise_reusable = FALSE; | |
5736 | large_entry_insert_no_lock(szone, large_entry); | |
5737 | ||
5738 | szone->num_large_objects_in_use ++; | |
5739 | szone->num_bytes_in_large_objects += best_size; | |
5740 | if (!was_madvised_reusable) | |
1f2f436a A |
5741 | szone->large_entry_cache_reserve_bytes -= best_size; |
5742 | ||
5743 | szone->large_entry_cache_bytes -= best_size; | |
5744 | ||
5745 | if (szone->flotsam_enabled && szone->large_entry_cache_bytes < SZONE_FLOTSAM_THRESHOLD_LOW) { | |
5746 | szone->flotsam_enabled = FALSE; | |
5747 | } | |
5748 | ||
34e8f829 A |
5749 | SZONE_UNLOCK(szone); |
5750 | ||
5751 | if (range_to_deallocate.size) { | |
5752 | // we deallocate outside the lock | |
5753 | deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0); | |
5754 | } | |
5755 | ||
5756 | // Perform the madvise() outside the lock. | |
5757 | // Typically the madvise() is successful and we'll quickly return from this routine. | |
5758 | // In the unusual case of failure, reacquire the lock to unwind. | |
1f2f436a A |
5759 | #if TARGET_OS_EMBEDDED |
5760 | // Ok to do this madvise on embedded because we won't call MADV_FREE_REUSABLE on a large | |
5761 | // cache block twice without MADV_FREE_REUSE in between. | |
5762 | #endif | |
34e8f829 A |
5763 | if (was_madvised_reusable && -1 == madvise(addr, size, MADV_FREE_REUSE)) { |
5764 | /* -1 return: VM map entry change makes this unfit for reuse. */ | |
7ba935f9 | 5765 | #if DEBUG_MADVISE |
1f2f436a A |
5766 | szone_error(szone, 0, "large_malloc madvise(..., MADV_FREE_REUSE) failed", |
5767 | addr, "length=%d\n", size); | |
34e8f829 A |
5768 | #endif |
5769 | ||
5770 | SZONE_LOCK(szone); | |
5771 | szone->num_large_objects_in_use--; | |
5772 | szone->num_bytes_in_large_objects -= large_entry.size; | |
5773 | ||
5774 | // Re-acquire "entry" after the interval just above where we let go of the lock. | |
5775 | large_entry_t *entry = large_entry_for_pointer_no_lock(szone, addr); | |
5776 | if (NULL == entry) { | |
5777 | szone_error(szone, 1, "entry for pointer being discarded from death-row vanished", addr, NULL); | |
5778 | SZONE_UNLOCK(szone); | |
5779 | } else { | |
5780 | ||
5781 | range_to_deallocate = large_entry_free_no_lock(szone, entry); | |
5782 | SZONE_UNLOCK(szone); | |
5783 | ||
5784 | if (range_to_deallocate.size) { | |
5785 | // we deallocate outside the lock | |
5786 | deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0); | |
5787 | } | |
5788 | } | |
5789 | /* Fall through to allocate_pages() afresh. */ | |
5790 | } else { | |
5791 | if (cleared_requested) { | |
5792 | memset(addr, 0, size); | |
5793 | } | |
5794 | ||
5795 | return addr; | |
5796 | } | |
5797 | } else { | |
5798 | SZONE_UNLOCK(szone); | |
5799 | } | |
5800 | } | |
5801 | ||
5802 | range_to_deallocate.size = 0; | |
5803 | range_to_deallocate.address = 0; | |
5804 | #endif /* LARGE_CACHE */ | |
5805 | ||
5806 | addr = allocate_pages(szone, size, alignment, szone->debug_flags, VM_MEMORY_MALLOC_LARGE); | |
5807 | if (addr == NULL) { | |
5808 | return NULL; | |
5809 | } | |
5810 | ||
5811 | SZONE_LOCK(szone); | |
5812 | if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) { | |
5813 | // density of hash table too high; grow table | |
5814 | // we do that under lock to avoid a race | |
5815 | large_entry_t *entries = large_entries_grow_no_lock(szone, &range_to_deallocate); | |
5816 | if (entries == NULL) { | |
5817 | SZONE_UNLOCK(szone); | |
5818 | return NULL; | |
5819 | } | |
5820 | } | |
5821 | ||
5822 | large_entry.address = (vm_address_t)addr; | |
5823 | large_entry.size = size; | |
5824 | large_entry.did_madvise_reusable = FALSE; | |
5825 | large_entry_insert_no_lock(szone, large_entry); | |
5826 | ||
5827 | szone->num_large_objects_in_use ++; | |
5828 | szone->num_bytes_in_large_objects += size; | |
5829 | SZONE_UNLOCK(szone); | |
5830 | ||
5831 | if (range_to_deallocate.size) { | |
5832 | // we deallocate outside the lock | |
5833 | deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0); | |
5834 | } | |
5835 | return addr; | |
5836 | } | |
5837 | ||
5838 | static NOINLINE void | |
5839 | free_large(szone_t *szone, void *ptr) | |
5840 | { | |
5841 | // We have established ptr is page-aligned and neither tiny nor small | |
5842 | large_entry_t *entry; | |
5843 | vm_range_t vm_range_to_deallocate; | |
5844 | ||
5845 | SZONE_LOCK(szone); | |
5846 | entry = large_entry_for_pointer_no_lock(szone, ptr); | |
5847 | if (entry) { | |
5848 | #if LARGE_CACHE | |
5849 | #ifndef MADV_CAN_REUSE | |
5850 | #define MADV_CAN_REUSE 9 /* per Francois, for testing until xnu is resubmitted to B&I */ | |
5851 | #endif | |
5852 | if (entry->size < LARGE_CACHE_SIZE_ENTRY_LIMIT && | |
5853 | -1 != madvise((void *)(entry->address), entry->size, MADV_CAN_REUSE)) { // Put the large_entry_t on the death-row cache? | |
5854 | int idx = szone->large_entry_cache_newest, stop_idx = szone->large_entry_cache_oldest; | |
5855 | large_entry_t this_entry = *entry; // Make a local copy, "entry" is volatile when lock is let go. | |
5856 | boolean_t reusable = TRUE; | |
1f2f436a | 5857 | boolean_t should_madvise = szone->large_entry_cache_reserve_bytes + this_entry.size > szone->large_entry_cache_reserve_limit; |
34e8f829 A |
5858 | |
5859 | // Already freed? | |
5860 | // [Note that repeated entries in death-row risk vending the same entry subsequently | |
5861 | // to two different malloc() calls. By checking here the (illegal) double free | |
5862 | // is accommodated, matching the behavior of the previous implementation.] | |
5863 | while (1) { // Scan large_entry_cache starting with most recent entry | |
5864 | if (szone->large_entry_cache[idx].address == entry->address) { | |
5865 | szone_error(szone, 1, "pointer being freed already on death-row", ptr, NULL); | |
5866 | SZONE_UNLOCK(szone); | |
5867 | return; | |
5868 | } | |
5869 | ||
5870 | if (idx == stop_idx) // exhausted live ring? | |
5871 | break; | |
5872 | ||
5873 | if (idx) | |
5874 | idx--; // bump idx down | |
5875 | else | |
5876 | idx = LARGE_ENTRY_CACHE_SIZE - 1; // wrap idx | |
5877 | } | |
5878 | ||
5879 | SZONE_UNLOCK(szone); | |
5880 | ||
5881 | if (szone->debug_flags & SCALABLE_MALLOC_PURGEABLE) { // Are we a purgeable zone? | |
5882 | int state = VM_PURGABLE_NONVOLATILE; // restore to default condition | |
5883 | ||
5884 | if (KERN_SUCCESS != vm_purgable_control(mach_task_self(), this_entry.address, VM_PURGABLE_SET_STATE, &state)) { | |
1f2f436a A |
5885 | malloc_printf("*** can't vm_purgable_control(..., VM_PURGABLE_SET_STATE) for large freed block at %p\n", |
5886 | this_entry.address); | |
34e8f829 A |
5887 | reusable = FALSE; |
5888 | } | |
5889 | } | |
5890 | ||
5891 | if (szone->large_legacy_reset_mprotect) { // Linked for Leopard? | |
5892 | // Accommodate Leopard apps that (illegally) mprotect() their own guard pages on large malloc'd allocations |
1f2f436a | 5893 | int err = mprotect((void *)(this_entry.address), this_entry.size, PROT_READ | PROT_WRITE); |
34e8f829 A |
5894 | if (err) { |
5895 | malloc_printf("*** can't reset protection for large freed block at %p\n", this_entry.address); | |
5896 | reusable = FALSE; | |
5897 | } | |
5898 | } | |
5899 | ||
1f2f436a | 5900 | // madvise(..., MADV_FREE_REUSABLE) death-row arrivals if hoarding would exceed large_entry_cache_reserve_limit
34e8f829 A |
5901 | if (should_madvise) { |
5902 | // Issue madvise to avoid paging out the dirtied free()'d pages in "entry" | |
5903 | MAGMALLOC_MADVFREEREGION((void *)szone, (void *)0, (void *)(this_entry.address), this_entry.size); // DTrace USDT Probe | |
5904 | ||
1f2f436a A |
5905 | #if TARGET_OS_EMBEDDED |
5906 | // Ok to do this madvise on embedded because we won't call MADV_FREE_REUSABLE on a large | |
5907 | // cache block twice without MADV_FREE_REUSE in between. | |
5908 | #endif | |
34e8f829 A |
5909 | if (-1 == madvise((void *)(this_entry.address), this_entry.size, MADV_FREE_REUSABLE)) { |
5910 | /* -1 return: VM map entry change makes this unfit for reuse. */ | |
7ba935f9 | 5911 | #if DEBUG_MADVISE |
1f2f436a A |
5912 | szone_error(szone, 0, "free_large madvise(..., MADV_FREE_REUSABLE) failed", |
5913 | (void *)this_entry.address, "length=%d\n", this_entry.size); | |
34e8f829 A |
5914 | #endif |
5915 | reusable = FALSE; | |
5916 | } | |
5917 | } | |
5918 | ||
5919 | SZONE_LOCK(szone); | |
5920 | ||
5921 | // Re-acquire "entry" after the interval just above where we let go of the lock. | |
5922 | entry = large_entry_for_pointer_no_lock(szone, ptr); | |
5923 | if (NULL == entry) { | |
5924 | szone_error(szone, 1, "entry for pointer being freed from death-row vanished", ptr, NULL); | |
5925 | SZONE_UNLOCK(szone); | |
5926 | return; | |
5927 | } | |
5928 | ||
5929 | // Add "entry" to death-row ring | |
5930 | if (reusable) { | |
5931 | int idx = szone->large_entry_cache_newest; // Most recently occupied | |
5932 | vm_address_t addr; | |
5933 | size_t adjsize; | |
5934 | ||
5935 | if (szone->large_entry_cache_newest == szone->large_entry_cache_oldest && | |
5936 | 0 == szone->large_entry_cache[idx].address) { | |
5937 | // Ring is empty, idx is good as it stands | |
5938 | addr = 0; | |
5939 | adjsize = 0; | |
5940 | } else { | |
5941 | // Extend the queue to the "right" by bumping up large_entry_cache_newest | |
5942 | if (idx == LARGE_ENTRY_CACHE_SIZE - 1) | |
5943 | idx = 0; // Wrap index | |
5944 | else | |
5945 | idx++; // Bump index | |
5946 | ||
5947 | if (idx == szone->large_entry_cache_oldest) { // Fully occupied | |
5948 | // Drop this entry from the cache and deallocate the VM | |
5949 | addr = szone->large_entry_cache[idx].address; | |
5950 | adjsize = szone->large_entry_cache[idx].size; | |
1f2f436a | 5951 | szone->large_entry_cache_bytes -= adjsize; |
34e8f829 | 5952 | if (!szone->large_entry_cache[idx].did_madvise_reusable) |
1f2f436a | 5953 | szone->large_entry_cache_reserve_bytes -= adjsize; |
34e8f829 A |
5954 | } else { |
5955 | // Using an unoccupied cache slot | |
5956 | addr = 0; | |
5957 | adjsize = 0; | |
5958 | } | |
5959 | } | |
5960 | ||
5961 | if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE)) | |
5962 | memset((void *)(entry->address), 0x55, entry->size); | |
5963 | ||
5964 | entry->did_madvise_reusable = should_madvise; // Was madvise()'d above? | |
5965 | if (!should_madvise) // Entered on death-row without madvise() => up the hoard total | |
1f2f436a | 5966 | szone->large_entry_cache_reserve_bytes += entry->size; |
34e8f829 | 5967 | |
1f2f436a A |
5968 | szone->large_entry_cache_bytes += entry->size; |
5969 | ||
5970 | if (!szone->flotsam_enabled && szone->large_entry_cache_bytes > SZONE_FLOTSAM_THRESHOLD_HIGH) { | |
5971 | szone->flotsam_enabled = TRUE; | |
5972 | } | |
5973 | ||
34e8f829 A |
5974 | szone->large_entry_cache[idx] = *entry; |
5975 | szone->large_entry_cache_newest = idx; | |
5976 | ||
5977 | szone->num_large_objects_in_use--; | |
5978 | szone->num_bytes_in_large_objects -= entry->size; | |
5979 | ||
5980 | (void)large_entry_free_no_lock(szone, entry); | |
5981 | ||
5982 | if (0 == addr) { | |
5983 | SZONE_UNLOCK(szone); | |
5984 | return; | |
5985 | } | |
5986 | ||
5987 | // Fall through to drop large_entry_cache_oldest from the cache, | |
5988 | // and then deallocate its pages. | |
5989 | ||
5990 | // Trim the queue on the "left" by bumping up large_entry_cache_oldest | |
5991 | if (szone->large_entry_cache_oldest == LARGE_ENTRY_CACHE_SIZE - 1) | |
5992 | szone->large_entry_cache_oldest = 0; | |
5993 | else | |
5994 | szone->large_entry_cache_oldest++; | |
5995 | ||
5996 | // we deallocate_pages, including guard pages, outside the lock | |
5997 | SZONE_UNLOCK(szone); | |
5998 | deallocate_pages(szone, (void *)addr, (size_t)adjsize, 0); | |
5999 | return; | |
6000 | } else { | |
6001 | /* fall through to discard an allocation that is not reusable */ | |
6002 | } | |
6003 | } | |
6004 | #endif /* LARGE_CACHE */ | |
6005 | ||
6006 | szone->num_large_objects_in_use--; | |
6007 | szone->num_bytes_in_large_objects -= entry->size; | |
6008 | ||
6009 | vm_range_to_deallocate = large_entry_free_no_lock(szone, entry); | |
6010 | } else { | |
6011 | #if DEBUG_MALLOC | |
6012 | large_debug_print(szone); | |
6013 | #endif | |
6014 | szone_error(szone, 1, "pointer being freed was not allocated", ptr, NULL); | |
6015 | SZONE_UNLOCK(szone); | |
6016 | return; | |
6017 | } | |
6018 | SZONE_UNLOCK(szone); // we release the lock asap | |
6019 | CHECK(szone, __PRETTY_FUNCTION__); | |
6020 | ||
6021 | // we deallocate_pages, including guard pages, outside the lock | |
6022 | if (vm_range_to_deallocate.address) { | |
6023 | #if DEBUG_MALLOC | |
6024 | // FIXME: large_entry_for_pointer_no_lock() needs the lock held ... | |
6025 | if (large_entry_for_pointer_no_lock(szone, (void *)vm_range_to_deallocate.address)) { | |
6026 | malloc_printf("*** invariant broken: %p still in use num_large_entries=%d\n", | |
6027 | vm_range_to_deallocate.address, szone->num_large_entries); | |
6028 | large_debug_print(szone); | |
6029 | szone_sleep(); | |
6030 | } | |
6031 | #endif | |
6032 | deallocate_pages(szone, (void *)vm_range_to_deallocate.address, (size_t)vm_range_to_deallocate.size, 0); | |
6033 | } | |
6034 | } | |
6035 | ||
1f2f436a A |
6036 | static INLINE void * |
6037 | large_try_shrink_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_good_size) | |
6038 | { | |
6039 | size_t shrinkage = old_size - new_good_size; | |
6040 | ||
6041 | if (shrinkage) { | |
6042 | SZONE_LOCK(szone); | |
6043 | /* contract existing large entry */ | |
6044 | large_entry_t *large_entry = large_entry_for_pointer_no_lock(szone, ptr); | |
6045 | if (!large_entry) { | |
6046 | szone_error(szone, 1, "large entry reallocated is not properly in table", ptr, NULL); | |
6047 | SZONE_UNLOCK(szone); | |
6048 | return ptr; | |
6049 | } | |
6050 | ||
6051 | large_entry->address = (vm_address_t)ptr; | |
6052 | large_entry->size = new_good_size; | |
6053 | szone->num_bytes_in_large_objects -= shrinkage; | |
6054 | SZONE_UNLOCK(szone); // we release the lock asap | |
6055 | ||
6056 | deallocate_pages(szone, (void *)((uintptr_t)ptr + new_good_size), shrinkage, 0); | |
6057 | } | |
6058 | return ptr; | |
6059 | } | |
6060 | ||
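/*
 * In-place growth of a large allocation: it succeeds only if no other large entry already
 * claims the address just past the block and vm_allocate() can deliver the extra pages at
 * exactly that address (no VM_FLAGS_ANYWHERE requested). Otherwise it returns 0.
 */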
34e8f829 A |
6061 | static INLINE int |
6062 | large_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) | |
6063 | { | |
6064 | vm_address_t addr = (vm_address_t)ptr + old_size; | |
6065 | large_entry_t *large_entry; | |
6066 | kern_return_t err; | |
6067 | ||
6068 | SZONE_LOCK(szone); | |
6069 | large_entry = large_entry_for_pointer_no_lock(szone, (void *)addr); | |
6070 | SZONE_UNLOCK(szone); | |
6071 | ||
6072 | if (large_entry) { // check if "addr = ptr + old_size" is already spoken for | |
6073 | return 0; // large pointer already exists in table - extension is not going to work | |
6074 | } | |
6075 | ||
6076 | new_size = round_page(new_size); | |
6077 | /* | |
6078 | * Ask for allocation at a specific address, and mark as realloc | |
6079 | * to request coalescing with previous realloc'ed extensions. | |
6080 | */ | |
6081 | err = vm_allocate(mach_task_self(), &addr, new_size - old_size, VM_MAKE_TAG(VM_MEMORY_REALLOC)); | |
6082 | if (err != KERN_SUCCESS) { | |
6083 | return 0; | |
6084 | } | |
6085 | ||
6086 | SZONE_LOCK(szone); | |
6087 | /* extend existing large entry */ | |
6088 | large_entry = large_entry_for_pointer_no_lock(szone, ptr); | |
6089 | if (!large_entry) { | |
6090 | szone_error(szone, 1, "large entry reallocated is not properly in table", ptr, NULL); | |
6091 | SZONE_UNLOCK(szone); | |
6092 | return 0; // Bail, leaking "addr" | |
6093 | } | |
6094 | ||
6095 | large_entry->address = (vm_address_t)ptr; | |
6096 | large_entry->size = new_size; | |
6097 | szone->num_bytes_in_large_objects += new_size - old_size; | |
6098 | SZONE_UNLOCK(szone); // we release the lock asap | |
6099 | ||
6100 | return 1; | |
6101 | } | |
6102 | ||
6103 | /********************* Zone call backs ************************/ | |
6104 | /* | |
6105 | * Mark these NOINLINE to avoid bloating the purgeable zone call backs | |
6106 | */ | |
6107 | static NOINLINE void | |
6108 | szone_free(szone_t *szone, void *ptr) | |
6109 | { | |
6110 | region_t tiny_region; | |
6111 | region_t small_region; | |
6112 | ||
6113 | #if DEBUG_MALLOC | |
6114 | if (LOG(szone, ptr)) | |
6115 | malloc_printf("in szone_free with %p\n", ptr); | |
6116 | #endif | |
6117 | if (!ptr) | |
6118 | return; | |
6119 | /* | |
6120 | * Try to free to a tiny region. | |
6121 | */ | |
6122 | if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) { | |
6123 | szone_error(szone, 1, "Non-aligned pointer being freed", ptr, NULL); | |
6124 | return; | |
6125 | } | |
6126 | if ((tiny_region = tiny_region_for_ptr_no_lock(szone, ptr)) != NULL) { | |
6127 | if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) { | |
6128 | szone_error(szone, 1, "Pointer to metadata being freed", ptr, NULL); | |
6129 | return; | |
6130 | } | |
6131 | free_tiny(szone, ptr, tiny_region, 0); | |
6132 | return; | |
6133 | } | |
6134 | ||
6135 | /* | |
6136 | * Try to free to a small region. | |
6137 | */ | |
6138 | if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) { | |
6139 | szone_error(szone, 1, "Non-aligned pointer being freed (2)", ptr, NULL); | |
6140 | return; | |
6141 | } | |
6142 | if ((small_region = small_region_for_ptr_no_lock(szone, ptr)) != NULL) { | |
6143 | if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) { | |
6144 | szone_error(szone, 1, "Pointer to metadata being freed (2)", ptr, NULL); | |
6145 | return; | |
6146 | } | |
6147 | free_small(szone, ptr, small_region, 0); | |
6148 | return; | |
6149 | } | |
6150 | ||
6151 | /* check that it's a legal large allocation */ | |
6152 | if ((uintptr_t)ptr & (vm_page_size - 1)) { | |
6153 | szone_error(szone, 1, "non-page-aligned, non-allocated pointer being freed", ptr, NULL); | |
6154 | return; | |
6155 | } | |
6156 | free_large(szone, ptr); | |
6157 | } | |
6158 | ||
6159 | static NOINLINE void | |
6160 | szone_free_definite_size(szone_t *szone, void *ptr, size_t size) | |
6161 | { | |
6162 | #if DEBUG_MALLOC | |
6163 | if (LOG(szone, ptr)) | |
6164 | malloc_printf("in szone_free_definite_size with %p\n", ptr); | |
6165 | ||
6166 | if (0 == size) { | |
6167 | szone_error(szone, 1, "pointer of size zero being freed", ptr, NULL); | |
6168 | return; | |
6169 | } | |
6170 | ||
6171 | #endif | |
6172 | if (!ptr) | |
6173 | return; | |
6174 | ||
6175 | /* | |
6176 | * Try to free to a tiny region. | |
6177 | */ | |
6178 | if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) { | |
6179 | szone_error(szone, 1, "Non-aligned pointer being freed", ptr, NULL); | |
6180 | return; | |
6181 | } | |
6182 | if (size <= (NUM_TINY_SLOTS - 1)*TINY_QUANTUM) { | |
6183 | if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) { | |
6184 | szone_error(szone, 1, "Pointer to metadata being freed", ptr, NULL); | |
6185 | return; | |
6186 | } | |
6187 | free_tiny(szone, ptr, TINY_REGION_FOR_PTR(ptr), size); | |
6188 | return; | |
6189 | } | |
6190 | ||
6191 | /* | |
6192 | * Try to free to a small region. | |
6193 | */ | |
6194 | if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) { | |
6195 | szone_error(szone, 1, "Non-aligned pointer being freed (2)", ptr, NULL); | |
6196 | return; | |
6197 | } | |
1f2f436a | 6198 | if (size <= szone->large_threshold) { |
34e8f829 A |
6199 | if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) { |
6200 | szone_error(szone, 1, "Pointer to metadata being freed (2)", ptr, NULL); | |
6201 | return; | |
6202 | } | |
6203 | free_small(szone, ptr, SMALL_REGION_FOR_PTR(ptr), size); | |
6204 | return; | |
6205 | } | |
6206 | ||
6207 | /* check that it's a legal large allocation */ | |
6208 | if ((uintptr_t)ptr & (vm_page_size - 1)) { | |
6209 | szone_error(szone, 1, "non-page-aligned, non-allocated pointer being freed", ptr, NULL); | |
6210 | return; | |
6211 | } | |
6212 | free_large(szone, ptr); | |
6213 | } | |
6214 | ||
6215 | static NOINLINE void * | |
6216 | szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested) | |
6217 | { | |
6218 | void *ptr; | |
6219 | msize_t msize; | |
6220 | ||
6221 | if (size <= (NUM_TINY_SLOTS - 1)*TINY_QUANTUM) { | |
6222 | // think tiny | |
6223 | msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); | |
6224 | if (!msize) | |
6225 | msize = 1; | |
6226 | ptr = tiny_malloc_should_clear(szone, msize, cleared_requested); | |
1f2f436a | 6227 | } else if (size <= szone->large_threshold) { |
34e8f829 A |
6228 | // think small |
6229 | msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1); | |
1f2f436a | 6230 | if (!msize) |
34e8f829 A |
6231 | msize = 1; |
6232 | ptr = small_malloc_should_clear(szone, msize, cleared_requested); | |
6233 | } else { | |
6234 | // large | |
6235 | size_t num_pages = round_page(size) >> vm_page_shift; | |
6236 | if (num_pages == 0) /* Overflowed */ | |
6237 | ptr = 0; | |
6238 | else | |
6239 | ptr = large_malloc(szone, num_pages, 0, cleared_requested); | |
6240 | } | |
6241 | #if DEBUG_MALLOC | |
6242 | if (LOG(szone, ptr)) | |
6243 | malloc_printf("szone_malloc returned %p\n", ptr); | |
6244 | #endif | |
6245 | /* | |
6246 | * If requested, scribble on allocated memory. | |
6247 | */ | |
6248 | if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && ptr && !cleared_requested && size) | |
6249 | memset(ptr, 0xaa, size); | |
6250 | ||
6251 | return ptr; | |
6252 | } | |
6253 | ||
6254 | static NOINLINE void * | |
6255 | szone_malloc(szone_t *szone, size_t size) { | |
6256 | return szone_malloc_should_clear(szone, size, 0); | |
6257 | } | |
6258 | ||
6259 | static NOINLINE void * | |
6260 | szone_calloc(szone_t *szone, size_t num_items, size_t size) | |
6261 | { | |
6262 | size_t total_bytes = num_items * size; | |
6263 | ||
6264 | // Check for overflow of integer multiplication | |
6265 | if (num_items > 1) { | |
6266 | #if __LP64__ /* size_t is uint64_t */ | |
6267 | if ((num_items | size) & 0xffffffff00000000ul) { | |
6268 | // num_items or size equals or exceeds sqrt(2^64) == 2^32, appeal to wider arithmetic | |
6269 | __uint128_t product = ((__uint128_t)num_items) * ((__uint128_t)size); | |
6270 | if ((uint64_t)(product >> 64)) // compiles to test on upper register of register pair | |
6271 | return NULL; | |
6272 | } | |
6273 | #else /* size_t is uint32_t */ | |
6274 | if ((num_items | size) & 0xffff0000ul) { | |
6275 | // num_items or size equals or exceeds sqrt(2^32) == 2^16, appeal to wider arithmetic | |
6276 | uint64_t product = ((uint64_t)num_items) * ((uint64_t)size); | |
6277 | if ((uint32_t)(product >> 32)) // compiles to test on upper register of register pair | |
6278 | return NULL; | |
6279 | } | |
6280 | #endif | |
6281 | } | |
6282 | ||
6283 | return szone_malloc_should_clear(szone, total_bytes, 1); | |
6284 | } | |
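/*
 * Illustration (a minimal sketch, not part of this zone): the same
 * multiplication-overflow test szone_calloc() performs above, assuming a
 * 64-bit size_t and a compiler that provides __uint128_t. The helper name
 * is hypothetical.
 */
#if 0
static int
calloc_mul_overflows(size_t num_items, size_t size, size_t *total)
{
    // If either operand reaches 2^32, the 64-bit product may not fit;
    // redo the multiply in 128 bits and test the upper half.
    if ((num_items | size) & 0xffffffff00000000ul) {
        __uint128_t product = ((__uint128_t)num_items) * ((__uint128_t)size);
        if ((uint64_t)(product >> 64))
            return 1;   // overflow: szone_calloc() returns NULL in this case
    }
    *total = num_items * size;  // safe: both operands fit in 32 bits, or the 128-bit check passed
    return 0;
}
#endif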
6285 | ||
6286 | static NOINLINE void * | |
6287 | szone_valloc(szone_t *szone, size_t size) | |
6288 | { | |
6289 | void *ptr; | |
6290 | ||
6291 | if (size <= szone->large_threshold) { | |
6292 | ptr = szone_memalign(szone, vm_page_size, size); | |
6293 | } else { | |
6294 | size_t num_pages; | |
6295 | ||
6296 | num_pages = round_page(size) >> vm_page_shift; | |
6297 | ptr = large_malloc(szone, num_pages, 0, 0); | |
6298 | } | |
6299 | ||
6300 | #if DEBUG_MALLOC | |
6301 | if (LOG(szone, ptr)) | |
6302 | malloc_printf("szone_valloc returned %p\n", ptr); | |
6303 | #endif | |
6304 | return ptr; | |
6305 | } | |
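/*
 * Illustration (a caller-side sketch, not part of this zone): valloc()
 * requests arrive here and come back page-aligned whether they are served
 * by memalign-over-small or by the large allocator.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

static void
valloc_alignment_demo(void)
{
    void *p = valloc(1000);

    assert(0 == ((uintptr_t)p & (uintptr_t)(getpagesize() - 1)));   // page-aligned by contract
    free(p);
}
#endif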
6306 | ||
6307 | /* Isolate PIC-base load (for __is_threaded) here. */ | |
6308 | static NOINLINE size_t | |
6309 | szone_size_try_large(szone_t *szone, const void *ptr) | |
6310 | { | |
6311 | size_t size = 0; | |
6312 | large_entry_t *entry; | |
6313 | ||
6314 | SZONE_LOCK(szone); | |
6315 | entry = large_entry_for_pointer_no_lock(szone, ptr); | |
6316 | if (entry) { | |
6317 | size = entry->size; | |
6318 | } | |
6319 | SZONE_UNLOCK(szone); | |
6320 | #if DEBUG_MALLOC | |
6321 | if (LOG(szone, ptr)) { | |
6322 | malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)size); | |
6323 | } | |
6324 | #endif | |
6325 | return size; | |
6326 | } | |
6327 | ||
6328 | static NOINLINE size_t | |
6329 | szone_size(szone_t *szone, const void *ptr) | |
6330 | { | |
6331 | boolean_t is_free; | |
6332 | msize_t msize, msize_and_free; | |
6333 | ||
6334 | if (!ptr) | |
6335 | return 0; | |
6336 | #if DEBUG_MALLOC | |
6337 | if (LOG(szone, ptr)) { | |
6338 | malloc_printf("in szone_size for %p (szone=%p)\n", ptr, szone); | |
6339 | } | |
6340 | #endif | |
6341 | ||
6342 | /* | |
6343 | * Look for it in a tiny region. | |
6344 | */ | |
6345 | if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) | |
6346 | return 0; | |
6347 | if (tiny_region_for_ptr_no_lock(szone, ptr)) { | |
6348 | if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) | |
6349 | return 0; | |
6350 | msize = get_tiny_meta_header(ptr, &is_free); | |
6351 | if (is_free) | |
6352 | return 0; | |
6353 | #if TINY_CACHE | |
6354 | { | |
6355 | mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)); | |
1f2f436a A |
6356 | if (DEPOT_MAGAZINE_INDEX != mag_index) { |
6357 | magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]); | |
34e8f829 | 6358 | |
1f2f436a A |
6359 | if (msize < TINY_QUANTUM && ptr == (void *)((uintptr_t)(tiny_mag_ptr->mag_last_free) & ~ (TINY_QUANTUM - 1))) |
6360 | return 0; | |
6361 | } else { | |
6362 | for (mag_index = 0; mag_index < szone->num_tiny_magazines; mag_index++) { | |
6363 | magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]); | |
6364 | ||
6365 | if (msize < TINY_QUANTUM && ptr == (void *)((uintptr_t)(tiny_mag_ptr->mag_last_free) & ~ (TINY_QUANTUM - 1))) | |
6366 | return 0; | |
6367 | } | |
6368 | } | |
34e8f829 A |
6369 | } |
6370 | #endif | |
6371 | return TINY_BYTES_FOR_MSIZE(msize); | |
6372 | } | |
6373 | ||
6374 | /* | |
6375 | * Look for it in a small region. | |
6376 | */ | |
6377 | if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) | |
6378 | return 0; | |
6379 | if (small_region_for_ptr_no_lock(szone, ptr)) { | |
6380 | if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) | |
6381 | return 0; | |
6382 | msize_and_free = *SMALL_METADATA_FOR_PTR(ptr); | |
6383 | if (msize_and_free & SMALL_IS_FREE) | |
6384 | return 0; | |
6385 | #if SMALL_CACHE | |
6386 | { | |
6387 | mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)); | |
1f2f436a A |
6388 | if (DEPOT_MAGAZINE_INDEX != mag_index) { |
6389 | magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]); | |
34e8f829 | 6390 | |
1f2f436a A |
6391 | if (ptr == (void *)((uintptr_t)(small_mag_ptr->mag_last_free) & ~ (SMALL_QUANTUM - 1))) |
6392 | return 0; | |
6393 | } else { | |
6394 | for (mag_index = 0; mag_index < szone->num_small_magazines; mag_index++) { | |
6395 | magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]); | |
6396 | ||
6397 | if (ptr == (void *)((uintptr_t)(small_mag_ptr->mag_last_free) & ~ (SMALL_QUANTUM - 1))) | |
6398 | return 0; | |
6399 | } | |
6400 | } | |
34e8f829 A |
6401 | } |
6402 | #endif | |
6403 | return SMALL_BYTES_FOR_MSIZE(msize_and_free); | |
6404 | } | |
6405 | ||
6406 | /* | |
6407 | * If not page-aligned, it cannot have come from a large allocation. | |
6408 | */ | |
6409 | if ((uintptr_t)ptr & (vm_page_size - 1)) | |
6410 | return 0; | |
6411 | ||
6412 | /* | |
6413 | * Look for it in a large entry. | |
6414 | */ | |
6415 | return szone_size_try_large(szone, ptr); | |
6416 | } | |
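/*
 * Illustration (a caller-side sketch, not part of this zone): the public
 * malloc_size() entry point is expected to reach szone_size() for blocks
 * owned by this zone; a return of 0 means the zone does not recognize the
 * pointer as a live allocation.
 */
#if 0
#include <malloc/malloc.h>

static int
is_live_heap_block(const void *ptr)
{
    return malloc_size(ptr) != 0;   // 0 for freed, misaligned, or foreign pointers
}
#endif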
6417 | ||
6418 | static NOINLINE void * | |
6419 | szone_realloc(szone_t *szone, void *ptr, size_t new_size) | |
6420 | { | |
1f2f436a | 6421 | size_t old_size, new_good_size, valid_size; |
34e8f829 A |
6422 | void *new_ptr; |
6423 | ||
6424 | #if DEBUG_MALLOC | |
6425 | if (LOG(szone, ptr)) { | |
6426 | malloc_printf("in szone_realloc for %p, %d\n", ptr, (unsigned)new_size); | |
6427 | } | |
6428 | #endif | |
1f2f436a A |
6429 | if (NULL == ptr) { |
6430 | // If ptr is a null pointer, realloc() shall be equivalent to malloc() for the specified size. | |
6431 | return szone_malloc(szone, new_size); | |
6432 | } else if (0 == new_size) { | |
6433 | // If size is 0 and ptr is not a null pointer, the object pointed to is freed. | |
6434 | szone_free(szone, ptr); | |
6435 | // If size is 0, either a null pointer or a unique pointer that can be successfully passed | |
6436 | // to free() shall be returned. | |
6437 | return szone_malloc(szone, 1); | |
34e8f829 | 6438 | } |
1f2f436a | 6439 | |
34e8f829 A |
6440 | old_size = szone_size(szone, ptr); |
6441 | if (!old_size) { | |
6442 | szone_error(szone, 1, "pointer being reallocated was not allocated", ptr, NULL); | |
6443 | return NULL; | |
6444 | } | |
1f2f436a A |
6445 | |
6446 | new_good_size = szone_good_size(szone, new_size); | |
6447 | if (new_good_size == old_size) { // Existing allocation is best fit evar? | |
34e8f829 | 6448 | return ptr; |
1f2f436a | 6449 | } |
34e8f829 A |
6450 | |
6451 | /* | |
6452 | * If the new size suits the tiny allocator and the pointer being resized | |
6453 | * belongs to a tiny region, try to reallocate in-place. | |
6454 | */ | |
1f2f436a A |
6455 | if (new_good_size <= (NUM_TINY_SLOTS - 1) * TINY_QUANTUM) { |
6456 | if (old_size <= (NUM_TINY_SLOTS - 1) * TINY_QUANTUM) { | |
6457 | if (new_good_size <= (old_size >> 1)) { | |
6458 | /* | |
6459 | * Serious shrinkage (more than half). free() the excess. | |
6460 | */ | |
6461 | return tiny_try_shrink_in_place(szone, ptr, old_size, new_good_size); | |
6462 | } else if (new_good_size <= old_size) { | |
6463 | /* | |
6464 | * new_good_size smaller than old_size but not by much (less than half). | |
6465 | * Avoid thrashing at the expense of some wasted storage. | |
6466 | */ | |
6467 | return ptr; | |
6468 | } else if (tiny_try_realloc_in_place(szone, ptr, old_size, new_good_size)) { // try to grow the allocation | |
34e8f829 A |
6469 | return ptr; |
6470 | } | |
6471 | } | |
6472 | ||
6473 | /* | |
1f2f436a | 6474 | * Else if the new size suits the small allocator and the pointer being resized |
34e8f829 A |
6475 | * belongs to a small region, and we're not protecting the small allocations, | |
6476 | * try to reallocate in-place. | |
6477 | */ | |
1f2f436a A |
6478 | } else if (new_good_size <= szone->large_threshold) { |
6479 | if ((NUM_TINY_SLOTS - 1) * TINY_QUANTUM < old_size && old_size <= szone->large_threshold) { | |
6480 | if (new_good_size <= (old_size >> 1)) { | |
6481 | return small_try_shrink_in_place(szone, ptr, old_size, new_good_size); | |
6482 | } else if (new_good_size <= old_size) { | |
6483 | return ptr; | |
6484 | } else if (small_try_realloc_in_place(szone, ptr, old_size, new_good_size)) { | |
34e8f829 A |
6485 | return ptr; |
6486 | } | |
1f2f436a | 6487 | } |
34e8f829 | 6488 | /* |
1f2f436a | 6489 | * Else if the allocation's a large allocation, try to reallocate in-place there. |
34e8f829 | 6490 | */ |
1f2f436a A |
6491 | } else if (!(szone->debug_flags & SCALABLE_MALLOC_PURGEABLE) && // purgeable needs fresh allocation |
6492 | (old_size > szone->large_threshold) && | |
6493 | (new_good_size > szone->large_threshold)) { | |
6494 | if (new_good_size <= (old_size >> 1)) { | |
6495 | return large_try_shrink_in_place(szone, ptr, old_size, new_good_size); | |
6496 | } else if (new_good_size <= old_size) { | |
6497 | return ptr; | |
6498 | } else if (large_try_realloc_in_place(szone, ptr, old_size, new_good_size)) { | |
34e8f829 A |
6499 | return ptr; |
6500 | } | |
6501 | } | |
6502 | ||
6503 | /* | |
6504 | * Can't reallocate in place for whatever reason; allocate a new buffer and copy. | |
6505 | */ | |
1f2f436a A |
6506 | if (new_good_size <= (old_size >> 1)) { |
6507 | /* Serious shrinkage (more than half). FALL THROUGH to alloc/copy/free. */ | |
6508 | } else if (new_good_size <= old_size) { | |
6509 | return ptr; | |
6510 | } | |
6511 | ||
34e8f829 A |
6512 | new_ptr = szone_malloc(szone, new_size); |
6513 | if (new_ptr == NULL) | |
6514 | return NULL; | |
6515 | ||
6516 | /* | |
6517 | * If the allocation's large enough, try to copy using VM. If that fails, or | |
6518 | * if it's too small, just copy by hand. | |
6519 | */ | |
1f2f436a A |
6520 | valid_size = MIN(old_size, new_size); |
6521 | if ((valid_size < szone->vm_copy_threshold) || | |
6522 | vm_copy(mach_task_self(), (vm_address_t)ptr, valid_size, (vm_address_t)new_ptr)) | |
6523 | memcpy(new_ptr, ptr, valid_size); | |
34e8f829 A |
6524 | szone_free(szone, ptr); |
6525 | ||
6526 | #if DEBUG_MALLOC | |
6527 | if (LOG(szone, ptr)) { | |
6528 | malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size); | |
6529 | } | |
6530 | #endif | |
6531 | return new_ptr; | |
6532 | } | |
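/*
 * Illustration (a caller-side sketch, not part of this zone): the POSIX
 * realloc() semantics handled at the top of szone_realloc() above --
 * realloc(NULL, n) behaves like malloc(n), and realloc(p, 0) frees p and
 * returns a distinct pointer that must itself be passed to free().
 */
#if 0
#include <stdlib.h>

static void
realloc_contract_demo(void)
{
    void *p = realloc(NULL, 64);    // equivalent to malloc(64)
    void *q = realloc(p, 0);        // frees p; q is a unique, freeable pointer
    free(q);
}
#endif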
6533 | ||
6534 | static NOINLINE void * | |
6535 | szone_memalign(szone_t *szone, size_t alignment, size_t size) | |
6536 | { | |
6537 | if ((size + alignment) < size) // size_t arithmetic wrapped! | |
6538 | return NULL; | |
6539 | ||
6540 | // alignment is guaranteed to be a power of 2 at least as large as sizeof(void *), hence non-zero. | |
6541 | // Since size + alignment didn't wrap, 0 <= size + alignment - 1 < size + alignment | |
6542 | size_t span = size + alignment - 1; | |
6543 | ||
6544 | if (alignment <= TINY_QUANTUM) { | |
6545 | return szone_malloc(szone, size); // Trivially satisfied by tiny, small, or large | |
6546 | ||
6547 | } else if (span <= (NUM_TINY_SLOTS - 1)*TINY_QUANTUM) { | |
6548 | msize_t mspan = TINY_MSIZE_FOR_BYTES(span + TINY_QUANTUM - 1); | |
6549 | void *p = szone_malloc(szone, span); // avoids inlining tiny_malloc_should_clear(szone, mspan, 0); | |
6550 | ||
6551 | if (NULL == p) | |
6552 | return NULL; | |
6553 | ||
6554 | size_t offset = ((uintptr_t) p) & (alignment - 1); // p % alignment | |
6555 | size_t pad = (0 == offset) ? 0 : alignment - offset; // p + pad achieves desired alignment | |
6556 | ||
6557 | msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); | |
6558 | msize_t mpad = TINY_MSIZE_FOR_BYTES(pad + TINY_QUANTUM - 1); | |
6559 | msize_t mwaste = mspan - msize - mpad; // excess blocks | |
6560 | ||
6561 | if (mpad > 0) { | |
6562 | void *q = (void *)(((uintptr_t) p) + pad); | |
6563 | ||
6564 | // Mark q as a block header and in-use, thus creating two blocks. | |
6565 | magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines, | |
6566 | REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)), | |
6567 | MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p))); | |
6568 | set_tiny_meta_header_in_use(q, msize); | |
7ba935f9 | 6569 | tiny_mag_ptr->mag_num_objects++; |
34e8f829 A |
6570 | |
6571 | // set_tiny_meta_header_in_use() "reaffirms" the block_header on the *following* block, so | |
6572 | // now set its in_use bit as well. But only if it's within the original allocation made above. | |
6573 | if (mwaste > 0) | |
6574 | BITARRAY_SET(TINY_INUSE_FOR_HEADER(TINY_BLOCK_HEADER_FOR_PTR(q)), TINY_INDEX_FOR_PTR(q) + msize); | |
6575 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
6576 | ||
6577 | // Give up mpad blocks beginning at p to the tiny free list | |
6578 | // region_t r = TINY_REGION_FOR_PTR(p); | |
6579 | szone_free(szone, p); // avoids inlining free_tiny(szone, p, &r); | |
6580 | ||
6581 | p = q; // advance p to the desired alignment | |
6582 | } | |
6583 | ||
6584 | if (mwaste > 0) { | |
6585 | void *q = (void *)(((uintptr_t) p) + TINY_BYTES_FOR_MSIZE(msize)); | |
6586 | // Mark q as block header and in-use, thus creating two blocks. | |
6587 | magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines, | |
6588 | REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)), | |
6589 | MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p))); | |
6590 | set_tiny_meta_header_in_use(q, mwaste); | |
7ba935f9 | 6591 | tiny_mag_ptr->mag_num_objects++; |
34e8f829 A |
6592 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); |
6593 | ||
6594 | // Give up mwaste blocks beginning at q to the tiny free list | |
6595 | // region_t r = TINY_REGION_FOR_PTR(q); | |
6596 | szone_free(szone, q); // avoids inlining free_tiny(szone, q, &r); | |
6597 | } | |
6598 | ||
6599 | return p; // p has the desired size and alignment, and can later be free()'d | |
6600 | ||
6601 | } else if ((NUM_TINY_SLOTS - 1)*TINY_QUANTUM < size && alignment <= SMALL_QUANTUM) { | |
6602 | return szone_malloc(szone, size); // Trivially satisfied by small or large | |
6603 | ||
1f2f436a | 6604 | } else if (span <= szone->large_threshold) { |
34e8f829 A |
6605 | |
6606 | if (size <= (NUM_TINY_SLOTS - 1)*TINY_QUANTUM) { | |
6607 | size = (NUM_TINY_SLOTS - 1)*TINY_QUANTUM + TINY_QUANTUM; // ensure block allocated by small does not have a tiny-possible size | |
6608 | span = size + alignment - 1; | |
6609 | } | |
6610 | ||
6611 | msize_t mspan = SMALL_MSIZE_FOR_BYTES(span + SMALL_QUANTUM - 1); | |
6612 | void *p = szone_malloc(szone, span); // avoid inlining small_malloc_should_clear(szone, mspan, 0); | |
6613 | ||
6614 | if (NULL == p) | |
6615 | return NULL; | |
6616 | ||
6617 | size_t offset = ((uintptr_t) p) & (alignment - 1); // p % alignment | |
6618 | size_t pad = (0 == offset) ? 0 : alignment - offset; // p + pad achieves desired alignment | |
6619 | ||
6620 | msize_t msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1); | |
6621 | msize_t mpad = SMALL_MSIZE_FOR_BYTES(pad + SMALL_QUANTUM - 1); | |
6622 | msize_t mwaste = mspan - msize - mpad; // excess blocks | |
6623 | ||
6624 | if (mpad > 0) { | |
6625 | void *q = (void *)(((uintptr_t) p) + pad); | |
6626 | ||
6627 | // Mark q as block header and in-use, thus creating two blocks. | |
6628 | magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines, | |
6629 | REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p)), | |
6630 | MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p))); | |
6631 | small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(p), SMALL_META_INDEX_FOR_PTR(p), mpad); | |
6632 | small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(q), SMALL_META_INDEX_FOR_PTR(q), msize + mwaste); | |
7ba935f9 | 6633 | small_mag_ptr->mag_num_objects++; |
34e8f829 A |
6634 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); |
6635 | ||
6636 | // Give up mpad blocks beginning at p to the small free list | |
6637 | // region_t r = SMALL_REGION_FOR_PTR(p); | |
6638 | szone_free(szone, p); // avoid inlining free_small(szone, p, &r); | |
6639 | ||
6640 | p = q; // advance p to the desired alignment | |
6641 | } | |
6642 | if (mwaste > 0) { | |
6643 | void *q = (void *)(((uintptr_t) p) + SMALL_BYTES_FOR_MSIZE(msize)); | |
6644 | // Mark q as block header and in-use, thus creating two blocks. | |
6645 | magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines, | |
6646 | REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p)), | |
6647 | MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p))); | |
6648 | small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(p), SMALL_META_INDEX_FOR_PTR(p), msize); | |
6649 | small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(q), SMALL_META_INDEX_FOR_PTR(q), mwaste); | |
7ba935f9 | 6650 | small_mag_ptr->mag_num_objects++; |
34e8f829 A |
6651 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); |
6652 | ||
6653 | // Give up mwaste blocks beginning at q to the small free list | |
6654 | // region_t r = SMALL_REGION_FOR_PTR(q); | |
6655 | szone_free(szone, q); // avoid inlining free_small(szone, q, &r); | |
6656 | } | |
6657 | ||
6658 | return p; // p has the desired size and alignment, and can later be free()'d | |
6659 | ||
6660 | } else if (szone->large_threshold < size && alignment <= vm_page_size) { | |
6661 | return szone_malloc(szone, size); // Trivially satisfied by large | |
6662 | ||
6663 | } else { | |
6664 | // ensure block allocated by large does not have a small-possible size | |
6665 | size_t num_pages = round_page(MAX(szone->large_threshold + 1, size)) >> vm_page_shift; | |
6666 | void *p; | |
6667 | ||
6668 | if (num_pages == 0) /* Overflowed */ | |
6669 | p = NULL; | |
6670 | else | |
6671 | p = large_malloc(szone, num_pages, MAX(vm_page_shift, __builtin_ctz(alignment)), 0); | |
6672 | ||
6673 | return p; | |
6674 | } | |
6675 | /* NOTREACHED */ | |
6676 | } | |
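/*
 * Illustration (a minimal sketch, not part of this zone): the padding
 * arithmetic szone_memalign() uses above. Given a block p spanning
 * size + alignment - 1 bytes, p + pad is the first address inside the
 * block with the requested power-of-two alignment. The helper name is
 * hypothetical.
 */
#if 0
static void *
align_within_span(void *p, size_t alignment)    // alignment must be a power of two
{
    size_t offset = ((uintptr_t)p) & (alignment - 1);       // p % alignment
    size_t pad = (0 == offset) ? 0 : alignment - offset;    // bytes to skip, possibly zero

    return (void *)(((uintptr_t)p) + pad);
}
#endif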
6677 | ||
6678 | // Given a size, returns the number of pointers allocated, each capable of holding | |
6679 | // that size, up to the limit specified by the 'count' argument. These pointers | |
6680 | // are stored in the 'results' array, which must be allocated by the caller. | |
6681 | // May return zero, since this function is only a best attempt at allocating | |
6682 | // the pointers. Clients should be prepared to call malloc for any additional | |
6683 | // blocks they need. | |
6684 | static NOINLINE unsigned | |
6685 | szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count) | |
6686 | { | |
6687 | msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); | |
6688 | unsigned found = 0; | |
6689 | mag_index_t mag_index = mag_get_thread_index(szone); | |
6690 | magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]); | |
6691 | ||
6692 | // only bother implementing this for tiny | |
6693 | if (size > (NUM_TINY_SLOTS - 1)*TINY_QUANTUM) | |
6694 | return 0; | |
6695 | // make sure to return objects at least one quantum in size | |
6696 | if (!msize) | |
6697 | msize = 1; | |
6698 | ||
6699 | CHECK(szone, __PRETTY_FUNCTION__); | |
6700 | ||
6701 | // We must lock the zone now, since tiny_malloc_from_free_list assumes that | |
6702 | // the caller has done so. | |
6703 | SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr); | |
6704 | ||
6705 | // with the zone locked, allocate objects from the free list until all | |
6706 | // sufficiently large objects have been exhausted, or we have met our quota | |
6707 | // of objects to allocate. | |
6708 | while (found < count) { | |
6709 | void *ptr = tiny_malloc_from_free_list(szone, tiny_mag_ptr, mag_index, msize); | |
6710 | if (!ptr) | |
6711 | break; | |
6712 | ||
6713 | *results++ = ptr; | |
6714 | found++; | |
6715 | } | |
6716 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
6717 | return found; | |
6718 | } | |
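/*
 * Illustration (a caller-side sketch, not part of this zone): the batch
 * interface is best-effort, so a caller tops up any shortfall with plain
 * malloc(), as the comment above requires.
 */
#if 0
#include <malloc/malloc.h>
#include <stdlib.h>

static void
batch_alloc_demo(void **results, unsigned wanted, size_t size)
{
    unsigned got = malloc_zone_batch_malloc(malloc_default_zone(), size, results, wanted);

    while (got < wanted)
        results[got++] = malloc(size);  // best attempt only; fill the remainder one by one
}
#endif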
6719 | ||
6720 | /* Try caching the tiny_region and checking if the next ptr hits there. */ | |
6721 | static NOINLINE void | |
6722 | szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count) | |
6723 | { | |
6724 | unsigned cc = 0; | |
6725 | void *ptr; | |
6726 | region_t tiny_region = NULL; | |
6727 | boolean_t is_free; | |
6728 | msize_t msize; | |
6729 | magazine_t *tiny_mag_ptr = NULL; | |
6730 | mag_index_t mag_index = -1; | |
6731 | ||
6732 | // frees all the pointers in to_be_freed | |
6733 | // note that to_be_freed may be overwritten during the process | |
6734 | if (!count) | |
6735 | return; | |
6736 | ||
6737 | CHECK(szone, __PRETTY_FUNCTION__); | |
6738 | while (cc < count) { | |
6739 | ptr = to_be_freed[cc]; | |
6740 | if (ptr) { | |
6741 | if (NULL == tiny_region || tiny_region != TINY_REGION_FOR_PTR(ptr)) { // region same as last iteration? | |
6742 | if (tiny_mag_ptr) { // non-NULL iff magazine lock taken | |
6743 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
6744 | tiny_mag_ptr = NULL; | |
6745 | } | |
6746 | ||
6747 | tiny_region = tiny_region_for_ptr_no_lock(szone, ptr); | |
6748 | ||
6749 | if (tiny_region) { | |
6750 | tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines, | |
6751 | REGION_TRAILER_FOR_TINY_REGION(tiny_region), | |
6752 | MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region)); | |
6753 | mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region); | |
6754 | } | |
6755 | } | |
6756 | if (tiny_region) { | |
6757 | // this is a tiny pointer | |
6758 | if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) | |
6759 | break; // pointer to metadata; let the standard free deal with it | |
6760 | msize = get_tiny_meta_header(ptr, &is_free); | |
6761 | if (is_free) | |
6762 | break; // a double free; let the standard free deal with it | |
6763 | ||
1f2f436a A |
6764 | if (!tiny_free_no_lock(szone, tiny_mag_ptr, mag_index, tiny_region, ptr, msize)) { |
6765 | // Arrange to re-acquire magazine lock | |
6766 | tiny_mag_ptr = NULL; | |
6767 | tiny_region = NULL; | |
6768 | } | |
34e8f829 A |
6769 | to_be_freed[cc] = NULL; |
6770 | } else { | |
6771 | // No region in this zone claims ptr; let the standard free deal with it | |
6772 | break; | |
6773 | } | |
6774 | } | |
6775 | cc++; | |
6776 | } | |
6777 | ||
6778 | if (tiny_mag_ptr) { | |
6779 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
6780 | tiny_mag_ptr = NULL; | |
6781 | } | |
6782 | ||
6783 | CHECK(szone, __PRETTY_FUNCTION__); | |
6784 | while (count--) { | |
6785 | ptr = to_be_freed[count]; | |
6786 | if (ptr) | |
6787 | szone_free(szone, ptr); | |
6788 | } | |
6789 | } | |
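/*
 * Illustration (a caller-side sketch, not part of this zone): releasing a
 * batch through the public entry point. As noted above, the array may be
 * overwritten during the operation, so it must not be reused afterwards
 * without being refilled.
 */
#if 0
#include <malloc/malloc.h>

static void
batch_free_demo(void **ptrs, unsigned count)
{
    malloc_zone_batch_free(malloc_default_zone(), ptrs, count); // entries may be scribbled on
}
#endif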
6790 | ||
6791 | // FIXME: Suppose one of the locks is held? | |
6792 | static void | |
6793 | szone_destroy(szone_t *szone) | |
6794 | { | |
6795 | size_t index; | |
6796 | large_entry_t *large; | |
6797 | vm_range_t range_to_deallocate; | |
6798 | ||
1f2f436a A |
6799 | #if LARGE_CACHE |
6800 | SZONE_LOCK(szone); | |
6801 | ||
6802 | /* disable any memory pressure responder */ | |
6803 | szone->flotsam_enabled = FALSE; | |
6804 | ||
6805 | // stack allocated copy of the death-row cache | |
7ba935f9 | 6806 | int idx = szone->large_entry_cache_oldest, idx_max = szone->large_entry_cache_newest; |
1f2f436a A |
6807 | large_entry_t local_entry_cache[LARGE_ENTRY_CACHE_SIZE]; |
6808 | ||
6809 | memcpy((void *)local_entry_cache, (void *)szone->large_entry_cache, sizeof(local_entry_cache)); | |
6810 | ||
6811 | szone->large_entry_cache_oldest = szone->large_entry_cache_newest = 0; | |
6812 | szone->large_entry_cache[0].address = 0x0; | |
6813 | szone->large_entry_cache[0].size = 0; | |
6814 | szone->large_entry_cache_bytes = 0; | |
6815 | szone->large_entry_cache_reserve_bytes = 0; | |
6816 | ||
6817 | SZONE_UNLOCK(szone); | |
6818 | ||
6819 | // deallocate the death-row cache outside the zone lock | |
7ba935f9 | 6820 | while (idx != idx_max) { |
1f2f436a | 6821 | deallocate_pages(szone, (void *) local_entry_cache[idx].address, local_entry_cache[idx].size, 0); |
7ba935f9 A |
6822 | if (++idx == LARGE_ENTRY_CACHE_SIZE) idx = 0; |
6823 | } | |
1f2f436a A |
6824 | if (0 != local_entry_cache[idx].address && 0 != local_entry_cache[idx].size) { |
6825 | deallocate_pages(szone, (void *) local_entry_cache[idx].address, local_entry_cache[idx].size, 0); | |
6826 | } | |
6827 | #endif | |
7ba935f9 | 6828 | |
34e8f829 A |
6829 | /* destroy large entries */ |
6830 | index = szone->num_large_entries; | |
6831 | while (index--) { | |
6832 | large = szone->large_entries + index; | |
6833 | if (large->address) { | |
6834 | // we deallocate_pages, including guard pages | |
6835 | deallocate_pages(szone, (void *)(large->address), large->size, szone->debug_flags); | |
6836 | } | |
6837 | } | |
6838 | large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate); | |
6839 | if (range_to_deallocate.size) | |
6840 | deallocate_pages(szone, (void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0); | |
6841 | ||
6842 | /* destroy tiny regions */ | |
6843 | for (index = 0; index < szone->tiny_region_generation->num_regions_allocated; ++index) | |
6844 | if ((HASHRING_OPEN_ENTRY != szone->tiny_region_generation->hashed_regions[index]) && | |
6845 | (HASHRING_REGION_DEALLOCATED != szone->tiny_region_generation->hashed_regions[index])) | |
6846 | deallocate_pages(szone, szone->tiny_region_generation->hashed_regions[index], TINY_REGION_SIZE, 0); | |
6847 | ||
6848 | /* destroy small regions */ | |
6849 | for (index = 0; index < szone->small_region_generation->num_regions_allocated; ++index) | |
6850 | if ((HASHRING_OPEN_ENTRY != szone->small_region_generation->hashed_regions[index]) && | |
6851 | (HASHRING_REGION_DEALLOCATED != szone->small_region_generation->hashed_regions[index])) | |
6852 | deallocate_pages(szone, szone->small_region_generation->hashed_regions[index], SMALL_REGION_SIZE, 0); | |
6853 | ||
6854 | /* destroy region hash rings, if any */ | |
6855 | if (szone->tiny_region_generation->hashed_regions != szone->initial_tiny_regions) { | |
6856 | size_t size = round_page(szone->tiny_region_generation->num_regions_allocated * sizeof(region_t)); | |
6857 | deallocate_pages(szone, szone->tiny_region_generation->hashed_regions, size, 0); | |
6858 | } | |
6859 | if (szone->small_region_generation->hashed_regions != szone->initial_small_regions) { | |
6860 | size_t size = round_page(szone->small_region_generation->num_regions_allocated * sizeof(region_t)); | |
6861 | deallocate_pages(szone, szone->small_region_generation->hashed_regions, size, 0); | |
6862 | } | |
6863 | ||
6864 | /* Now destroy the separate szone region */ | |
6865 | if (szone->cpu_id_key != (pthread_key_t) -1) | |
6866 | (void)pthread_key_delete(szone->cpu_id_key); | |
6867 | deallocate_pages(szone, (void *)&(szone->tiny_magazines[-1]), TINY_MAGAZINE_PAGED_SIZE, SCALABLE_MALLOC_ADD_GUARD_PAGES); | |
6868 | deallocate_pages(szone, (void *)&(szone->small_magazines[-1]), SMALL_MAGAZINE_PAGED_SIZE, SCALABLE_MALLOC_ADD_GUARD_PAGES); | |
1f2f436a | 6869 | deallocate_pages(szone, (void *)szone, SZONE_PAGED_SIZE, 0); |
34e8f829 A |
6870 | } |
6871 | ||
6872 | static NOINLINE size_t | |
6873 | szone_good_size(szone_t *szone, size_t size) | |
6874 | { | |
6875 | msize_t msize; | |
34e8f829 A |
6876 | |
6877 | // Find a good size for this tiny allocation. | |
6878 | if (size <= (NUM_TINY_SLOTS - 1) * TINY_QUANTUM) { | |
6879 | msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1); | |
6880 | if (!msize) | |
6881 | msize = 1; | |
6882 | return TINY_BYTES_FOR_MSIZE(msize); | |
6883 | } | |
6884 | ||
6885 | // Find a good size for this small allocation. | |
1f2f436a | 6886 | if (size <= szone->large_threshold) { |
34e8f829 A |
6887 | msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1); |
6888 | if (!msize) | |
6889 | msize = 1; | |
6890 | return SMALL_BYTES_FOR_MSIZE(msize); | |
6891 | } | |
6892 | ||
6893 | // Check for integer overflow on the size, since unlike the two cases above, | |
6894 | // there is no upper bound on allocation size at this point. | |
6895 | if (size > round_page(size)) | |
6896 | return (size_t)(-1LL); | |
6897 | ||
6898 | #if DEBUG_MALLOC | |
6899 | // It is not acceptable to see a size of zero here, since that means we | |
6900 | // failed to catch a request for zero bytes in the tiny check, or the size | |
6901 | // overflowed to zero during some arithmetic. | |
6902 | if (size == 0) | |
6903 | malloc_printf("szone_good_size() invariant broken %y\n", size); | |
6904 | #endif | |
6905 | return round_page(size); | |
6906 | } | |
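/*
 * Illustration (a caller-side sketch, not part of this zone): the public
 * malloc_good_size() wrapper is expected to route to this routine for the
 * default zone, so allocating exactly the "good" size wastes no quantum
 * padding.
 */
#if 0
#include <malloc/malloc.h>
#include <stdlib.h>

static void *
alloc_good(size_t request)
{
    size_t rounded = malloc_good_size(request); // rounded up to a tiny/small quantum or a whole page
    return malloc(rounded);                     // no slack beyond what the allocator would add anyway
}
#endif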
6907 | ||
6908 | unsigned szone_check_counter = 0; | |
6909 | unsigned szone_check_start = 0; | |
6910 | unsigned szone_check_modulo = 1; | |
6911 | ||
6912 | static NOINLINE boolean_t | |
6913 | szone_check_all(szone_t *szone, const char *function) | |
6914 | { | |
6915 | size_t index; | |
6916 | ||
6917 | /* check tiny regions - could check region count */ | |
6918 | for (index = 0; index < szone->tiny_region_generation->num_regions_allocated; ++index) { | |
6919 | region_t tiny = szone->tiny_region_generation->hashed_regions[index]; | |
6920 | ||
6921 | if (HASHRING_REGION_DEALLOCATED == tiny) | |
6922 | continue; | |
6923 | ||
6924 | if (tiny) { | |
6925 | magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines, | |
6926 | REGION_TRAILER_FOR_TINY_REGION(tiny), MAGAZINE_INDEX_FOR_TINY_REGION(tiny)); | |
6927 | ||
6928 | if (!tiny_check_region(szone, tiny)) { | |
6929 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
6930 | szone->debug_flags &= ~ CHECK_REGIONS; | |
6931 | szone_error(szone, 1, "check: tiny region incorrect", NULL, | |
6932 | "*** tiny region %ld incorrect szone_check_all(%s) counter=%d\n", | |
6933 | index, function, szone_check_counter); | |
6934 | return 0; | |
6935 | } | |
6936 | SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr); | |
6937 | } | |
6938 | } | |
6939 | /* check tiny free lists */ | |
6940 | for (index = 0; index < NUM_TINY_SLOTS; ++index) { | |
6941 | if (!tiny_free_list_check(szone, index)) { | |
6942 | szone->debug_flags &= ~ CHECK_REGIONS; | |
6943 | szone_error(szone, 1, "check: tiny free list incorrect", NULL, | |
6944 | "*** tiny free list incorrect (slot=%ld) szone_check_all(%s) counter=%d\n", | |
6945 | index, function, szone_check_counter); | |
6946 | return 0; | |
6947 | } | |
6948 | } | |
6949 | ||
6950 | /* check small regions - could check region count */ | |
6951 | for (index = 0; index < szone->small_region_generation->num_regions_allocated; ++index) { | |
6952 | region_t small = szone->small_region_generation->hashed_regions[index]; | |
6953 | ||
6954 | if (HASHRING_REGION_DEALLOCATED == small) | |
6955 | continue; | |
6956 | ||
6957 | if (small) { | |
6958 | magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines, | |
6959 | REGION_TRAILER_FOR_SMALL_REGION(small), MAGAZINE_INDEX_FOR_SMALL_REGION(small)); | |
6960 | ||
6961 | if (!small_check_region(szone, small)) { | |
6962 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
6963 | szone->debug_flags &= ~ CHECK_REGIONS; | |
6964 | szone_error(szone, 1, "check: small region incorrect", NULL, | |
6965 | "*** small region %ld incorrect szone_check_all(%s) counter=%d\n", | |
6966 | index, function, szone_check_counter); | |
6967 | return 0; | |
6968 | } | |
6969 | SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr); | |
6970 | } | |
6971 | } | |
6972 | /* check small free lists */ | |
6973 | for (index = 0; index < szone->num_small_slots; ++index) { | |
6974 | if (!small_free_list_check(szone, index)) { | |
6975 | szone->debug_flags &= ~ CHECK_REGIONS; | |
6976 | szone_error(szone, 1, "check: small free list incorrect", NULL, | |
6977 | "*** small free list incorrect (slot=%ld) szone_check_all(%s) counter=%d\n", | |
6978 | index, function, szone_check_counter); | |
6979 | return 0; | |
6980 | } | |
6981 | } | |
6982 | ||
6983 | return 1; | |
6984 | } | |
6985 | ||
6986 | static boolean_t | |
6987 | szone_check(szone_t *szone) | |
6988 | { | |
6989 | if ((++szone_check_counter % 10000) == 0) | |
6990 | _malloc_printf(ASL_LEVEL_NOTICE, "at szone_check counter=%d\n", szone_check_counter); | |
6991 | ||
6992 | if (szone_check_counter < szone_check_start) | |
6993 | return 1; | |
6994 | ||
6995 | if (szone_check_counter % szone_check_modulo) | |
6996 | return 1; | |
6997 | ||
6998 | return szone_check_all(szone, ""); | |
6999 | } | |
7000 | ||
7001 | static kern_return_t | |
7002 | szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, | |
7003 | memory_reader_t reader, vm_range_recorder_t recorder) | |
7004 | { | |
7005 | szone_t *szone; | |
7006 | kern_return_t err; | |
7007 | ||
7008 | if (!reader) reader = _szone_default_reader; | |
7009 | ||
7010 | err = reader(task, zone_address, sizeof(szone_t), (void **)&szone); | |
7011 | if (err) return err; | |
7012 | ||
7013 | err = tiny_in_use_enumerator(task, context, type_mask, szone, reader, recorder); | |
7014 | if (err) return err; | |
7015 | ||
7016 | err = small_in_use_enumerator(task, context, type_mask, szone, reader, recorder); | |
7017 | if (err) return err; | |
7018 | ||
7019 | err = large_in_use_enumerator(task, context, type_mask, | |
7020 | (vm_address_t)szone->large_entries, szone->num_large_entries, reader, recorder); | |
7021 | return err; | |
7022 | } | |
7023 | ||
7024 | // Following method is deprecated: use scalable_zone_statistics instead | |
7025 | void | |
7026 | scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count) | |
7027 | { | |
7028 | szone_t *szone = (void *)zone; | |
7029 | unsigned info[13]; | |
7030 | ||
7031 | // We do not take the locks, to facilitate debugging | |
7032 | ||
7033 | size_t s = 0; | |
7034 | unsigned t = 0; | |
7035 | size_t u = 0; | |
7036 | mag_index_t mag_index; | |
7037 | ||
7038 | for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) { | |
1f2f436a | 7039 | s += szone->tiny_magazines[mag_index].mag_bytes_free_at_start; |
34e8f829 A |
7040 | s += szone->tiny_magazines[mag_index].mag_bytes_free_at_end; |
7041 | t += szone->tiny_magazines[mag_index].mag_num_objects; | |
7042 | u += szone->tiny_magazines[mag_index].mag_num_bytes_in_objects; | |
7043 | } | |
7044 | ||
7045 | info[4] = t; | |
7046 | info[5] = u; | |
7047 | ||
7048 | for (t = 0, u = 0, mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) { | |
1f2f436a | 7049 | s += szone->small_magazines[mag_index].mag_bytes_free_at_start; |
34e8f829 A |
7050 | s += szone->small_magazines[mag_index].mag_bytes_free_at_end; |
7051 | t += szone->small_magazines[mag_index].mag_num_objects; | |
7052 | u += szone->small_magazines[mag_index].mag_num_bytes_in_objects; | |
7053 | } | |
7054 | ||
7055 | info[6] = t; | |
7056 | info[7] = u; | |
7057 | ||
7058 | info[8] = szone->num_large_objects_in_use; | |
7059 | info[9] = szone->num_bytes_in_large_objects; | |
7060 | ||
7061 | info[10] = 0; // DEPRECATED szone->num_huge_entries; | |
7062 | info[11] = 0; // DEPRECATED szone->num_bytes_in_huge_objects; | |
7063 | ||
7064 | info[12] = szone->debug_flags; | |
7065 | ||
7066 | info[0] = info[4] + info[6] + info[8] + info[10]; | |
7067 | info[1] = info[5] + info[7] + info[9] + info[11]; | |
7068 | ||
7069 | info[3] = (szone->num_tiny_regions - szone->num_tiny_regions_dealloc) * TINY_REGION_SIZE + | |
7070 | (szone->num_small_regions - szone->num_small_regions_dealloc) * SMALL_REGION_SIZE + info[9] + info[11]; | |
7071 | ||
7072 | info[2] = info[3] - s; | |
7073 | memcpy(info_to_fill, info, sizeof(unsigned)*count); | |
7074 | } | |
7075 | ||
7076 | // FIXME: consistent picture requires locking! | |
7077 | static NOINLINE void | |
7078 | szone_print(szone_t *szone, boolean_t verbose) | |
7079 | { | |
7080 | unsigned info[13]; | |
7081 | size_t index; | |
7082 | region_t region; | |
7083 | ||
7084 | scalable_zone_info((void *)szone, info, 13); | |
7085 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, | |
7ba935f9 | 7086 | "Scalable zone %p: inUse=%u(%y) touched=%y allocated=%y flags=%d\n", |
34e8f829 A |
7087 | szone, info[0], info[1], info[2], info[3], info[12]); |
7088 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, | |
7ba935f9 | 7089 | "\ttiny=%u(%y) small=%u(%y) large=%u(%y) huge=%u(%y)\n", |
34e8f829 A |
7090 | info[4], info[5], info[6], info[7], info[8], info[9], info[10], info[11]); |
7091 | // tiny | |
7092 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, | |
7ba935f9 | 7093 | "%lu tiny regions:\n", szone->num_tiny_regions); |
34e8f829 A |
7094 | if (szone->num_tiny_regions_dealloc) |
7095 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, | |
7ba935f9 | 7096 | "[%lu tiny regions have been vm_deallocate'd]\n", szone->num_tiny_regions_dealloc); |
34e8f829 A |
7097 | for (index = 0; index < szone->tiny_region_generation->num_regions_allocated; ++index) { |
7098 | region = szone->tiny_region_generation->hashed_regions[index]; | |
7099 | if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) { | |
7100 | mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(region); | |
1f2f436a A |
7101 | print_tiny_region(verbose, region, |
7102 | (region == szone->tiny_magazines[mag_index].mag_last_region) ? | |
7103 | szone->tiny_magazines[mag_index].mag_bytes_free_at_start : 0, | |
7104 | (region == szone->tiny_magazines[mag_index].mag_last_region) ? | |
34e8f829 A |
7105 | szone->tiny_magazines[mag_index].mag_bytes_free_at_end : 0); |
7106 | } | |
7107 | } | |
7108 | if (verbose) | |
7109 | print_tiny_free_list(szone); | |
7110 | // small | |
7111 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, | |
7ba935f9 | 7112 | "%lu small regions:\n", szone->num_small_regions); |
34e8f829 A |
7113 | if (szone->num_small_regions_dealloc) |
7114 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, | |
7ba935f9 | 7115 | "[%lu small regions have been vm_deallocate'd]\n", szone->num_small_regions_dealloc); |
34e8f829 A |
7116 | for (index = 0; index < szone->small_region_generation->num_regions_allocated; ++index) { |
7117 | region = szone->small_region_generation->hashed_regions[index]; | |
7118 | if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) { | |
7119 | mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(region); | |
7120 | print_small_region(szone, verbose, region, | |
1f2f436a A |
7121 | (region == szone->small_magazines[mag_index].mag_last_region) ? |
7122 | szone->small_magazines[mag_index].mag_bytes_free_at_start : 0, | |
34e8f829 A |
7123 | (region == szone->small_magazines[mag_index].mag_last_region) ? |
7124 | szone->small_magazines[mag_index].mag_bytes_free_at_end : 0); | |
7125 | } | |
7126 | } | |
7127 | if (verbose) | |
7128 | print_small_free_list(szone); | |
7129 | } | |
7130 | ||
7131 | static void | |
7132 | szone_log(malloc_zone_t *zone, void *log_address) | |
7133 | { | |
7134 | szone_t *szone = (szone_t *)zone; | |
7135 | ||
7136 | szone->log_address = log_address; | |
7137 | } | |
7138 | ||
7139 | static void | |
7140 | szone_force_lock(szone_t *szone) | |
7141 | { | |
7142 | mag_index_t i; | |
7143 | ||
7144 | for (i = 0; i < szone->num_tiny_magazines; ++i) { | |
7145 | SZONE_MAGAZINE_PTR_LOCK(szone, (&(szone->tiny_magazines[i]))); | |
7146 | } | |
7147 | SZONE_MAGAZINE_PTR_LOCK(szone, (&(szone->tiny_magazines[DEPOT_MAGAZINE_INDEX]))); | |
7148 | ||
7149 | for (i = 0; i < szone->num_small_magazines; ++i) { | |
7150 | SZONE_MAGAZINE_PTR_LOCK(szone, (&(szone->small_magazines[i]))); | |
7151 | } | |
7152 | SZONE_MAGAZINE_PTR_LOCK(szone, (&(szone->small_magazines[DEPOT_MAGAZINE_INDEX]))); | |
7153 | ||
7154 | SZONE_LOCK(szone); | |
7155 | } | |
7156 | ||
7157 | static void | |
7158 | szone_force_unlock(szone_t *szone) | |
7159 | { | |
7160 | mag_index_t i; | |
7161 | ||
7162 | SZONE_UNLOCK(szone); | |
7163 | ||
7164 | for (i = -1; i < szone->num_small_magazines; ++i) { | |
7165 | SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->small_magazines[i]))); | |
7166 | } | |
7167 | ||
7168 | for (i = -1; i < szone->num_tiny_magazines; ++i) { | |
7169 | SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->tiny_magazines[i]))); | |
7170 | } | |
7171 | } | |
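/*
 * Illustration (a minimal sketch with hypothetical handlers, not part of
 * this zone): force_lock/force_unlock exist so the malloc layer can quiesce
 * a zone around fork(). A pthread_atfork() registration of that shape would
 * look like this.
 */
#if 0
#include <pthread.h>

static void prepare_malloc(void) { /* hypothetical: take every magazine lock, then the zone lock */ }
static void resume_malloc(void)  { /* hypothetical: drop the zone lock, then every magazine lock */ }

static void
install_fork_handlers(void)
{
    // prepare runs before fork(); the other two run afterwards in the
    // parent and the child respectively, leaving both heaps usable.
    pthread_atfork(prepare_malloc, resume_malloc, resume_malloc);
}
#endif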
7172 | ||
7173 | static boolean_t | |
7174 | szone_locked(szone_t *szone) | |
7175 | { | |
7176 | mag_index_t i; | |
7177 | int tookLock; | |
7178 | ||
7179 | tookLock = SZONE_TRY_LOCK(szone); | |
7180 | if (tookLock == 0) | |
7181 | return 1; | |
7182 | SZONE_UNLOCK(szone); | |
7183 | ||
7184 | for (i = -1; i < szone->num_small_magazines; ++i) { | |
7185 | tookLock = SZONE_MAGAZINE_PTR_TRY_LOCK(szone, (&(szone->small_magazines[i]))); | |
7186 | if (tookLock == 0) | |
7187 | return 1; | |
7188 | SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->small_magazines[i]))); | |
7189 | } | |
7190 | ||
7191 | for (i = -1; i < szone->num_tiny_magazines; ++i) { | |
7192 | tookLock = SZONE_MAGAZINE_PTR_TRY_LOCK(szone, (&(szone->tiny_magazines[i]))); | |
7193 | if (tookLock == 0) | |
7194 | return 1; | |
7195 | SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->tiny_magazines[i]))); | |
7196 | } | |
7197 | return 0; | |
7198 | } | |
7199 | ||
1f2f436a A |
7200 | static size_t |
7201 | szone_pressure_relief(szone_t *szone, size_t goal) | |
7202 | { | |
7203 | #if LARGE_CACHE | |
7204 | if (!szone->flotsam_enabled) | |
7205 | return 0; | |
7206 | ||
7207 | SZONE_LOCK(szone); | |
7208 | ||
7209 | // stack allocated copy of the death-row cache | |
7210 | int idx = szone->large_entry_cache_oldest, idx_max = szone->large_entry_cache_newest; | |
7211 | large_entry_t local_entry_cache[LARGE_ENTRY_CACHE_SIZE]; | |
7212 | ||
7213 | memcpy((void *)local_entry_cache, (void *)szone->large_entry_cache, sizeof(local_entry_cache)); | |
7214 | ||
7215 | szone->large_entry_cache_oldest = szone->large_entry_cache_newest = 0; | |
7216 | szone->large_entry_cache[0].address = 0x0; | |
7217 | szone->large_entry_cache[0].size = 0; | |
7218 | szone->large_entry_cache_bytes = 0; | |
7219 | szone->large_entry_cache_reserve_bytes = 0; | |
7220 | ||
7221 | szone->flotsam_enabled = FALSE; | |
7222 | ||
7223 | SZONE_UNLOCK(szone); | |
7224 | ||
7225 | // deallocate the death-row cache outside the zone lock | |
7226 | size_t total = 0; | |
7227 | while (idx != idx_max) { | |
7228 | deallocate_pages(szone, (void *) local_entry_cache[idx].address, local_entry_cache[idx].size, 0); | |
7229 | total += local_entry_cache[idx].size; | |
7230 | if (++idx == LARGE_ENTRY_CACHE_SIZE) idx = 0; | |
7231 | } | |
7232 | if (0 != local_entry_cache[idx].address && 0 != local_entry_cache[idx].size) { | |
7233 | deallocate_pages(szone, (void *) local_entry_cache[idx].address, local_entry_cache[idx].size, 0); | |
7234 | total += local_entry_cache[idx].size; | |
7235 | } | |
7236 | MAGMALLOC_PRESSURERELIEF((void *)szone, goal, total); // DTrace USDT Probe | |
7237 | return total; | |
7238 | #else | |
7239 | return 0; | |
7240 | #endif | |
7241 | } | |
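/*
 * Illustration (a caller-side sketch, not part of this zone): asking the
 * default zone to drain its death-row cache via the public
 * malloc_zone_pressure_relief() entry point. A goal of 0 requests maximal
 * relief; the return value is the number of bytes given back.
 */
#if 0
#include <malloc/malloc.h>

static size_t
release_cached_memory(void)
{
    return malloc_zone_pressure_relief(malloc_default_zone(), 0);
}
#endif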
7242 | ||
34e8f829 A |
7243 | boolean_t |
7244 | scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone) | |
7245 | { | |
7246 | szone_t *szone = (szone_t *)zone; | |
7247 | ||
7248 | switch (subzone) { | |
7249 | case 0: | |
7250 | { | |
7251 | size_t s = 0; | |
7252 | unsigned t = 0; | |
7253 | size_t u = 0; | |
7254 | mag_index_t mag_index; | |
7255 | ||
7256 | for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) { | |
1f2f436a | 7257 | s += szone->tiny_magazines[mag_index].mag_bytes_free_at_start; |
34e8f829 A |
7258 | s += szone->tiny_magazines[mag_index].mag_bytes_free_at_end; |
7259 | t += szone->tiny_magazines[mag_index].mag_num_objects; | |
7260 | u += szone->tiny_magazines[mag_index].mag_num_bytes_in_objects; | |
7261 | } | |
7262 | ||
7263 | stats->blocks_in_use = t; | |
7264 | stats->size_in_use = u; | |
7265 | stats->size_allocated = (szone->num_tiny_regions - szone->num_tiny_regions_dealloc) * TINY_REGION_SIZE; | |
7266 | stats->max_size_in_use = stats->size_allocated - s; | |
7267 | return 1; | |
7268 | } | |
7269 | case 1: | |
7270 | { | |
7271 | size_t s = 0; | |
7272 | unsigned t = 0; | |
7273 | size_t u = 0; | |
7274 | mag_index_t mag_index; | |
7275 | ||
7276 | for (mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) { | |
1f2f436a | 7277 | s += szone->small_magazines[mag_index].mag_bytes_free_at_start; |
34e8f829 A |
7278 | s += szone->small_magazines[mag_index].mag_bytes_free_at_end; |
7279 | t += szone->small_magazines[mag_index].mag_num_objects; | |
7280 | u += szone->small_magazines[mag_index].mag_num_bytes_in_objects; | |
7281 | } | |
7282 | ||
7283 | stats->blocks_in_use = t; | |
7284 | stats->size_in_use = u; | |
7285 | stats->size_allocated = (szone->num_small_regions - szone->num_small_regions_dealloc) * SMALL_REGION_SIZE; | |
7286 | stats->max_size_in_use = stats->size_allocated - s; | |
7287 | return 1; | |
7288 | } | |
7289 | case 2: | |
7290 | stats->blocks_in_use = szone->num_large_objects_in_use; | |
7291 | stats->size_in_use = szone->num_bytes_in_large_objects; | |
7292 | stats->max_size_in_use = stats->size_allocated = stats->size_in_use; | |
7293 | return 1; | |
7294 | case 3: | |
7295 | stats->blocks_in_use = 0; // DEPRECATED szone->num_huge_entries; | |
7296 | stats->size_in_use = 0; // DEPRECATED szone->num_bytes_in_huge_objects; | |
7297 | stats->max_size_in_use = stats->size_allocated = 0; | |
7298 | return 1; | |
7299 | } | |
7300 | return 0; | |
7301 | } | |
7302 | ||
7303 | static void | |
7304 | szone_statistics(szone_t *szone, malloc_statistics_t *stats) | |
7305 | { | |
7306 | size_t large; | |
7307 | ||
7308 | size_t s = 0; | |
7309 | unsigned t = 0; | |
7310 | size_t u = 0; | |
7311 | mag_index_t mag_index; | |
7312 | ||
7313 | for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) { | |
1f2f436a | 7314 | s += szone->tiny_magazines[mag_index].mag_bytes_free_at_start; |
34e8f829 A |
7315 | s += szone->tiny_magazines[mag_index].mag_bytes_free_at_end; |
7316 | t += szone->tiny_magazines[mag_index].mag_num_objects; | |
7317 | u += szone->tiny_magazines[mag_index].mag_num_bytes_in_objects; | |
7318 | } | |
7319 | ||
7320 | for (mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) { | |
1f2f436a | 7321 | s += szone->small_magazines[mag_index].mag_bytes_free_at_start; |
34e8f829 A |
7322 | s += szone->small_magazines[mag_index].mag_bytes_free_at_end; |
7323 | t += szone->small_magazines[mag_index].mag_num_objects; | |
7324 | u += szone->small_magazines[mag_index].mag_num_bytes_in_objects; | |
7325 | } | |
7326 | ||
7327 | large = szone->num_bytes_in_large_objects + 0; // DEPRECATED szone->num_bytes_in_huge_objects; | |
7328 | ||
7329 | stats->blocks_in_use = t + szone->num_large_objects_in_use + 0; // DEPRECATED szone->num_huge_entries; | |
7330 | stats->size_in_use = u + large; | |
7331 | stats->max_size_in_use = stats->size_allocated = | |
7332 | (szone->num_tiny_regions - szone->num_tiny_regions_dealloc) * TINY_REGION_SIZE + | |
7333 | (szone->num_small_regions - szone->num_small_regions_dealloc) * SMALL_REGION_SIZE + large; | |
7334 | // Now we account for the untouched areas | |
7335 | stats->max_size_in_use -= s; | |
7336 | } | |
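/*
 * Illustration (a caller-side sketch, not part of this zone): reading the
 * aggregate counters szone_statistics() fills in, via the public
 * malloc_zone_statistics() entry point.
 */
#if 0
#include <malloc/malloc.h>
#include <stdio.h>

static void
print_zone_stats(void)
{
    malloc_statistics_t stats;

    malloc_zone_statistics(malloc_default_zone(), &stats);
    printf("blocks=%u in_use=%zu max=%zu allocated=%zu\n",
           stats.blocks_in_use, stats.size_in_use,
           stats.max_size_in_use, stats.size_allocated);
}
#endif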
7337 | ||
7338 | static void * | |
7339 | legacy_zeroing_large_malloc(szone_t *szone, size_t size) { | |
7340 | if (size > LARGE_THRESHOLD) // Leopard and earlier returned a ZFOD range, so ... | |
7341 | return szone_calloc(szone, 1, size); // Clear to zero always, ham-handedly touching in each page | |
7342 | else | |
7343 | return szone_malloc(szone, size); | |
7344 | } | |
7345 | ||
7346 | static void * | |
7347 | legacy_zeroing_large_valloc(szone_t *szone, size_t size) { | |
7348 | void *p = szone_valloc(szone, size); | |
7349 | ||
7350 | // Leopard and earlier returned a ZFOD range, so ... | |
7351 | memset(p, 0, size); // Clear to zero always, ham-handedly touching in each page | |
7352 | return p; | |
7353 | } | |
7354 | ||
7355 | void zeroify_scalable_zone(malloc_zone_t *zone) | |
7356 | { | |
7357 | szone_t *szone = (szone_t *)zone; | |
7358 | ||
7359 | if (szone) { | |
1f2f436a | 7360 | mprotect(szone, sizeof(szone->basic_zone), PROT_READ | PROT_WRITE); |
34e8f829 A |
7361 | szone->basic_zone.malloc = (void *)legacy_zeroing_large_malloc; |
7362 | szone->basic_zone.valloc = (void *)legacy_zeroing_large_valloc; | |
1f2f436a | 7363 | mprotect(szone, sizeof(szone->basic_zone), PROT_READ); |
34e8f829 A |
7364 | } |
7365 | } | |
7366 | ||
7367 | static const struct malloc_introspection_t szone_introspect = { | |
7368 | (void *)szone_ptr_in_use_enumerator, | |
7369 | (void *)szone_good_size, | |
7370 | (void *)szone_check, | |
7371 | (void *)szone_print, | |
7372 | szone_log, | |
7373 | (void *)szone_force_lock, | |
7374 | (void *)szone_force_unlock, | |
7375 | (void *)szone_statistics, | |
7376 | (void *)szone_locked, | |
1f2f436a | 7377 | NULL, NULL, NULL, NULL, /* Zone enumeration version 7 and forward. */ |
34e8f829 A |
7378 | }; // marked as const to spare the DATA section |
7379 | ||
7380 | malloc_zone_t * | |
7381 | create_scalable_zone(size_t initial_size, unsigned debug_flags) | |
7382 | { | |
7383 | szone_t *szone; | |
7384 | uint64_t hw_memsize = 0; | |
34e8f829 A |
7385 | |
7386 | /* | |
7387 | * Sanity-check our build-time assumptions about the size of a page. | |
7388 | * Since we have sized various things assuming the default page size, | |
7389 | * attempting to determine it dynamically is not useful. | |
7390 | */ | |
7391 | if ((vm_page_size != _vm_page_size) || (vm_page_shift != _vm_page_shift)) { | |
7392 | malloc_printf("*** FATAL ERROR - machine page size does not match our assumptions.\n"); | |
7393 | exit(-1); | |
7394 | } | |
7395 | ||
7396 | #if defined(__i386__) || defined(__x86_64__) | |
7397 | if (_COMM_PAGE_VERSION_REQD > (*((short *) _COMM_PAGE_VERSION))) { // _COMM_PAGE_CPU_NUMBER must be present at runtime | |
7398 | malloc_printf("*** ERROR - comm page version mismatch.\n"); | |
7399 | exit(-1); | |
7400 | } | |
7401 | #endif | |
7402 | ||
1f2f436a A |
7403 | /* get memory for the zone. */ |
7404 | szone = allocate_pages(NULL, SZONE_PAGED_SIZE, 0, 0, VM_MEMORY_MALLOC); | |
34e8f829 A |
7405 | if (!szone) |
7406 | return NULL; | |
7407 | ||
7408 | /* set up the szone structure */ | |
7409 | #if 0 | |
7410 | #warning CHECK_REGIONS enabled | |
7411 | debug_flags |= CHECK_REGIONS; | |
7412 | #endif | |
7413 | #if 0 | |
7414 | #warning LOG enabled | |
7415 | szone->log_address = ~0; | |
7416 | #endif | |
7417 | szone->trg[0].nextgen = &(szone->trg[1]); | |
7418 | szone->trg[1].nextgen = &(szone->trg[0]); | |
7419 | szone->tiny_region_generation = &(szone->trg[0]); | |
7420 | ||
7421 | szone->tiny_region_generation->hashed_regions = szone->initial_tiny_regions; | |
7422 | szone->tiny_region_generation->num_regions_allocated = INITIAL_NUM_REGIONS; | |
7423 | szone->tiny_region_generation->num_regions_allocated_shift = INITIAL_NUM_REGIONS_SHIFT; | |
7424 | ||
7425 | szone->srg[0].nextgen = &(szone->srg[1]); | |
7426 | szone->srg[1].nextgen = &(szone->srg[0]); | |
7427 | szone->small_region_generation = &(szone->srg[0]); | |
7428 | ||
7429 | szone->small_region_generation->hashed_regions = szone->initial_small_regions; | |
7430 | szone->small_region_generation->num_regions_allocated = INITIAL_NUM_REGIONS; | |
7431 | szone->small_region_generation->num_regions_allocated_shift = INITIAL_NUM_REGIONS_SHIFT; | |
7432 | ||
7433 | ||
7434 | /* | |
7435 | * Initialize variables that size the free list for SMALL allocations based | |
7436 | * upon the amount of memory in the system. Switch to a larger number of | |
7437 | * free list entries at 1GB. | |
7438 | */ | |
1f2f436a A |
7439 | #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) |
7440 | if ((hw_memsize = *(uint64_t *)(uintptr_t)_COMM_PAGE_MEMORY_SIZE) >= (1ULL << 30)) | |
7441 | #else | |
7442 | size_t uint64_t_size = sizeof(hw_memsize); | |
7443 | ||
34e8f829 | 7444 | if (0 == sysctlbyname("hw.memsize", &hw_memsize, &uint64_t_size, 0, 0) && |
1f2f436a A |
7445 | hw_memsize >= (1ULL << 30)) |
7446 | #endif | |
7447 | { | |
34e8f829 A |
7448 | szone->is_largemem = 1; |
7449 | szone->num_small_slots = NUM_SMALL_SLOTS_LARGEMEM; | |
7450 | szone->large_threshold = LARGE_THRESHOLD_LARGEMEM; | |
7451 | szone->vm_copy_threshold = VM_COPY_THRESHOLD_LARGEMEM; | |
7452 | } else { | |
7453 | szone->is_largemem = 0; | |
7454 | szone->num_small_slots = NUM_SMALL_SLOTS; | |
7455 | szone->large_threshold = LARGE_THRESHOLD; | |
7456 | szone->vm_copy_threshold = VM_COPY_THRESHOLD; | |
7457 | } | |
7458 | #if LARGE_CACHE | |
1f2f436a A |
7459 | szone->large_entry_cache_reserve_limit = |
7460 | hw_memsize >> 10; // madvise(..., MADV_REUSABLE) death-row arrivals above this threshold [~0.1%] | |
34e8f829 A |
7461 | |
7462 | /* <rdar://problem/6610904> Reset protection when returning a previous large allocation? */ | |
7463 | int32_t libSystemVersion = NSVersionOfLinkTimeLibrary("System"); | |
7464 | if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 112) /* CFSystemVersionSnowLeopard */) | |
7465 | szone->large_legacy_reset_mprotect = TRUE; | |
7466 | else | |
7467 | szone->large_legacy_reset_mprotect = FALSE; | |
7468 | #endif | |
7469 | ||
7470 | // Initialize the security token. | |
1f2f436a A |
7471 | szone->cookie = (uintptr_t)malloc_entropy[0]; |
7472 | ||
7473 | // Prepare ASLR | |
7474 | #if __i386__ || __LP64__ || TARGET_OS_EMBEDDED | |
7475 | #if __i386__ | |
7476 | uintptr_t stackbase = 0x8fe00000; | |
7477 | int entropic_bits = 3; | |
7478 | #elif __LP64__ | |
7479 | uintptr_t stackbase = USRSTACK64; | |
7480 | int entropic_bits = 16; | |
7481 | #else | |
7482 | uintptr_t stackbase = USRSTACK; | |
7483 | int entropic_bits = 3; | |
7484 | #endif | |
7485 | if (0 != _dyld_get_image_slide((const struct mach_header*)_NSGetMachExecuteHeader())) { | |
7486 | if (0 == entropic_address) { | |
7487 | uintptr_t t = stackbase - MAXSSIZ - ((uintptr_t) (malloc_entropy[1] & ((1 << entropic_bits) - 1)) << SMALL_BLOCKS_ALIGN); | |
7488 | (void)__sync_bool_compare_and_swap(&entropic_limit, 0, t); // Just one initialization please | |
7489 | (void)__sync_bool_compare_and_swap(&entropic_address, 0, t - ENTROPIC_KABILLION); // Just one initialization please | |
7490 | } | |
7491 | debug_flags &= ~DISABLE_ASLR; | |
7492 | } else { | |
7493 | debug_flags |= DISABLE_ASLR; | |
7494 | } | |
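/*
 * Note on the test above: only a position-independent (PIE) main executable is loaded at a
 * nonzero slide, so a nonzero slide is taken as permission to randomize this zone's placement.
 * The compare-and-swaps seed entropic_limit and entropic_address from malloc_entropy[1] at
 * most once per process; a non-slid executable instead gets DISABLE_ASLR.
 */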
7495 | ||
34e8f829 | 7496 | #else |
1f2f436a | 7497 | debug_flags |= DISABLE_ASLR; |
34e8f829 A |
7498 | #endif |
7499 | ||
1f2f436a | 7500 | szone->basic_zone.version = 8; |
34e8f829 A |
7501 | szone->basic_zone.size = (void *)szone_size; |
7502 | szone->basic_zone.malloc = (void *)szone_malloc; | |
7503 | szone->basic_zone.calloc = (void *)szone_calloc; | |
7504 | szone->basic_zone.valloc = (void *)szone_valloc; | |
7505 | szone->basic_zone.free = (void *)szone_free; | |
7506 | szone->basic_zone.realloc = (void *)szone_realloc; | |
7507 | szone->basic_zone.destroy = (void *)szone_destroy; | |
7508 | szone->basic_zone.batch_malloc = (void *)szone_batch_malloc; | |
7509 | szone->basic_zone.batch_free = (void *)szone_batch_free; | |
7510 | szone->basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect; | |
7511 | szone->basic_zone.memalign = (void *)szone_memalign; | |
7512 | szone->basic_zone.free_definite_size = (void *)szone_free_definite_size; | |
1f2f436a A |
7513 | szone->basic_zone.pressure_relief = (void *)szone_pressure_relief; |
7514 | ||
7515 | szone->basic_zone.reserved1 = 0; /* Set to zero once and for all as required by CFAllocator. */ | |
7516 | szone->basic_zone.reserved2 = 0; /* Set to zero once and for all as required by CFAllocator. */ | |
7517 | mprotect(szone, sizeof(szone->basic_zone), PROT_READ); /* Prevent overwriting the function pointers in basic_zone. */ | |
7518 | ||
34e8f829 A |
7519 | szone->debug_flags = debug_flags; |
7520 | LOCK_INIT(szone->large_szone_lock); | |
7521 | ||
7522 | #if defined(__ppc__) || defined(__ppc64__) | |
7523 | /* | |
7524 | * In the interest of compatibility for PPC applications executing via Rosetta, | |
7525 | * arrange to zero-fill allocations, as happened as a side effect in Leopard and earlier. | |
7526 | */ | |
7527 | zeroify_scalable_zone((malloc_zone_t *)szone); | |
7528 | #endif | |
7529 | ||
1f2f436a A |
7530 | #if defined(__i386__) || defined(__x86_64__) |
7531 | szone->cpu_id_key = (pthread_key_t) -1; // Unused. _COMM_PAGE_CPU_NUMBER preferred. | |
7532 | #else | |
7533 | int err; | |
34e8f829 A |
7534 | if ((err = pthread_key_create(&(szone->cpu_id_key), NULL))) { |
7535 | malloc_printf("*** ERROR - pthread_key_create failure err=%d.\n", err); | |
7536 | szone->cpu_id_key = (pthread_key_t) -1; | |
7537 | } | |
1f2f436a | 7538 | #endif |
34e8f829 A |
7539 | |
7540 | // Query the number of configured processors. | |
7541 | // Uniprocessor case gets just one tiny and one small magazine (whose index is zero). This gives | |
7542 | // the same behavior as the original scalable malloc. MP gets per-CPU magazines | |
7543 | // that scale (way) better. | |
1f2f436a A |
7544 | #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) |
7545 | int nproc = *(uint8_t *)(uintptr_t)_COMM_PAGE_NCPUS; | |
7546 | #else | |
34e8f829 | 7547 | int nproc = sysconf(_SC_NPROCESSORS_CONF); |
1f2f436a | 7548 | #endif |
34e8f829 A |
7549 | szone->num_tiny_magazines = (nproc > 1) ? MIN(nproc, TINY_MAX_MAGAZINES) : 1; |
7550 | ||
7551 | // FIXME vm_allocate() based on number of configured CPUs | |
7552 | magazine_t *tiny_magazines = allocate_pages(NULL, TINY_MAGAZINE_PAGED_SIZE, 0, | |
7553 | SCALABLE_MALLOC_ADD_GUARD_PAGES, VM_MEMORY_MALLOC); | |
7554 | if (NULL == tiny_magazines) | |
7555 | return NULL; | |
7556 | ||
7557 | szone->tiny_magazines = &(tiny_magazines[1]); // szone->tiny_magazines[-1] is the Depot | |
7558 | ||
7559 | // The magazines are indexed in [0 .. (num_tiny_magazines - 1)] | |
7560 | // Find the smallest power of 2 that exceeds (num_tiny_magazines - 1) | |
7561 | szone->num_tiny_magazines_mask_shift = 0; | |
7562 | int i = 1; | |
7563 | while( i <= (szone->num_tiny_magazines - 1) ) { | |
7564 | szone->num_tiny_magazines_mask_shift++; | |
7565 | i <<= 1; | |
7566 | } | |
7567 | ||
7568 | // Now if i <= TINY_MAX_MAGAZINES we'll never access tiny_magazines[] out of bounds. | |
7569 | if (i > TINY_MAX_MAGAZINES) { | |
7570 | malloc_printf("*** FATAL ERROR - magazine mask exceeds allocated magazines.\n"); | |
7571 | exit(-1); | |
7572 | } | |
7573 | ||
7574 | // Reduce i by 1 to obtain a mask covering [0 .. (num_tiny_magazines - 1)] | |
7575 | szone->num_tiny_magazines_mask = i - 1; // A mask used for hashing to a magazine index (and a safety aid) | |
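/*
 * For illustration: with nproc == 6 this gives num_tiny_magazines == 6; the loop above runs
 * for i == 1, 2, 4, leaving num_tiny_magazines_mask_shift == 3 and i == 8, so
 * num_tiny_magazines_mask == 7. A hash masked with 7 can land on index 6 or 7, which is why
 * the i > TINY_MAX_MAGAZINES check above is what guarantees the mask never reaches past the
 * allocated magazines.
 */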
511daa4c A |
7576 | #if TARGET_OS_EMBEDDED |
7577 | szone->last_tiny_advise = 0; | |
7578 | #endif | |
34e8f829 A |
7579 | |
7580 | // Init the tiny_magazine locks | |
7581 | LOCK_INIT(szone->tiny_regions_lock); | |
7582 | LOCK_INIT(szone->tiny_magazines[DEPOT_MAGAZINE_INDEX].magazine_lock); | |
7583 | for (i = 0; i < szone->num_tiny_magazines; ++i) { | |
7584 | LOCK_INIT(szone->tiny_magazines[i].magazine_lock); | |
7585 | } | |
7586 | ||
7587 | szone->num_small_magazines = (nproc > 1) ? MIN(nproc, SMALL_MAX_MAGAZINES) : 1; | |
7588 | ||
7589 | // FIXME vm_allocate() based on number of configured CPUs | |
7590 | magazine_t *small_magazines = allocate_pages(NULL, SMALL_MAGAZINE_PAGED_SIZE, 0, | |
7591 | SCALABLE_MALLOC_ADD_GUARD_PAGES, VM_MEMORY_MALLOC); | |
7592 | if (NULL == small_magazines) | |
7593 | return NULL; | |
7594 | ||
7595 | szone->small_magazines = &(small_magazines[1]); // szone->small_magazines[-1] is the Depot | |
7596 | ||
7597 | // The magazines are indexed in [0 .. (num_small_magazines - 1)] | |
7598 | // Find the smallest power of 2 that exceeds (num_small_magazines - 1) | |
7599 | szone->num_small_magazines_mask_shift = 0; i = 1; // restart the power-of-2 search; otherwise 'i' carries over from the tiny-magazine loop above | |
7600 | while( i <= (szone->num_small_magazines - 1) ) { | |
7601 | szone->num_small_magazines_mask_shift++; | |
7602 | i <<= 1; | |
7603 | } | |
7604 | ||
7605 | // Now if i <= SMALL_MAX_MAGAZINES we'll never access small_magazines[] out of bounds. | |
7606 | if (i > SMALL_MAX_MAGAZINES) { | |
7607 | malloc_printf("*** FATAL ERROR - magazine mask exceeds allocated magazines.\n"); | |
7608 | exit(-1); | |
7609 | } | |
7610 | ||
7611 | // Reduce i by 1 to obtain a mask covering [0 .. (num_small_magazines - 1)] | |
7612 | szone->num_small_magazines_mask = i - 1; // A mask used for hashing to a magazine index (and a safety aid) | |
511daa4c A |
7613 | #if TARGET_OS_EMBEDDED |
7614 | szone->last_small_advise = 0; | |
7615 | #endif | |
34e8f829 A |
7616 | |
7617 | // Init the small_magazine locks | |
7618 | LOCK_INIT(szone->small_regions_lock); | |
7619 | LOCK_INIT(szone->small_magazines[DEPOT_MAGAZINE_INDEX].magazine_lock); | |
7620 | for (i = 0; i < szone->num_small_magazines; ++i) { | |
7621 | LOCK_INIT(szone->small_magazines[i].magazine_lock); | |
7622 | } | |
7623 | ||
7624 | CHECK(szone, __PRETTY_FUNCTION__); | |
7625 | return (malloc_zone_t *)szone; | |
7626 | } | |
7627 | ||
7628 | // | |
7629 | // purgeable zones have their own "large" allocation pool, but share "tiny" and "small" | |
7630 | // heaps with a helper_zone identified in the call to create_purgeable_zone() | |
7631 | // | |
7632 | static size_t | |
7633 | purgeable_size(szone_t *szone, const void *ptr) | |
7634 | { | |
1f2f436a A |
7635 | // Only claim our large allocations, leave the shared tiny/small for the helper zone to claim. |
7636 | return szone_size_try_large(szone, ptr); | |
34e8f829 A |
7637 | } |
7638 | ||
7639 | static void * | |
7640 | purgeable_malloc(szone_t *szone, size_t size) { | |
7641 | if (size <= szone->large_threshold) | |
7642 | return szone_malloc(szone->helper_zone, size); | |
7643 | else | |
7644 | return szone_malloc(szone, size); | |
7645 | } | |
7646 | ||
7647 | static void * | |
7648 | purgeable_calloc(szone_t *szone, size_t num_items, size_t size) | |
7649 | { | |
7650 | size_t total_bytes = num_items * size; | |
7651 | ||
7652 | // Check for overflow of integer multiplication | |
7653 | if (num_items > 1) { | |
7654 | #if __LP64__ /* size_t is uint64_t */ | |
7655 | if ((num_items | size) & 0xffffffff00000000ul) { | |
7656 | // num_items or size equals or exceeds sqrt(2^64) == 2^32, appeal to wider arithmetic | |
7657 | __uint128_t product = ((__uint128_t)num_items) * ((__uint128_t)size); | |
7658 | if ((uint64_t)(product >> 64)) // compiles to test on upper register of register pair | |
7659 | return NULL; | |
7660 | } | |
7661 | #else /* size_t is uint32_t */ | |
7662 | if ((num_items | size) & 0xffff0000ul) { | |
7663 | // num_items or size equals or exceeds sqrt(2^32) == 2^16, appeal to wider arithmetic | |
7664 | uint64_t product = ((uint64_t)num_items) * ((uint64_t)size); | |
7665 | if ((uint32_t)(product >> 32)) // compiles to test on upper register of register pair | |
7666 | return NULL; | |
7667 | } | |
7668 | #endif | |
7669 | } | |
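/*
 * For illustration (LP64 branch): when both num_items and size are below 2^32 their product
 * cannot overflow a 64-bit size_t, so the wide multiply is skipped entirely. Otherwise the
 * product is formed in 128 bits: calloc(1UL << 33, 2) passes (the upper 64 bits are zero),
 * while calloc(1UL << 33, 1UL << 32) produces 2^65 and returns NULL here.
 */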
7670 | ||
7671 | if (total_bytes <= szone->large_threshold) | |
7672 | return szone_calloc(szone->helper_zone, 1, total_bytes); | |
7673 | else | |
7674 | return szone_calloc(szone, 1, total_bytes); | |
7675 | } | |
7676 | ||
7677 | static void * | |
7678 | purgeable_valloc(szone_t *szone, size_t size) | |
7679 | { | |
7680 | if (size <= szone->large_threshold) | |
7681 | return szone_valloc(szone->helper_zone, size); | |
7682 | else | |
7683 | return szone_valloc(szone, size); | |
7684 | } | |
7685 | ||
7686 | static void | |
7687 | purgeable_free(szone_t *szone, void *ptr) | |
7688 | { | |
7689 | large_entry_t *entry; | |
7690 | ||
7691 | SZONE_LOCK(szone); | |
7692 | entry = large_entry_for_pointer_no_lock(szone, ptr); | |
7693 | SZONE_UNLOCK(szone); | |
7694 | if (entry) { | |
7695 | return free_large(szone, ptr); | |
7696 | } else { | |
7697 | return szone_free(szone->helper_zone, ptr); | |
7698 | } | |
7699 | } | |
7700 | ||
7701 | static void | |
7702 | purgeable_free_definite_size(szone_t *szone, void *ptr, size_t size) | |
7703 | { | |
7704 | if (size <= szone->large_threshold) | |
7705 | return szone_free_definite_size(szone->helper_zone, ptr, size); | |
7706 | else | |
7707 | return szone_free_definite_size(szone, ptr, size); | |
7708 | } | |
7709 | ||
7710 | static void * | |
7711 | purgeable_realloc(szone_t *szone, void *ptr, size_t new_size) | |
7712 | { | |
1f2f436a A |
7713 | size_t old_size; |
7714 | ||
7715 | if (NULL == ptr) { | |
7716 | // If ptr is a null pointer, realloc() shall be equivalent to malloc() for the specified size. | |
7717 | return purgeable_malloc(szone, new_size); | |
7718 | } else if (0 == new_size) { | |
7719 | // If size is 0 and ptr is not a null pointer, the object pointed to is freed. | |
7720 | purgeable_free(szone, ptr); | |
7721 | // If size is 0, either a null pointer or a unique pointer that can be successfully passed | |
7722 | // to free() shall be returned. | |
7723 | return purgeable_malloc(szone, 1); | |
7724 | } | |
7725 | ||
7726 | old_size = purgeable_size(szone, ptr); // Now ptr can be safely size()'d | |
7727 | if (!old_size) | |
7728 | old_size = szone_size(szone->helper_zone, ptr); | |
7729 | ||
7730 | if (!old_size) { | |
7731 | szone_error(szone, 1, "pointer being reallocated was not allocated", ptr, NULL); | |
7732 | return NULL; | |
7733 | } | |
7734 | ||
7735 | // Distinguish 4 cases: {oldsize, newsize} x { <= , > large_threshold } | |
7736 | // and deal with the allocation crossing from the purgeable zone to the helper zone and vice versa. | |
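// Spelled out, with T == szone->large_threshold:
//   old <= T, new <= T : plain realloc within the helper zone
//   old <= T, new >  T : allocate in this (purgeable) zone, copy old_size bytes, free the helper block
//   old >  T, new <= T : allocate in the helper zone, copy new_size bytes, free the purgeable block
//   old >  T, new >  T : allocate in this zone, copy MIN(old_size, new_size) bytes, free the old block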
7737 | if (old_size <= szone->large_threshold) { | |
7738 | if (new_size <= szone->large_threshold) | |
7739 | return szone_realloc(szone->helper_zone, ptr, new_size); | |
7740 | else { | |
7741 | // allocation crosses from helper to purgeable zone | |
7742 | void * new_ptr = purgeable_malloc(szone, new_size); | |
7743 | if (new_ptr) { | |
7744 | memcpy(new_ptr, ptr, old_size); | |
7745 | szone_free_definite_size(szone->helper_zone, ptr, old_size); | |
7746 | } | |
7747 | return new_ptr; // in state VM_PURGABLE_NONVOLATILE | |
7748 | } | |
7749 | } else { | |
7750 | if (new_size <= szone->large_threshold) { | |
7751 | // allocation crosses from purgeable to helper zone | |
7752 | void * new_ptr = szone_malloc(szone->helper_zone, new_size); | |
7753 | if (new_ptr) { | |
7754 | memcpy(new_ptr, ptr, new_size); | |
7755 | purgeable_free_definite_size(szone, ptr, old_size); | |
7756 | } | |
7757 | return new_ptr; | |
7758 | } else { | |
7759 | void * new_ptr = purgeable_malloc(szone, new_size); | |
7760 | if (new_ptr) { | |
7761 | memcpy(new_ptr, ptr, MIN(old_size, new_size)); | |
7762 | purgeable_free_definite_size(szone, ptr, old_size); | |
7763 | } | |
7764 | return new_ptr; // in state VM_PURGABLE_NONVOLATILE | |
7765 | } | |
7766 | } | |
7767 | /* NOTREACHED */ | |
34e8f829 A |
7768 | } |
7769 | ||
7770 | static void | |
7771 | purgeable_destroy(szone_t *szone) | |
7772 | { | |
7773 | /* destroy large entries */ | |
7774 | size_t index = szone->num_large_entries; | |
7775 | large_entry_t *large; | |
7776 | vm_range_t range_to_deallocate; | |
7777 | ||
7778 | while (index--) { | |
7779 | large = szone->large_entries + index; | |
7780 | if (large->address) { | |
7781 | // we deallocate_pages, including guard pages | |
7782 | deallocate_pages(szone, (void *)(large->address), large->size, szone->debug_flags); | |
7783 | } | |
7784 | } | |
7785 | large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate); | |
7786 | if (range_to_deallocate.size) | |
7787 | deallocate_pages(szone, (void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0); | |
7788 | ||
7789 | /* Now destroy the separate szone region */ | |
1f2f436a | 7790 | deallocate_pages(szone, (void *)szone, SZONE_PAGED_SIZE, 0); |
34e8f829 A |
7791 | } |
7792 | ||
7793 | static unsigned | |
7794 | purgeable_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count) | |
7795 | { | |
7796 | return szone_batch_malloc(szone->helper_zone, size, results, count); | |
7797 | } | |
7798 | ||
7799 | static void | |
7800 | purgeable_batch_free(szone_t *szone, void **to_be_freed, unsigned count) | |
7801 | { | |
7802 | return szone_batch_free(szone->helper_zone, to_be_freed, count); | |
7803 | } | |
7804 | ||
7805 | static void * | |
7806 | purgeable_memalign(szone_t *szone, size_t alignment, size_t size) | |
7807 | { | |
7808 | if (size <= szone->large_threshold) | |
7809 | return szone_memalign(szone->helper_zone, alignment, size); | |
7810 | else | |
7811 | return szone_memalign(szone, alignment, size); | |
7812 | } | |
7813 | ||
7814 | static kern_return_t | |
7815 | purgeable_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address, | |
7816 | memory_reader_t reader, vm_range_recorder_t recorder) | |
7817 | { | |
7818 | szone_t *szone; | |
7819 | kern_return_t err; | |
7820 | ||
7821 | if (!reader) reader = _szone_default_reader; | |
7822 | ||
7823 | err = reader(task, zone_address, sizeof(szone_t), (void **)&szone); | |
7824 | if (err) return err; | |
7825 | ||
7826 | err = large_in_use_enumerator(task, context, type_mask, | |
7827 | (vm_address_t)szone->large_entries, szone->num_large_entries, reader, recorder); | |
7828 | return err; | |
7829 | } | |
7830 | ||
7831 | static size_t | |
7832 | purgeable_good_size(szone_t *szone, size_t size) | |
7833 | { | |
7834 | if (size <= szone->large_threshold) | |
7835 | return szone_good_size(szone->helper_zone, size); | |
7836 | else | |
7837 | return szone_good_size(szone, size); | |
7838 | } | |
7839 | ||
7840 | static boolean_t | |
7841 | purgeable_check(szone_t *szone) | |
7842 | { | |
7843 | return 1; | |
7844 | } | |
7845 | ||
7846 | static void | |
7847 | purgeable_print(szone_t *szone, boolean_t verbose) | |
7848 | { | |
7849 | _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, | |
7ba935f9 | 7850 | "Purgeable zone %p: inUse=%u(%y) flags=%d\n", | |
34e8f829 A |
7851 | szone, szone->num_large_objects_in_use, szone->num_bytes_in_large_objects, szone->debug_flags); |
7852 | } | |
7853 | ||
7854 | static void | |
7855 | purgeable_log(malloc_zone_t *zone, void *log_address) | |
7856 | { | |
7857 | szone_t *szone = (szone_t *)zone; | |
7858 | ||
7859 | szone->log_address = log_address; | |
7860 | } | |
7861 | ||
7862 | static void | |
7863 | purgeable_force_lock(szone_t *szone) | |
7864 | { | |
7865 | SZONE_LOCK(szone); | |
7866 | } | |
7867 | ||
7868 | static void | |
7869 | purgeable_force_unlock(szone_t *szone) | |
7870 | { | |
7871 | SZONE_UNLOCK(szone); | |
7872 | } | |
7873 | ||
7874 | static void | |
7875 | purgeable_statistics(szone_t *szone, malloc_statistics_t *stats) | |
7876 | { | |
7877 | stats->blocks_in_use = szone->num_large_objects_in_use; | |
7878 | stats->size_in_use = stats->max_size_in_use = stats->size_allocated = szone->num_bytes_in_large_objects; | |
7879 | } | |
7880 | ||
7881 | static boolean_t | |
7882 | purgeable_locked(szone_t *szone) | |
7883 | { | |
7884 | int tookLock; | |
7885 | ||
7886 | tookLock = SZONE_TRY_LOCK(szone); | |
7887 | if (tookLock == 0) | |
7888 | return 1; | |
7889 | SZONE_UNLOCK(szone); | |
7890 | return 0; | |
7891 | } | |
7892 | ||
1f2f436a A |
7893 | static size_t |
7894 | purgeable_pressure_relief(szone_t *szone, size_t goal) | |
7895 | { | |
7896 | return szone_pressure_relief(szone, goal) + szone_pressure_relief(szone->helper_zone, goal); | |
7897 | } | |
7898 | ||
34e8f829 A |
7899 | static const struct malloc_introspection_t purgeable_introspect = { |
7900 | (void *)purgeable_ptr_in_use_enumerator, | |
7901 | (void *)purgeable_good_size, | |
7902 | (void *)purgeable_check, | |
7903 | (void *)purgeable_print, | |
7904 | purgeable_log, | |
7905 | (void *)purgeable_force_lock, | |
7906 | (void *)purgeable_force_unlock, | |
7907 | (void *)purgeable_statistics, | |
7908 | (void *)purgeable_locked, | |
1f2f436a | 7909 | NULL, NULL, NULL, NULL, /* Zone enumeration version 7 and forward. */ |
34e8f829 A |
7910 | }; // marked as const to spare the DATA section |
7911 | ||
1f2f436a | 7912 | __private_extern__ malloc_zone_t * |
34e8f829 A |
7913 | create_purgeable_zone(size_t initial_size, malloc_zone_t *malloc_default_zone, unsigned debug_flags) |
7914 | { | |
7915 | szone_t *szone; | |
1f2f436a | 7916 | uint64_t hw_memsize = 0; |
34e8f829 | 7917 | |
1f2f436a A |
7918 | /* get memory for the zone. */ |
7919 | szone = allocate_pages(NULL, SZONE_PAGED_SIZE, 0, 0, VM_MEMORY_MALLOC); | |
34e8f829 A |
7920 | if (!szone) |
7921 | return NULL; | |
7922 | ||
7923 | /* set up the szone structure */ | |
7924 | #if 0 | |
7925 | #warning LOG enabled | |
7926 | szone->log_address = ~0; | |
7927 | #endif | |
7928 | ||
1f2f436a A |
7929 | #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) |
7930 | hw_memsize = *(uint64_t *)(uintptr_t)_COMM_PAGE_MEMORY_SIZE; | |
7931 | #else | |
7932 | size_t uint64_t_size = sizeof(hw_memsize); | |
7933 | ||
7934 | sysctlbyname("hw.memsize", &hw_memsize, &uint64_t_size, 0, 0); | |
7935 | #endif | |
34e8f829 A |
7936 | /* Purgeable zone does not participate in the adaptive "largemem" sizing. */ |
7937 | szone->is_largemem = 0; | |
7938 | szone->large_threshold = LARGE_THRESHOLD; | |
7939 | szone->vm_copy_threshold = VM_COPY_THRESHOLD; | |
7940 | ||
7941 | #if LARGE_CACHE | |
1f2f436a A |
7942 | szone->large_entry_cache_reserve_limit = |
7943 | hw_memsize >> 10; // madvise(..., MADV_REUSABLE) death-row arrivals above this threshold [~0.1%] | |
7944 | ||
34e8f829 A |
7945 | /* <rdar://problem/6610904> Reset protection when returning a previous large allocation? */ |
7946 | int32_t libSystemVersion = NSVersionOfLinkTimeLibrary("System"); | |
7947 | if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 112) /* CFSystemVersionSnowLeopard */) | |
7948 | szone->large_legacy_reset_mprotect = TRUE; | |
7949 | else | |
7950 | szone->large_legacy_reset_mprotect = FALSE; | |
7951 | #endif | |
7952 | ||
1f2f436a | 7953 | szone->basic_zone.version = 8; |
34e8f829 A |
7954 | szone->basic_zone.size = (void *)purgeable_size; |
7955 | szone->basic_zone.malloc = (void *)purgeable_malloc; | |
7956 | szone->basic_zone.calloc = (void *)purgeable_calloc; | |
7957 | szone->basic_zone.valloc = (void *)purgeable_valloc; | |
7958 | szone->basic_zone.free = (void *)purgeable_free; | |
7959 | szone->basic_zone.realloc = (void *)purgeable_realloc; | |
7960 | szone->basic_zone.destroy = (void *)purgeable_destroy; | |
7961 | szone->basic_zone.batch_malloc = (void *)purgeable_batch_malloc; | |
7962 | szone->basic_zone.batch_free = (void *)purgeable_batch_free; | |
7963 | szone->basic_zone.introspect = (struct malloc_introspection_t *)&purgeable_introspect; | |
7964 | szone->basic_zone.memalign = (void *)purgeable_memalign; | |
7965 | szone->basic_zone.free_definite_size = (void *)purgeable_free_definite_size; | |
1f2f436a | 7966 | szone->basic_zone.pressure_relief = (void *)purgeable_pressure_relief; |
7ba935f9 | 7967 | |
1f2f436a A |
7968 | szone->basic_zone.reserved1 = 0; /* Set to zero once and for all as required by CFAllocator. */ |
7969 | szone->basic_zone.reserved2 = 0; /* Set to zero once and for all as required by CFAllocator. */ | |
7970 | mprotect(szone, sizeof(szone->basic_zone), PROT_READ); /* Prevent overwriting the function pointers in basic_zone. */ | |
7971 | ||
34e8f829 | 7972 | szone->debug_flags = debug_flags | SCALABLE_MALLOC_PURGEABLE; |
1f2f436a | 7973 | |
34e8f829 A |
7974 | /* Purgeable zone does not support SCALABLE_MALLOC_ADD_GUARD_PAGES. */ |
7975 | if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) { | |
7976 | _malloc_printf(ASL_LEVEL_INFO, "purgeable zone does not support guard pages\n"); | |
7977 | szone->debug_flags &= ~SCALABLE_MALLOC_ADD_GUARD_PAGES; | |
7978 | } | |
7979 | ||
7980 | LOCK_INIT(szone->large_szone_lock); | |
7981 | ||
7982 | szone->helper_zone = (struct szone_s *)malloc_default_zone; | |
7983 | ||
7984 | CHECK(szone, __PRETTY_FUNCTION__); | |
7985 | return (malloc_zone_t *)szone; | |
7986 | } | |
7987 | ||
7988 | /* | |
7989 | * For use by CheckFix: create a new zone whose behavior is, apart from | |
7990 | * the use of death-row and per-CPU magazines, that of Leopard. | |
7991 | */ | |
7992 | static NOINLINE void * | |
7993 | legacy_valloc(szone_t *szone, size_t size) | |
7994 | { | |
7995 | void *ptr; | |
7996 | size_t num_pages; | |
7997 | ||
7998 | num_pages = round_page(size) >> vm_page_shift; | |
7999 | ptr = large_malloc(szone, num_pages, 0, TRUE); | |
8000 | #if DEBUG_MALLOC | |
8001 | if (LOG(szone, ptr)) | |
8002 | malloc_printf("legacy_valloc returned %p\n", ptr); | |
8003 | #endif | |
8004 | return ptr; | |
8005 | } | |
8006 | ||
1f2f436a | 8007 | __private_extern__ malloc_zone_t * |
34e8f829 A |
8008 | create_legacy_scalable_zone(size_t initial_size, unsigned debug_flags) |
8009 | { | |
8010 | malloc_zone_t *mzone = create_scalable_zone(initial_size, debug_flags); | |
8011 | szone_t *szone = (szone_t *)mzone; | |
8012 | ||
8013 | if (!szone) | |
8014 | return NULL; | |
8015 | ||
8016 | szone->is_largemem = 0; | |
8017 | szone->num_small_slots = NUM_SMALL_SLOTS; | |
8018 | szone->large_threshold = LARGE_THRESHOLD; | |
8019 | szone->vm_copy_threshold = VM_COPY_THRESHOLD; | |
8020 | ||
1f2f436a | 8021 | mprotect(szone, sizeof(szone->basic_zone), PROT_READ | PROT_WRITE); |
34e8f829 A |
8022 | szone->basic_zone.valloc = (void *)legacy_valloc; |
8023 | szone->basic_zone.free_definite_size = NULL; | |
1f2f436a | 8024 | mprotect(szone, sizeof(szone->basic_zone), PROT_READ); |
34e8f829 A |
8025 | |
8026 | return mzone; | |
8027 | } | |
8028 | ||
8029 | /********* Support code for emacs unexec ************/ | |
8030 | ||
8031 | /* History of freezedry version numbers: | |
8032 | * | |
8033 | * 1) Old malloc (before the scalable malloc implementation in this file | |
8034 | * existed). | |
8035 | * 2) Original freezedrying code for scalable malloc. This code was apparently | |
8036 | * based on the old freezedrying code and was fundamentally flawed in its | |
8037 | * assumption that tracking allocated memory regions was adequate to fake | |
8038 | * operations on freezedried memory. This doesn't work, since scalable | |
8039 | * malloc does not store flags in front of large page-aligned allocations. | |
8040 | * 3) Original szone-based freezedrying code. | |
8041 | * 4) Fresher malloc with tiny zone | |
8042 | * 5) 32/64bit compatible malloc | |
8043 | * 6) Metadata within 1MB and 8MB region for tiny and small | |
8044 | * | |
8045 | * No version backward compatibility is provided, but the version number does | |
8046 | * make it possible for malloc_jumpstart() to return an error if the application | |
8047 | * was freezedried with an older version of malloc. | |
8048 | */ | |
8049 | #define MALLOC_FREEZEDRY_VERSION 6 | |
8050 | ||
8051 | typedef struct { | |
8052 | unsigned version; | |
8053 | unsigned nszones; | |
8054 | szone_t *szones; | |
8055 | } malloc_frozen; | |
8056 | ||
8057 | static void * | |
8058 | frozen_malloc(szone_t *zone, size_t new_size) | |
8059 | { | |
8060 | return malloc(new_size); | |
8061 | } | |
8062 | ||
8063 | static void * | |
8064 | frozen_calloc(szone_t *zone, size_t num_items, size_t size) | |
8065 | { | |
8066 | return calloc(num_items, size); | |
8067 | } | |
8068 | ||
8069 | static void * | |
8070 | frozen_valloc(szone_t *zone, size_t new_size) | |
8071 | { | |
8072 | return valloc(new_size); | |
8073 | } | |
8074 | ||
8075 | static void * | |
8076 | frozen_realloc(szone_t *zone, void *ptr, size_t new_size) | |
8077 | { | |
8078 | size_t old_size = szone_size(zone, ptr); | |
8079 | void *new_ptr; | |
8080 | ||
8081 | if (new_size <= old_size) { | |
8082 | return ptr; | |
8083 | } | |
8084 | new_ptr = malloc(new_size); | |
8085 | if (old_size > 0) { | |
8086 | memcpy(new_ptr, ptr, old_size); | |
8087 | } | |
8088 | return new_ptr; | |
8089 | } | |
8090 | ||
8091 | static void | |
8092 | frozen_free(szone_t *zone, void *ptr) | |
8093 | { | |
8094 | } | |
8095 | ||
8096 | static void | |
8097 | frozen_destroy(szone_t *zone) | |
8098 | { | |
8099 | } | |
8100 | ||
8101 | /********* Pseudo-private API for emacs unexec ************/ | |
8102 | ||
8103 | /* | |
8104 | * malloc_freezedry() records all of the szones in use, so that they can be | |
8105 | * partially reconstituted by malloc_jumpstart(). Due to the differences | |
8106 | * between reconstituted memory regions and those created by the szone code, | |
8107 | * care is taken not to reallocate from the freezedried memory, except in the | |
8108 | * case of a non-growing realloc(). | |
8109 | * | |
8110 | * Due to the flexibility provided by the zone registration mechanism, it is | |
8111 | * impossible to implement generic freezedrying for any zone type. This code | |
8112 | * only handles applications that use the szone allocator, so malloc_freezedry() | |
8113 | * returns 0 (error) if any non-szone zones are encountered. | |
8114 | */ | |
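/*
 * Sketch of the intended calling sequence (the dumper shown here is hypothetical; only
 * malloc_freezedry() and malloc_jumpstart() below are real):
 *
 *	uintptr_t cookie = malloc_freezedry();	// 0 means a non-szone zone was registered
 *	if (cookie) {
 *		// ... dump the process image, keeping 'cookie' somewhere the restarted image can read ...
 *	}
 *	// In the restarted image, before touching any freezedried allocation:
 *	if (malloc_jumpstart(cookie) != 0) {
 *		// freezedry version mismatch; the frozen zones were not re-registered
 *	}
 */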
8115 | ||
8116 | uintptr_t | |
8117 | malloc_freezedry(void) | |
8118 | { | |
8119 | extern unsigned malloc_num_zones; | |
8120 | extern malloc_zone_t **malloc_zones; | |
8121 | malloc_frozen *data; | |
8122 | unsigned i; | |
8123 | ||
8124 | /* Allocate space in which to store the freezedry state. */ | |
8125 | data = (malloc_frozen *) malloc(sizeof(malloc_frozen)); | |
8126 | ||
8127 | /* Set freezedry version number so that malloc_jumpstart() can check for compatibility. */ | |
8128 | data->version = MALLOC_FREEZEDRY_VERSION; | |
8129 | ||
8130 | /* Allocate the array of szone pointers. */ | |
8131 | data->nszones = malloc_num_zones; | |
8132 | data->szones = (szone_t *) calloc(malloc_num_zones, sizeof(szone_t)); | |
8133 | ||
8134 | /* | |
8135 | * Fill in the array of szone structures. They are copied rather than | |
8136 | * referenced, since the originals are likely to be clobbered during malloc | |
8137 | * initialization. | |
8138 | */ | |
8139 | for (i = 0; i < malloc_num_zones; i++) { | |
8140 | if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) { | |
8141 | /* Unknown zone type. */ | |
8142 | free(data->szones); | |
8143 | free(data); | |
8144 | return 0; | |
8145 | } | |
8146 | memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t)); | |
8147 | } | |
8148 | ||
8149 | return((uintptr_t)data); | |
8150 | } | |
8151 | ||
8152 | int | |
8153 | malloc_jumpstart(uintptr_t cookie) | |
8154 | { | |
8155 | malloc_frozen *data = (malloc_frozen *)cookie; | |
8156 | unsigned i; | |
8157 | ||
8158 | if (data->version != MALLOC_FREEZEDRY_VERSION) { | |
8159 | /* Unsupported freezedry version. */ | |
8160 | return 1; | |
8161 | } | |
8162 | ||
8163 | for (i = 0; i < data->nszones; i++) { | |
8164 | /* Set function pointers. Even the functions that stay the same must be | |
8165 | * set, since there are no guarantees that they will be mapped to the | |
8166 | * same addresses. */ | |
8167 | data->szones[i].basic_zone.size = (void *) szone_size; | |
8168 | data->szones[i].basic_zone.malloc = (void *) frozen_malloc; | |
8169 | data->szones[i].basic_zone.calloc = (void *) frozen_calloc; | |
8170 | data->szones[i].basic_zone.valloc = (void *) frozen_valloc; | |
8171 | data->szones[i].basic_zone.free = (void *) frozen_free; | |
8172 | data->szones[i].basic_zone.realloc = (void *) frozen_realloc; | |
8173 | data->szones[i].basic_zone.destroy = (void *) frozen_destroy; | |
8174 | data->szones[i].basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect; | |
8175 | ||
8176 | /* Register the freezedried zone. */ | |
8177 | malloc_zone_register(&data->szones[i].basic_zone); | |
8178 | } | |
8179 | ||
8180 | return 0; | |
8181 | } |