/*
 * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"
#include "allocator_internal.h"

#if DISPATCH_ALLOCATOR

#ifndef VM_MEMORY_LIBDISPATCH
#define VM_MEMORY_LIBDISPATCH 74
#endif

// _dispatch_main_heap is the first heap in the linked list, where searches
// always begin.
//
// _dispatch_main_heap, and dh_next, are read normally but only written (in
// try_create_heap) by cmpxchg. They start life at 0, and are only written
// once to non-zero. They are not marked volatile. There is a small risk that
// some thread may see a stale 0 value and enter try_create_heap. It will
// waste some time in an allocate syscall, but eventually it will try to
// cmpxchg, expecting to overwrite 0 with an address. This will fail (because
// another thread already did this); the thread will then deallocate the
// unused memory and continue with the new value.
//
// If something goes wrong here, the symptom would be a NULL dereference
// in alloc_continuation_from_heap or _magazine when dereferencing the
// magazine pointer.
static dispatch_heap_t _dispatch_main_heap;

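// A rough sketch of the structures used below (the authoritative sizes and
// field definitions live in allocator_internal.h):
//
//   heap:     an array with one magazine per CPU; additional heaps are
//             chained through header.dh_next when a heap fills up.
//   magazine: a header, an array of supermaps, a 2-D array of bitmaps
//             ("maps"), and the continuations themselves ("conts"); when
//             PACK_FIRST_PAGE_WITH_CONTINUATIONS is set, the otherwise
//             unused space in the header page holds fp_maps/fp_conts.
//   bitmap:   one bit per continuation; a set bit means "allocated".
//   supermap: one advisory bit per bitmap; a set bit means "that bitmap
//             looked full last time someone checked".
//
// Magazines are carved out of BYTES_PER_MAGAZINE-aligned regions, which is
// what lets magazine_for_continuation() below recover the owning magazine by
// masking a continuation's address.
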
DISPATCH_ALWAYS_INLINE
static void
set_last_found_page(bitmap_t *val)
{
	dispatch_assert(_dispatch_main_heap);
	unsigned int cpu = _dispatch_cpu_number();
	_dispatch_main_heap[cpu].header.last_found_page = val;
}

DISPATCH_ALWAYS_INLINE
static bitmap_t *
last_found_page(void)
{
	dispatch_assert(_dispatch_main_heap);
	unsigned int cpu = _dispatch_cpu_number();
	return _dispatch_main_heap[cpu].header.last_found_page;
}

#pragma mark -
#pragma mark dispatch_alloc_bitmaps

DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bitmap_t *
supermap_address(struct dispatch_magazine_s *magazine, unsigned int supermap)
{
	return &magazine->supermaps[supermap];
}

DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bitmap_t *
bitmap_address(struct dispatch_magazine_s *magazine, unsigned int supermap,
		unsigned int map)
{
	return &magazine->maps[supermap][map];
}

DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static dispatch_continuation_t
continuation_address(struct dispatch_magazine_s *magazine,
		unsigned int supermap, unsigned int map, unsigned int index)
{
#if DISPATCH_DEBUG
	dispatch_assert(supermap < SUPERMAPS_PER_MAGAZINE);
	dispatch_assert(map < BITMAPS_PER_SUPERMAP);
	dispatch_assert(index < CONTINUATIONS_PER_BITMAP);
#endif
	return (dispatch_continuation_t)&magazine->conts[supermap][map][index];
}

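// Magazines are allocated on BYTES_PER_MAGAZINE-aligned boundaries (see
// _dispatch_alloc_try_create_heap below), so masking off the low bits of any
// continuation address yields its magazine. Purely for illustration: if
// BYTES_PER_MAGAZINE were 2MB, a continuation at 0x10023c040 would belong to
// the magazine based at 0x100200000.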
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static struct dispatch_magazine_s *
magazine_for_continuation(dispatch_continuation_t c)
{
	return (struct dispatch_magazine_s *)((uintptr_t)c & MAGAZINE_MASK);
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static void
get_cont_and_indices_for_bitmap_and_index(bitmap_t *bitmap,
		unsigned int index, dispatch_continuation_t *continuation_out,
		bitmap_t **supermap_out, unsigned int *bitmap_index_out)
{
	// magazine_for_continuation() wants a continuation, not a bitmap, but it
	// works here because it just masks off the bottom bits of the address.
	struct dispatch_magazine_s *m = magazine_for_continuation((void *)bitmap);
	unsigned int mindex = (unsigned int)(bitmap - m->maps[0]);
	unsigned int bindex = mindex % BITMAPS_PER_SUPERMAP;
	unsigned int sindex = mindex / BITMAPS_PER_SUPERMAP;
	dispatch_assert(&m->maps[sindex][bindex] == bitmap);
	if (fastpath(continuation_out)) {
		*continuation_out = continuation_address(m, sindex, bindex, index);
	}
	if (fastpath(supermap_out)) *supermap_out = supermap_address(m, sindex);
	if (fastpath(bitmap_index_out)) *bitmap_index_out = bindex;
}

DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bool
continuation_is_in_first_page(dispatch_continuation_t c)
{
#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// (the base of c's magazine == the base of c's page)
	// => c is in first page of magazine
	return (((uintptr_t)c & MAGAZINE_MASK) ==
			((uintptr_t)c & ~(uintptr_t)DISPATCH_ALLOCATOR_PAGE_MASK));
#else
	(void)c;
	return false;
#endif
}

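// Maps a continuation back to the bitmap (and supermap) that tracks it, by
// pure index arithmetic on its offset within the magazine. Purely for
// illustration, with made-up constants (CONTINUATIONS_PER_BITMAP == 32,
// BITMAPS_PER_SUPERMAP == 64): continuation number 2500 in the main conts
// array would land in supermap 2500 / (64 * 32) == 1, bitmap
// (2500 / 32) % 64 == 14, bit 2500 % 32 == 4.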
DISPATCH_ALWAYS_INLINE_NDEBUG
static void
get_maps_and_indices_for_continuation(dispatch_continuation_t c,
		bitmap_t **supermap_out, unsigned int *bitmap_index_out,
		bitmap_t **bitmap_out, unsigned int *index_out)
{
	unsigned int cindex, sindex, index, mindex;
	padded_continuation *p = (padded_continuation *)c;
	struct dispatch_magazine_s *m = magazine_for_continuation(c);
#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	if (fastpath(continuation_is_in_first_page(c))) {
		cindex = (unsigned int)(p - m->fp_conts);
		index = cindex % CONTINUATIONS_PER_BITMAP;
		mindex = cindex / CONTINUATIONS_PER_BITMAP;
		if (fastpath(supermap_out)) *supermap_out = NULL;
		if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex;
		if (fastpath(bitmap_out)) *bitmap_out = &m->fp_maps[mindex];
		if (fastpath(index_out)) *index_out = index;
		return;
	}
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS
	cindex = (unsigned int)(p - (padded_continuation *)m->conts);
	sindex = cindex / (BITMAPS_PER_SUPERMAP * CONTINUATIONS_PER_BITMAP);
	mindex = (cindex / CONTINUATIONS_PER_BITMAP) % BITMAPS_PER_SUPERMAP;
	index = cindex % CONTINUATIONS_PER_BITMAP;
	if (fastpath(supermap_out)) *supermap_out = &m->supermaps[sindex];
	if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex;
	if (fastpath(bitmap_out)) *bitmap_out = &m->maps[sindex][mindex];
	if (fastpath(index_out)) *index_out = index;
}

// Base address of page, or NULL if this page shouldn't be madvise()d
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static void *
madvisable_page_base_for_continuation(dispatch_continuation_t c)
{
	if (fastpath(continuation_is_in_first_page(c))) {
		return NULL;
	}
	void *page_base = (void *)((uintptr_t)c &
			~(uintptr_t)DISPATCH_ALLOCATOR_PAGE_MASK);
#if DISPATCH_DEBUG
	struct dispatch_magazine_s *m = magazine_for_continuation(c);
	if (slowpath(page_base < (void *)&m->conts)) {
		DISPATCH_CRASH("madvisable continuation too low");
	}
	if (slowpath(page_base > (void *)&m->conts[SUPERMAPS_PER_MAGAZINE-1]
			[BITMAPS_PER_SUPERMAP-1][CONTINUATIONS_PER_BITMAP-1])) {
		DISPATCH_CRASH("madvisable continuation too high");
	}
#endif
	return page_base;
}

// Bitmap that controls the first few continuations in the same page as
// the continuations controlled by the passed bitmap. Undefined results if the
// passed bitmap controls continuations in the first page.
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bitmap_t *
first_bitmap_in_same_page(bitmap_t *b)
{
#if DISPATCH_DEBUG
	struct dispatch_magazine_s *m;
	m = magazine_for_continuation((void*)b);
	dispatch_assert(b >= &m->maps[0][0]);
	dispatch_assert(b < &m->maps[SUPERMAPS_PER_MAGAZINE]
			[BITMAPS_PER_SUPERMAP]);
#endif
	const uintptr_t PAGE_BITMAP_MASK = (BITMAPS_PER_PAGE *
			BYTES_PER_BITMAP) - 1;
	return (bitmap_t *)((uintptr_t)b & ~PAGE_BITMAP_MASK);
}

DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bool
bitmap_is_full(bitmap_t bits)
{
	return (bits == BITMAP_ALL_ONES);
}

#define NO_BITS_WERE_UNSET (UINT_MAX)

// max_index is the 0-based position of the most significant bit that is
// allowed to be set.
DISPATCH_ALWAYS_INLINE_NDEBUG
static unsigned int
bitmap_set_first_unset_bit_upto_index(volatile bitmap_t *bitmap,
		unsigned int max_index)
{
	// No barriers needed in acquire path: the just-allocated
	// continuation is "uninitialized", so the caller shouldn't
	// load from it before storing, so we don't need to guard
	// against reordering those loads.
	dispatch_assert(sizeof(*bitmap) == sizeof(unsigned long));
	return dispatch_atomic_set_first_bit(bitmap, max_index);
}

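// dispatch_atomic_set_first_bit() is an atomic shim defined elsewhere in the
// project; conceptually it behaves like the following sketch (the real
// implementation may differ and may use architecture-specific instructions):
//
//	for (;;) {
//		bitmap_t b = *bitmap;
//		if (slowpath(b == BITMAP_ALL_ONES)) return NO_BITS_WERE_UNSET;
//		unsigned int index = (unsigned int)__builtin_ctzl(~b);
//		if (slowpath(index > max_index)) return NO_BITS_WERE_UNSET;
//		if (dispatch_atomic_cmpxchg(bitmap, b, b | (BITMAP_C(1) << index),
//				relaxed)) {
//			return index;
//		}
//	}
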
DISPATCH_ALWAYS_INLINE
static unsigned int
bitmap_set_first_unset_bit(volatile bitmap_t *bitmap)
{
	return bitmap_set_first_unset_bit_upto_index(bitmap, UINT_MAX);
}

#define CLEAR_EXCLUSIVELY true
#define CLEAR_NONEXCLUSIVELY false

// Return true if this bit was the last in the bitmap, and it is now all zeroes
DISPATCH_ALWAYS_INLINE_NDEBUG
static bool
bitmap_clear_bit(volatile bitmap_t *bitmap, unsigned int index,
		bool exclusively)
{
#if DISPATCH_DEBUG
	dispatch_assert(index < CONTINUATIONS_PER_BITMAP);
#endif
	const bitmap_t mask = BITMAP_C(1) << index;
	bitmap_t b;

	if (exclusively == CLEAR_EXCLUSIVELY) {
		if (slowpath((*bitmap & mask) == 0)) {
			DISPATCH_CRASH("Corruption: failed to clear bit exclusively");
		}
	}

	// and-and-fetch
	b = dispatch_atomic_and(bitmap, ~mask, release);
	return b == 0;
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static void
mark_bitmap_as_full_if_still_full(volatile bitmap_t *supermap,
		unsigned int bitmap_index, volatile bitmap_t *bitmap)
{
#if DISPATCH_DEBUG
	dispatch_assert(bitmap_index < BITMAPS_PER_SUPERMAP);
#endif
	const bitmap_t mask = BITMAP_C(1) << bitmap_index;
	bitmap_t s, s_new, s_masked;

	if (!bitmap_is_full(*bitmap)) {
		return;
	}
	s_new = *supermap;
	for (;;) {
		// No barriers because supermaps are only advisory, they
		// don't protect access to other memory.
		s = s_new;
		s_masked = s | mask;
		if (dispatch_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) ||
				!bitmap_is_full(*bitmap)) {
			return;
		}
	}
}
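
// Note: the supermap bit set above is only a hint. The allocation path uses
// it to skip supermaps whose bitmaps all look full; a stale "full" bit can
// only cause a wasted scan or a fallthrough to another heap, never a double
// allocation, and the free path clears it again with
// bitmap_clear_bit(supermap, ..., CLEAR_NONEXCLUSIVELY). The
// !bitmap_is_full() recheck stops the retry loop if a concurrent free
// un-fills the bitmap while we race on the supermap word.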

#pragma mark -
#pragma mark dispatch_alloc_continuation_alloc

#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
DISPATCH_ALWAYS_INLINE_NDEBUG
static dispatch_continuation_t
alloc_continuation_from_first_page(struct dispatch_magazine_s *magazine)
{
	unsigned int i, index, continuation_index;

	// TODO: unroll if this is hot?
	for (i = 0; i < FULL_BITMAPS_IN_FIRST_PAGE; i++) {
		index = bitmap_set_first_unset_bit(&magazine->fp_maps[i]);
		if (fastpath(index != NO_BITS_WERE_UNSET)) goto found;
	}
	if (REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE) {
		index = bitmap_set_first_unset_bit_upto_index(&magazine->fp_maps[i],
				REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE - 1);
		if (fastpath(index != NO_BITS_WERE_UNSET)) goto found;
	}
	return NULL;

found:
	continuation_index = (i * CONTINUATIONS_PER_BITMAP) + index;
	return (dispatch_continuation_t)&magazine->fp_conts[continuation_index];
}
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS

DISPATCH_ALWAYS_INLINE_NDEBUG
static dispatch_continuation_t
alloc_continuation_from_magazine(struct dispatch_magazine_s *magazine)
{
	unsigned int s, b, index;

	for (s = 0; s < SUPERMAPS_PER_MAGAZINE; s++) {
		volatile bitmap_t *supermap = supermap_address(magazine, s);
		if (bitmap_is_full(*supermap)) {
			continue;
		}
		for (b = 0; b < BITMAPS_PER_SUPERMAP; b++) {
			volatile bitmap_t *bitmap = bitmap_address(magazine, s, b);
			index = bitmap_set_first_unset_bit(bitmap);
			if (index != NO_BITS_WERE_UNSET) {
				set_last_found_page(
						first_bitmap_in_same_page((bitmap_t *)bitmap));
				mark_bitmap_as_full_if_still_full(supermap, b, bitmap);
				return continuation_address(magazine, s, b, index);
			}
		}
	}
	return NULL;
}

DISPATCH_NOINLINE
static void
_dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr)
{
#if HAVE_MACH
	kern_return_t kr;
	mach_vm_size_t vm_size = MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE;
	mach_vm_offset_t vm_mask = ~MAGAZINE_MASK;
	mach_vm_address_t vm_addr = vm_page_size;
	while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size,
			vm_mask, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH),
			MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT))) {
		if (kr != KERN_NO_SPACE) {
			(void)dispatch_assume_zero(kr);
			DISPATCH_CLIENT_CRASH("Could not allocate heap");
		}
		_dispatch_temporary_resource_shortage();
		vm_addr = vm_page_size;
	}
	uintptr_t aligned_region = (uintptr_t)vm_addr;
#else // HAVE_MACH
	const size_t region_sz = (1 + MAGAZINES_PER_HEAP) * BYTES_PER_MAGAZINE;
	void *region_p;
	while (!dispatch_assume((region_p = mmap(NULL, region_sz,
			PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE,
			VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), 0)) != MAP_FAILED)) {
		_dispatch_temporary_resource_shortage();
	}
	uintptr_t region = (uintptr_t)region_p;
	uintptr_t region_end = region + region_sz;
	uintptr_t aligned_region, aligned_region_end;
	uintptr_t bottom_slop_len, top_slop_len;
	// Realign if needed; find the slop at top/bottom to unmap
	if ((region & ~(MAGAZINE_MASK)) == 0) {
		bottom_slop_len = 0;
		aligned_region = region;
		aligned_region_end = region_end - BYTES_PER_MAGAZINE;
		top_slop_len = BYTES_PER_MAGAZINE;
	} else {
		aligned_region = (region & MAGAZINE_MASK) + BYTES_PER_MAGAZINE;
		aligned_region_end = aligned_region +
				(MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
		bottom_slop_len = aligned_region - region;
		top_slop_len = BYTES_PER_MAGAZINE - bottom_slop_len;
	}
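	// Purely for illustration, with a made-up 2MB magazine and a 4-magazine
	// heap: mmap() returns a 10MB region; if it starts at 0x7f3a00300000, the
	// first 2MB boundary inside it is 0x7f3a00400000, so bottom_slop_len is
	// 0x100000, top_slop_len is 0x100000, and exactly 8MB of aligned
	// magazines remain in between.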
#if DISPATCH_DEBUG
	// Double-check our math.
	dispatch_assert(aligned_region % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(aligned_region % vm_kernel_page_size == 0);
	dispatch_assert(aligned_region_end % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end % vm_kernel_page_size == 0);
	dispatch_assert(aligned_region_end > aligned_region);
	dispatch_assert(top_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(bottom_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end + top_slop_len == region_end);
	dispatch_assert(region + bottom_slop_len == aligned_region);
	dispatch_assert(region_sz == bottom_slop_len + top_slop_len +
			MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)region, bottom_slop_len,
				PROT_NONE));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)aligned_region_end,
				top_slop_len, PROT_NONE));
	}
#else
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)region, bottom_slop_len));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)aligned_region_end,
				top_slop_len));
	}
#endif // DISPATCH_DEBUG
#endif // HAVE_MACH

	if (!dispatch_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
			relaxed)) {
		// If we lost the race to link in the new region, unmap the whole thing.
#if DISPATCH_DEBUG
		(void)dispatch_assume_zero(mprotect((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE, PROT_NONE));
#else
		(void)dispatch_assume_zero(munmap((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE));
#endif
	}
}

DISPATCH_NOINLINE
static dispatch_continuation_t
_dispatch_alloc_continuation_from_heap(dispatch_heap_t heap)
{
	dispatch_continuation_t cont;

	unsigned int cpu_number = _dispatch_cpu_number();
#ifdef DISPATCH_DEBUG
	dispatch_assert(cpu_number < NUM_CPU);
#endif

#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// First try the continuations in the first page for this CPU
	cont = alloc_continuation_from_first_page(&(heap[cpu_number]));
	if (fastpath(cont)) {
		return cont;
	}
#endif
	// Next, try the rest of the magazine for this CPU
	cont = alloc_continuation_from_magazine(&(heap[cpu_number]));
	return cont;
}

DISPATCH_NOINLINE
static dispatch_continuation_t
_dispatch_alloc_continuation_from_heap_slow(void)
{
	dispatch_heap_t *heap = &_dispatch_main_heap;
	dispatch_continuation_t cont;

	for (;;) {
		if (!fastpath(*heap)) {
			_dispatch_alloc_try_create_heap(heap);
		}
		cont = _dispatch_alloc_continuation_from_heap(*heap);
		if (fastpath(cont)) {
			return cont;
		}
		// If we have tuned our parameters right, 99.999% of apps should
		// never reach this point! The ones that do have gone off the rails...
		//
		// Magazine is full? Onto the next heap!
		// We tried 'stealing' from other CPUs' magazines. The net effect
		// was worse performance from more wasted search time and more
		// cache contention.

		// rdar://11378331
		// Future optimization: start at the page we last used, start
		// in the *zone* we last used. But this would only improve deeply
		// pathological cases like dispatch_starfish
		heap = &(*heap)->header.dh_next;
	}
}

DISPATCH_ALLOC_NOINLINE
static dispatch_continuation_t
_dispatch_alloc_continuation_alloc(void)
{
	dispatch_continuation_t cont;

	if (fastpath(_dispatch_main_heap)) {
		// Start looking in the same page where we found a continuation
		// last time.
		bitmap_t *last = last_found_page();
		if (fastpath(last)) {
			unsigned int i;
			for (i = 0; i < BITMAPS_PER_PAGE; i++) {
				bitmap_t *cur = last + i;
				unsigned int index = bitmap_set_first_unset_bit(cur);
				if (fastpath(index != NO_BITS_WERE_UNSET)) {
					bitmap_t *supermap;
					unsigned int bindex;
					get_cont_and_indices_for_bitmap_and_index(cur,
							index, &cont, &supermap, &bindex);
					mark_bitmap_as_full_if_still_full(supermap, bindex,
							cur);
					return cont;
				}
			}
		}

		cont = _dispatch_alloc_continuation_from_heap(_dispatch_main_heap);
		if (fastpath(cont)) {
			return cont;
		}
	}
	return _dispatch_alloc_continuation_from_heap_slow();
}

#pragma mark -
#pragma mark dispatch_alloc_continuation_free

DISPATCH_NOINLINE
static void
_dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c)
{
	void *page = madvisable_page_base_for_continuation(c);
	if (!page) {
		// page can't be madvised; maybe it contains non-continuations
		return;
	}
	// Are all the continuations in this page unallocated?
	volatile bitmap_t *page_bitmaps;
	get_maps_and_indices_for_continuation((dispatch_continuation_t)page, NULL,
			NULL, (bitmap_t **)&page_bitmaps, NULL);
	unsigned int i;
	for (i = 0; i < BITMAPS_PER_PAGE; i++) {
		if (page_bitmaps[i] != 0) {
			return;
		}
	}
	// They are all unallocated, so we could madvise the page. Try to
	// take ownership of them all.
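	// (Setting a bitmap to BITMAP_ALL_ONES makes every continuation in it
	// look allocated, so concurrent allocators can't hand one out while the
	// page is being madvised; if any cmpxchg fails, someone allocated from
	// the page first and we back out below.)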
	int last_locked = 0;
	do {
		if (!dispatch_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0),
				BITMAP_ALL_ONES, relaxed)) {
			// We didn't get one; since there is a cont allocated in
			// the page, we can't madvise. Give up and unlock all.
			goto unlock;
		}
	} while (++last_locked < (signed)BITMAPS_PER_PAGE);
#if DISPATCH_DEBUG
	//fprintf(stderr, "%s: madvised page %p for cont %p (next = %p), "
	//		"[%u+1]=%u bitmaps at %p\n", __func__, page, c, c->do_next,
	//		last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]);
	// Scribble to expose use-after-free bugs
	// madvise (syscall) flushes these stores
	memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif
	(void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE,
			MADV_FREE));

unlock:
	while (last_locked > 1) {
		page_bitmaps[--last_locked] = BITMAP_C(0);
	}
	if (last_locked) {
		dispatch_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed);
	}
	return;
}

DISPATCH_ALLOC_NOINLINE
static void
_dispatch_alloc_continuation_free(dispatch_continuation_t c)
{
	bitmap_t *b, *s;
	unsigned int b_idx, idx;

	get_maps_and_indices_for_continuation(c, &s, &b_idx, &b, &idx);
	bool bitmap_now_empty = bitmap_clear_bit(b, idx, CLEAR_EXCLUSIVELY);
	if (slowpath(s)) {
		(void)bitmap_clear_bit(s, b_idx, CLEAR_NONEXCLUSIVELY);
	}
	// We only try to madvise(2) pages outside of the first page.
	// (Allocations in the first page do not have a supermap entry.)
	if (slowpath(bitmap_now_empty) && slowpath(s)) {
		return _dispatch_alloc_maybe_madvise_page(c);
	}
}

#pragma mark -
#pragma mark dispatch_alloc_init

#if DISPATCH_DEBUG
static void
_dispatch_alloc_init(void)
{
	// Double-check our math. These are all compile-time checks and don't
	// generate code.

	dispatch_assert(sizeof(bitmap_t) == BYTES_PER_BITMAP);
	dispatch_assert(sizeof(bitmap_t) == BYTES_PER_SUPERMAP);
	dispatch_assert(sizeof(struct dispatch_magazine_header_s) ==
			SIZEOF_HEADER);

	dispatch_assert(sizeof(struct dispatch_continuation_s) <=
			DISPATCH_CONTINUATION_SIZE);

	// Magazines should be the right size, so they pack neatly into an array of
	// heaps.
	dispatch_assert(sizeof(struct dispatch_magazine_s) == BYTES_PER_MAGAZINE);

	// The header and maps sizes should match what we computed.
	dispatch_assert(SIZEOF_HEADER ==
			sizeof(((struct dispatch_magazine_s *)0x0)->header));
	dispatch_assert(SIZEOF_MAPS ==
			sizeof(((struct dispatch_magazine_s *)0x0)->maps));

	// The main array of continuations should start at the second page,
	// self-aligned.
	dispatch_assert(offsetof(struct dispatch_magazine_s, conts) %
			(CONTINUATIONS_PER_BITMAP * DISPATCH_CONTINUATION_SIZE) == 0);
	dispatch_assert(offsetof(struct dispatch_magazine_s, conts) ==
			DISPATCH_ALLOCATOR_PAGE_SIZE);

#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// The continuations in the first page should actually fit within the first
	// page.
	dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) <
			DISPATCH_ALLOCATOR_PAGE_SIZE);
	dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) %
			DISPATCH_CONTINUATION_SIZE == 0);
	dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) +
			sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) ==
			DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// Make sure our alignment will be correct: that is, that rounding up to
	// both the bitmap alignment and the continuation size at once satisfies
	// each of them individually.
	dispatch_assert(ROUND_UP_TO_BITMAP_ALIGNMENT(
			ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
			ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
	dispatch_assert(ROUND_UP_TO_CONTINUATION_SIZE(
			ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
			ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
}
#elif (DISPATCH_ALLOCATOR && DISPATCH_CONTINUATION_MALLOC) \
		|| (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE)
static inline void _dispatch_alloc_init(void) {}
#endif

#endif // DISPATCH_ALLOCATOR

#pragma mark -
#pragma mark dispatch_malloc

#if DISPATCH_CONTINUATION_MALLOC

#if DISPATCH_USE_MALLOCZONE
static malloc_zone_t *_dispatch_ccache_zone;

#define calloc(n, s) malloc_zone_calloc(_dispatch_ccache_zone, (n), (s))
#define free(c) malloc_zone_free(_dispatch_ccache_zone, (c))

static void
_dispatch_malloc_init(void)
{
	_dispatch_ccache_zone = malloc_create_zone(0, 0);
	dispatch_assert(_dispatch_ccache_zone);
	malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations");
}
#else
static inline void _dispatch_malloc_init(void) {}
#endif // DISPATCH_USE_MALLOCZONE

static dispatch_continuation_t
_dispatch_malloc_continuation_alloc(void)
{
	dispatch_continuation_t dc;
	while (!(dc = fastpath(calloc(1,
			ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) {
		_dispatch_temporary_resource_shortage();
	}
	return dc;
}

static inline void
_dispatch_malloc_continuation_free(dispatch_continuation_t c)
{
	free(c);
}
#endif // DISPATCH_CONTINUATION_MALLOC

#pragma mark -
#pragma mark dispatch_continuation_alloc

#if DISPATCH_ALLOCATOR
#if DISPATCH_CONTINUATION_MALLOC
#if DISPATCH_USE_NANOZONE
extern boolean_t malloc_engaged_nano(void);
#else
#define malloc_engaged_nano() false
#endif // DISPATCH_USE_NANOZONE
static int _dispatch_use_dispatch_alloc;
#else
#define _dispatch_use_dispatch_alloc 1
#endif // DISPATCH_CONTINUATION_MALLOC
#endif // DISPATCH_ALLOCATOR

#if (DISPATCH_ALLOCATOR && (DISPATCH_CONTINUATION_MALLOC || DISPATCH_DEBUG)) \
		|| (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE)
static void
_dispatch_continuation_alloc_init(void *ctxt DISPATCH_UNUSED)
{
#if DISPATCH_ALLOCATOR
#if DISPATCH_CONTINUATION_MALLOC
	bool use_dispatch_alloc = !malloc_engaged_nano();
	char *e = getenv("LIBDISPATCH_CONTINUATION_ALLOCATOR");
	if (e) {
		use_dispatch_alloc = atoi(e);
	}
	_dispatch_use_dispatch_alloc = use_dispatch_alloc;
#endif // DISPATCH_CONTINUATION_MALLOC
	if (_dispatch_use_dispatch_alloc)
		return _dispatch_alloc_init();
#endif // DISPATCH_ALLOCATOR
#if DISPATCH_CONTINUATION_MALLOC
	return _dispatch_malloc_init();
#endif // DISPATCH_CONTINUATION_MALLOC
}

static void
_dispatch_continuation_alloc_once(void)
{
	static dispatch_once_t pred;
	dispatch_once_f(&pred, NULL, _dispatch_continuation_alloc_init);
}
#else
static inline void _dispatch_continuation_alloc_once(void) {}
#endif // DISPATCH_ALLOCATOR ... || DISPATCH_CONTINUATION_MALLOC ...

dispatch_continuation_t
_dispatch_continuation_alloc_from_heap(void)
{
	_dispatch_continuation_alloc_once();
#if DISPATCH_ALLOCATOR
	if (_dispatch_use_dispatch_alloc)
		return _dispatch_alloc_continuation_alloc();
#endif
#if DISPATCH_CONTINUATION_MALLOC
	return _dispatch_malloc_continuation_alloc();
#endif
}

void
_dispatch_continuation_free_to_heap(dispatch_continuation_t c)
{
#if DISPATCH_ALLOCATOR
	if (_dispatch_use_dispatch_alloc)
		return _dispatch_alloc_continuation_free(c);
#endif
#if DISPATCH_CONTINUATION_MALLOC
	return _dispatch_malloc_continuation_free(c);
#endif
}