/*
 * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"
#include "allocator_internal.h"

#if DISPATCH_ALLOCATOR

#ifndef VM_MEMORY_LIBDISPATCH
#define VM_MEMORY_LIBDISPATCH 74
#endif

// _dispatch_main_heap is the first heap in the linked list, where searches
// always begin.
//
// _dispatch_main_heap, and dh_next, are read normally but only written (in
// try_create_heap) by cmpxchg. They start life at 0, and are only written
// once to non-zero. They are not marked volatile. There is a small risk that
// some thread may see a stale 0 value and enter try_create_heap. It will
// waste some time in an allocate syscall, but eventually it will try to
// cmpxchg, expecting to overwrite 0 with an address. This will fail
// (because another thread already did this), the thread will deallocate the
// unused allocated memory, and continue with the new value.
//
// If something goes wrong here, the symptom would be a NULL dereference
// in alloc_continuation_from_heap or _magazine when derefing the magazine ptr.
static dispatch_heap_t _dispatch_main_heap;

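// Concretely, the benign race plays out like this: two threads can both see
// _dispatch_main_heap == 0 and both enter try_create_heap, but only one
// os_atomic_cmpxchg from NULL can succeed; the loser takes the cmpxchg
// failure path below, unmaps its freshly mapped region, and continues with
// the winner's heap.
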
DISPATCH_ALWAYS_INLINE
static void
set_last_found_page(bitmap_t *val)
{
	dispatch_assert(_dispatch_main_heap);
	unsigned int cpu = _dispatch_cpu_number();
	_dispatch_main_heap[cpu].header.last_found_page = val;
}

DISPATCH_ALWAYS_INLINE
static bitmap_t *
last_found_page(void)
{
	dispatch_assert(_dispatch_main_heap);
	unsigned int cpu = _dispatch_cpu_number();
	return _dispatch_main_heap[cpu].header.last_found_page;
}

#pragma mark -
#pragma mark dispatch_alloc_bitmaps

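// Layout recap (a summary of allocator_internal.h, not new machinery): a
// heap is indexed per-CPU, giving one magazine per CPU. Within a magazine,
// each bit of a bitmap_t in maps[][] tracks one continuation slot, and each
// bit of a supermap tracks whether an entire bitmap is full. Since bitmap_t
// is asserted below to be the size of an unsigned long, on LP64 one bitmap
// covers 64 continuations and one supermap covers 64 bitmaps.
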
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bitmap_t *
supermap_address(struct dispatch_magazine_s *magazine, unsigned int supermap)
{
	return &magazine->supermaps[supermap];
}

DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bitmap_t *
bitmap_address(struct dispatch_magazine_s *magazine, unsigned int supermap,
		unsigned int map)
{
	return &magazine->maps[supermap][map];
}

DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static dispatch_continuation_t
continuation_address(struct dispatch_magazine_s *magazine,
		unsigned int supermap, unsigned int map, unsigned int index)
{
#if DISPATCH_DEBUG
	dispatch_assert(supermap < SUPERMAPS_PER_MAGAZINE);
	dispatch_assert(map < BITMAPS_PER_SUPERMAP);
	dispatch_assert(index < CONTINUATIONS_PER_BITMAP);
#endif
	return (dispatch_continuation_t)&magazine->conts[supermap][map][index];
}

DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static struct dispatch_magazine_s *
magazine_for_continuation(dispatch_continuation_t c)
{
	return (struct dispatch_magazine_s *)((uintptr_t)c & MAGAZINE_MASK);
}

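// Example of the mask trick above: magazines are BYTES_PER_MAGAZINE-aligned
// (try_create_heap below realigns its mapping to guarantee this), so masking
// any pointer into a magazine with MAGAZINE_MASK yields the magazine base.
// This is also why get_cont_and_indices_for_bitmap_and_index() can pass a
// bitmap pointer to this function.
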
DISPATCH_ALWAYS_INLINE_NDEBUG
static void
get_cont_and_indices_for_bitmap_and_index(bitmap_t *bitmap,
		unsigned int index, dispatch_continuation_t *continuation_out,
		bitmap_t **supermap_out, unsigned int *bitmap_index_out)
{
	// m_for_c wants a continuation, not a bitmap, but it works because it
	// just masks off the bottom bits of the address.
	struct dispatch_magazine_s *m = magazine_for_continuation((void *)bitmap);
	unsigned int mindex = (unsigned int)(bitmap - m->maps[0]);
	unsigned int bindex = mindex % BITMAPS_PER_SUPERMAP;
	unsigned int sindex = mindex / BITMAPS_PER_SUPERMAP;
	dispatch_assert(&m->maps[sindex][bindex] == bitmap);
	if (fastpath(continuation_out)) {
		*continuation_out = continuation_address(m, sindex, bindex, index);
	}
	if (fastpath(supermap_out)) *supermap_out = supermap_address(m, sindex);
	if (fastpath(bitmap_index_out)) *bitmap_index_out = bindex;
}

DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bool
continuation_is_in_first_page(dispatch_continuation_t c)
{
#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// (the base of c's magazine == the base of c's page)
	// => c is in first page of magazine
	return (((uintptr_t)c & MAGAZINE_MASK) ==
			((uintptr_t)c & ~(uintptr_t)DISPATCH_ALLOCATOR_PAGE_MASK));
#else
	(void)c;
	return false;
#endif
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static void
get_maps_and_indices_for_continuation(dispatch_continuation_t c,
		bitmap_t **supermap_out, unsigned int *bitmap_index_out,
		bitmap_t **bitmap_out, unsigned int *index_out)
{
	unsigned int cindex, sindex, index, mindex;
	padded_continuation *p = (padded_continuation *)c;
	struct dispatch_magazine_s *m = magazine_for_continuation(c);
#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	if (fastpath(continuation_is_in_first_page(c))) {
		cindex = (unsigned int)(p - m->fp_conts);
		index = cindex % CONTINUATIONS_PER_BITMAP;
		mindex = cindex / CONTINUATIONS_PER_BITMAP;
		if (fastpath(supermap_out)) *supermap_out = NULL;
		if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex;
		if (fastpath(bitmap_out)) *bitmap_out = &m->fp_maps[mindex];
		if (fastpath(index_out)) *index_out = index;
		return;
	}
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS
	cindex = (unsigned int)(p - (padded_continuation *)m->conts);
	sindex = cindex / (BITMAPS_PER_SUPERMAP * CONTINUATIONS_PER_BITMAP);
	mindex = (cindex / CONTINUATIONS_PER_BITMAP) % BITMAPS_PER_SUPERMAP;
	index = cindex % CONTINUATIONS_PER_BITMAP;
	if (fastpath(supermap_out)) *supermap_out = &m->supermaps[sindex];
	if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex;
	if (fastpath(bitmap_out)) *bitmap_out = &m->maps[sindex][mindex];
	if (fastpath(index_out)) *index_out = index;
}

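// Worked example of the decomposition above, assuming illustrative constants
// CONTINUATIONS_PER_BITMAP == 64 and BITMAPS_PER_SUPERMAP == 64 (the real
// values come from allocator_internal.h): slot cindex == 4100 maps to
// sindex == 4100 / (64 * 64) == 1, mindex == (4100 / 64) % 64 == 0, and
// index == 4100 % 64 == 4, i.e. bit 4 of maps[1][0].
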
// Base address of page, or NULL if this page shouldn't be madvise()d
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static void *
madvisable_page_base_for_continuation(dispatch_continuation_t c)
{
	if (fastpath(continuation_is_in_first_page(c))) {
		return NULL;
	}
	void *page_base = (void *)((uintptr_t)c &
			~(uintptr_t)DISPATCH_ALLOCATOR_PAGE_MASK);
#if DISPATCH_DEBUG
	struct dispatch_magazine_s *m = magazine_for_continuation(c);
	if (slowpath(page_base < (void *)&m->conts)) {
		DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too low");
	}
	if (slowpath(page_base > (void *)&m->conts[SUPERMAPS_PER_MAGAZINE-1]
			[BITMAPS_PER_SUPERMAP-1][CONTINUATIONS_PER_BITMAP-1])) {
		DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too high");
	}
#endif
	return page_base;
}

// Bitmap that controls the first few continuations in the same page as
// the continuations controlled by the passed bitmap. Undefined results if the
// passed bitmap controls continuations in the first page.
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bitmap_t *
first_bitmap_in_same_page(bitmap_t *b)
{
#if DISPATCH_DEBUG
	struct dispatch_magazine_s *m;
	m = magazine_for_continuation((void*)b);
	dispatch_assert(b >= &m->maps[0][0]);
	dispatch_assert(b < &m->maps[SUPERMAPS_PER_MAGAZINE]
			[BITMAPS_PER_SUPERMAP]);
#endif
	const uintptr_t PAGE_BITMAP_MASK = (BITMAPS_PER_PAGE *
			BYTES_PER_BITMAP) - 1;
	return (bitmap_t *)((uintptr_t)b & ~PAGE_BITMAP_MASK);
}

DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bool
bitmap_is_full(bitmap_t bits)
{
	return (bits == BITMAP_ALL_ONES);
}

#define NO_BITS_WERE_UNSET (UINT_MAX)

// max_index is the 0-based position of the most significant bit that is
// allowed to be set.
DISPATCH_ALWAYS_INLINE_NDEBUG
static unsigned int
bitmap_set_first_unset_bit_upto_index(volatile bitmap_t *bitmap,
		unsigned int max_index)
{
	// No barriers needed in acquire path: the just-allocated
	// continuation is "uninitialized", so the caller shouldn't
	// load from it before storing, so we don't need to guard
	// against reordering those loads.
	dispatch_assert(sizeof(*bitmap) == sizeof(unsigned long));
	return os_atomic_set_first_bit(bitmap, max_index);
}

DISPATCH_ALWAYS_INLINE
static unsigned int
bitmap_set_first_unset_bit(volatile bitmap_t *bitmap)
{
	return bitmap_set_first_unset_bit_upto_index(bitmap, UINT_MAX);
}

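// Typical pairing (a sketch of the callers below, not additional API):
//
//	unsigned int index = bitmap_set_first_unset_bit(bitmap);
//	if (index != NO_BITS_WERE_UNSET) {
//		// slot 'index' is now exclusively owned by this thread
//	}
//
// The matching release is bitmap_clear_bit(..., CLEAR_EXCLUSIVELY) in
// _dispatch_alloc_continuation_free().
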
#define CLEAR_EXCLUSIVELY true
#define CLEAR_NONEXCLUSIVELY false

// Return true if this bit was the last in the bitmap, and it is now all zeroes
DISPATCH_ALWAYS_INLINE_NDEBUG
static bool
bitmap_clear_bit(volatile bitmap_t *bitmap, unsigned int index,
		bool exclusively)
{
#if DISPATCH_DEBUG
	dispatch_assert(index < CONTINUATIONS_PER_BITMAP);
#endif
	const bitmap_t mask = BITMAP_C(1) << index;
	bitmap_t b;

	if (exclusively == CLEAR_EXCLUSIVELY) {
		if (slowpath((*bitmap & mask) == 0)) {
			DISPATCH_CLIENT_CRASH(*bitmap,
					"Corruption: failed to clear bit exclusively");
		}
	}

	// and-and-fetch
	b = os_atomic_and(bitmap, ~mask, release);
	return b == 0;
}

DISPATCH_ALWAYS_INLINE_NDEBUG
static void
mark_bitmap_as_full_if_still_full(volatile bitmap_t *supermap,
		unsigned int bitmap_index, volatile bitmap_t *bitmap)
{
#if DISPATCH_DEBUG
	dispatch_assert(bitmap_index < BITMAPS_PER_SUPERMAP);
#endif
	const bitmap_t mask = BITMAP_C(1) << bitmap_index;
	bitmap_t s, s_new, s_masked;

	if (!bitmap_is_full(*bitmap)) {
		return;
	}
	s_new = *supermap;
	for (;;) {
		// No barriers because supermaps are only advisory, they
		// don't protect access to other memory.
		s = s_new;
		s_masked = s | mask;
		if (os_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) ||
				!bitmap_is_full(*bitmap)) {
			return;
		}
	}
}

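// Note on the loop above: if the cmpxchg fails, s_new already holds the
// refreshed supermap value for the retry; the retry is abandoned if the
// bitmap has stopped being full in the meantime, so the advisory full bit is
// only ever set while the bitmap still appears full.
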
#pragma mark -
#pragma mark dispatch_alloc_continuation_alloc

#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
DISPATCH_ALWAYS_INLINE_NDEBUG
static dispatch_continuation_t
alloc_continuation_from_first_page(struct dispatch_magazine_s *magazine)
{
	unsigned int i, index, continuation_index;

	// TODO: unroll if this is hot?
	for (i = 0; i < FULL_BITMAPS_IN_FIRST_PAGE; i++) {
		index = bitmap_set_first_unset_bit(&magazine->fp_maps[i]);
		if (fastpath(index != NO_BITS_WERE_UNSET)) goto found;
	}
	if (REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE) {
		index = bitmap_set_first_unset_bit_upto_index(&magazine->fp_maps[i],
				REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE - 1);
		if (fastpath(index != NO_BITS_WERE_UNSET)) goto found;
	}
	return NULL;

found:
	continuation_index = (i * CONTINUATIONS_PER_BITMAP) + index;
	return (dispatch_continuation_t)&magazine->fp_conts[continuation_index];
}
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS

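// Design note (inferred from the code above): fp_conts fills the space left
// in the magazine's first page after the header and the maps, and its
// fp_maps bitmaps have no supermap. This is why
// get_maps_and_indices_for_continuation() reports supermap_out == NULL for
// first-page slots, and why the free path never madvises the first page.
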
DISPATCH_ALWAYS_INLINE_NDEBUG
static dispatch_continuation_t
alloc_continuation_from_magazine(struct dispatch_magazine_s *magazine)
{
	unsigned int s, b, index;

	for (s = 0; s < SUPERMAPS_PER_MAGAZINE; s++) {
		volatile bitmap_t *supermap = supermap_address(magazine, s);
		if (bitmap_is_full(*supermap)) {
			continue;
		}
		for (b = 0; b < BITMAPS_PER_SUPERMAP; b++) {
			volatile bitmap_t *bitmap = bitmap_address(magazine, s, b);
			index = bitmap_set_first_unset_bit(bitmap);
			if (index != NO_BITS_WERE_UNSET) {
				set_last_found_page(
						first_bitmap_in_same_page((bitmap_t *)bitmap));
				mark_bitmap_as_full_if_still_full(supermap, b, bitmap);
				return continuation_address(magazine, s, b, index);
			}
		}
	}
	return NULL;
}

DISPATCH_NOINLINE
static void
_dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr)
{
#if HAVE_MACH
	kern_return_t kr;
	mach_vm_size_t vm_size = MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE;
	mach_vm_offset_t vm_mask = ~MAGAZINE_MASK;
	mach_vm_address_t vm_addr = vm_page_size;
	while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size,
			vm_mask, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH),
			MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT))) {
		if (kr != KERN_NO_SPACE) {
			DISPATCH_CLIENT_CRASH(kr, "Could not allocate heap");
		}
		_dispatch_temporary_resource_shortage();
		vm_addr = vm_page_size;
	}
	uintptr_t aligned_region = (uintptr_t)vm_addr;
#else // HAVE_MACH
	const size_t region_sz = (1 + MAGAZINES_PER_HEAP) * BYTES_PER_MAGAZINE;
	void *region_p;
	while (!dispatch_assume((region_p = mmap(NULL, region_sz,
			PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE,
			VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), 0)) != MAP_FAILED)) {
		_dispatch_temporary_resource_shortage();
	}
	uintptr_t region = (uintptr_t)region_p;
	uintptr_t region_end = region + region_sz;
	uintptr_t aligned_region, aligned_region_end;
	uintptr_t bottom_slop_len, top_slop_len;
	// Realign if needed; find the slop at top/bottom to unmap
	if ((region & ~(MAGAZINE_MASK)) == 0) {
		bottom_slop_len = 0;
		aligned_region = region;
		aligned_region_end = region_end - BYTES_PER_MAGAZINE;
		top_slop_len = BYTES_PER_MAGAZINE;
	} else {
		aligned_region = (region & MAGAZINE_MASK) + BYTES_PER_MAGAZINE;
		aligned_region_end = aligned_region +
				(MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
		bottom_slop_len = aligned_region - region;
		top_slop_len = BYTES_PER_MAGAZINE - bottom_slop_len;
	}
#if DISPATCH_DEBUG
	// Double-check our math.
	dispatch_assert(aligned_region % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(aligned_region % vm_kernel_page_size == 0);
	dispatch_assert(aligned_region_end % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end % vm_kernel_page_size == 0);
	dispatch_assert(aligned_region_end > aligned_region);
	dispatch_assert(top_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(bottom_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end + top_slop_len == region_end);
	dispatch_assert(region + bottom_slop_len == aligned_region);
	dispatch_assert(region_sz == bottom_slop_len + top_slop_len +
			MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)region, bottom_slop_len,
				PROT_NONE));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)aligned_region_end,
				top_slop_len, PROT_NONE));
	}
#else
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)region, bottom_slop_len));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)aligned_region_end,
				top_slop_len));
	}
#endif // DISPATCH_DEBUG
#endif // HAVE_MACH

	if (!os_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
			relaxed)) {
		// If we lost the race to link in the new region, unmap the whole thing.
#if DISPATCH_DEBUG
		(void)dispatch_assume_zero(mprotect((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE, PROT_NONE));
#else
		(void)dispatch_assume_zero(munmap((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE));
#endif
	}
}

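// Worked example of the realignment above, with an illustrative
// BYTES_PER_MAGAZINE of 0x200000: a raw mapping at region == 0x1003f000
// realigns to aligned_region == (region & MAGAZINE_MASK) + BYTES_PER_MAGAZINE
// == 0x10200000, giving bottom_slop_len == 0x1c1000 and top_slop_len ==
// 0x200000 - 0x1c1000 == 0x3f000; mapping one magazine more than needed is
// what guarantees both slops can be trimmed without losing capacity.
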
DISPATCH_ALLOC_NOINLINE
static dispatch_continuation_t
_dispatch_alloc_continuation_from_heap(dispatch_heap_t heap)
{
	dispatch_continuation_t cont;

	unsigned int cpu_number = _dispatch_cpu_number();
#ifdef DISPATCH_DEBUG
	dispatch_assert(cpu_number < NUM_CPU);
#endif

#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// First try the continuations in the first page for this CPU
	cont = alloc_continuation_from_first_page(&(heap[cpu_number]));
	if (fastpath(cont)) {
		return cont;
	}
#endif
	// Next, try the rest of the magazine for this CPU
	cont = alloc_continuation_from_magazine(&(heap[cpu_number]));
	return cont;
}

DISPATCH_ALLOC_NOINLINE
static dispatch_continuation_t
_dispatch_alloc_continuation_from_heap_slow(void)
{
	dispatch_heap_t *heap = &_dispatch_main_heap;
	dispatch_continuation_t cont;

	for (;;) {
		if (!fastpath(*heap)) {
			_dispatch_alloc_try_create_heap(heap);
		}
		cont = _dispatch_alloc_continuation_from_heap(*heap);
		if (fastpath(cont)) {
			return cont;
		}
		// If we have tuned our parameters right, 99.999% of apps should
		// never reach this point! The ones that do have gone off the rails...
		//
		// Magazine is full? Onto the next heap!
		// We tried 'stealing' from other CPUs' magazines. The net effect
		// was worse performance from more wasted search time and more
		// cache churn.
		//
		// Future optimization: start at the page we last used, start
		// in the *zone* we last used. But this would only improve deeply
		// pathological cases like dispatch_starfish
		heap = &(*heap)->header.dh_next;
	}
}

DISPATCH_ALLOC_NOINLINE
static dispatch_continuation_t
_dispatch_alloc_continuation_alloc(void)
{
	dispatch_continuation_t cont;

	if (fastpath(_dispatch_main_heap)) {
		// Start looking in the same page where we found a continuation
		// the last time.
		bitmap_t *last = last_found_page();
		if (fastpath(last)) {
			unsigned int i;
			for (i = 0; i < BITMAPS_PER_PAGE; i++) {
				bitmap_t *cur = last + i;
				unsigned int index = bitmap_set_first_unset_bit(cur);
				if (fastpath(index != NO_BITS_WERE_UNSET)) {
					bitmap_t *supermap;
					unsigned int bindex;
					get_cont_and_indices_for_bitmap_and_index(cur,
							index, &cont, &supermap, &bindex);
					mark_bitmap_as_full_if_still_full(supermap, bindex,
							cur);
					return cont;
				}
			}
		}

		cont = _dispatch_alloc_continuation_from_heap(_dispatch_main_heap);
		if (fastpath(cont)) {
			return cont;
		}
	}
	return _dispatch_alloc_continuation_from_heap_slow();
}

#pragma mark -
#pragma mark dispatch_alloc_continuation_free

DISPATCH_NOINLINE
static void
_dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c)
{
	void *page = madvisable_page_base_for_continuation(c);
	if (!page) {
		// page can't be madvised; maybe it contains non-continuations
		return;
	}
	// Are all the continuations in this page unallocated?
	volatile bitmap_t *page_bitmaps;
	get_maps_and_indices_for_continuation((dispatch_continuation_t)page, NULL,
			NULL, (bitmap_t **)&page_bitmaps, NULL);
	unsigned int i;
	for (i = 0; i < BITMAPS_PER_PAGE; i++) {
		if (page_bitmaps[i] != 0) {
			return;
		}
	}
	// They are all unallocated, so we could madvise the page. Try to
	// take ownership of them all.
	int last_locked = 0;
	do {
		if (!os_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0),
				BITMAP_ALL_ONES, relaxed)) {
			// We didn't get one; since there is a cont allocated in
			// the page, we can't madvise. Give up and unlock all.
			goto unlock;
		}
	} while (++last_locked < (signed)BITMAPS_PER_PAGE);
#if DISPATCH_DEBUG
	//fprintf(stderr, "%s: madvised page %p for cont %p (next = %p), "
	//		"[%u+1]=%u bitmaps at %p\n", __func__, page, c, c->do_next,
	//		last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]);
	// Scribble to expose use-after-free bugs
	// madvise (syscall) flushes these stores
	memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif
	(void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE,
			MADV_FREE));

unlock:
	while (last_locked > 1) {
		page_bitmaps[--last_locked] = BITMAP_C(0);
	}
	if (last_locked) {
		os_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed);
	}
}

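// Unlock bookkeeping for the code above: last_locked counts how many bitmaps
// were successfully locked before a failure (or BITMAPS_PER_PAGE on
// success). The loop restores bitmaps [last_locked-1 .. 1] with plain
// stores, and bitmap 0 is released by the final explicit store only if it
// was ever taken (last_locked != 0).
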
DISPATCH_ALLOC_NOINLINE
static void
_dispatch_alloc_continuation_free(dispatch_continuation_t c)
{
	bitmap_t *b, *s;
	unsigned int b_idx, idx;

	get_maps_and_indices_for_continuation(c, &s, &b_idx, &b, &idx);
	bool bitmap_now_empty = bitmap_clear_bit(b, idx, CLEAR_EXCLUSIVELY);
	if (slowpath(s)) {
		(void)bitmap_clear_bit(s, b_idx, CLEAR_NONEXCLUSIVELY);
	}
	// We only try to madvise(2) pages outside of the first page.
	// (Allocations in the first page do not have a supermap entry.)
	if (slowpath(bitmap_now_empty) && slowpath(s)) {
		return _dispatch_alloc_maybe_madvise_page(c);
	}
}

#pragma mark -
#pragma mark dispatch_alloc_init

#if DISPATCH_DEBUG
static void
_dispatch_alloc_init(void)
{
	// Double-check our math. These are all compile time checks and don't
	// generate any code.

	dispatch_assert(sizeof(bitmap_t) == BYTES_PER_BITMAP);
	dispatch_assert(sizeof(bitmap_t) == BYTES_PER_SUPERMAP);
	dispatch_assert(sizeof(struct dispatch_magazine_header_s) ==
			SIZEOF_HEADER);

	dispatch_assert(sizeof(struct dispatch_continuation_s) <=
			DISPATCH_CONTINUATION_SIZE);

	// Magazines should be the right size, so they pack neatly into an array of
	// heaps.
	dispatch_assert(sizeof(struct dispatch_magazine_s) == BYTES_PER_MAGAZINE);

	// The header and maps sizes should match what we computed.
	dispatch_assert(SIZEOF_HEADER ==
			sizeof(((struct dispatch_magazine_s *)0x0)->header));
	dispatch_assert(SIZEOF_MAPS ==
			sizeof(((struct dispatch_magazine_s *)0x0)->maps));

	// The main array of continuations should start at the second page,
	// self-aligned.
	dispatch_assert(offsetof(struct dispatch_magazine_s, conts) %
			(CONTINUATIONS_PER_BITMAP * DISPATCH_CONTINUATION_SIZE) == 0);
	dispatch_assert(offsetof(struct dispatch_magazine_s, conts) ==
			DISPATCH_ALLOCATOR_PAGE_SIZE);

#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// The continuations in the first page should actually fit within the first
	// page.
	dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) <
			DISPATCH_ALLOCATOR_PAGE_SIZE);
	dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) %
			DISPATCH_CONTINUATION_SIZE == 0);
	dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) +
			sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) ==
			DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// Make sure our alignment will be correct: that is, that we are correctly
	// aligning to both the continuation size and the bitmap alignment.
	dispatch_assert(ROUND_UP_TO_BITMAP_ALIGNMENT(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
			ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
	dispatch_assert(ROUND_UP_TO_CONTINUATION_SIZE(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
			ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
}
#elif (DISPATCH_ALLOCATOR && DISPATCH_CONTINUATION_MALLOC) \
		|| (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE)
static inline void _dispatch_alloc_init(void) {}
#endif

#endif // DISPATCH_ALLOCATOR

#pragma mark -
#pragma mark dispatch_malloc

#if DISPATCH_CONTINUATION_MALLOC

#if DISPATCH_USE_MALLOCZONE
static malloc_zone_t *_dispatch_ccache_zone;

#define calloc(n, s) malloc_zone_calloc(_dispatch_ccache_zone, (n), (s))
#define free(c) malloc_zone_free(_dispatch_ccache_zone, (c))

static void
_dispatch_malloc_init(void)
{
	_dispatch_ccache_zone = malloc_create_zone(0, 0);
	dispatch_assert(_dispatch_ccache_zone);
	malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations");
}
#else
#define _dispatch_malloc_init() ((void)0)
#endif // DISPATCH_USE_MALLOCZONE

static dispatch_continuation_t
_dispatch_malloc_continuation_alloc(void)
{
	dispatch_continuation_t dc;
	while (!(dc = fastpath(calloc(1,
			ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) {
		_dispatch_temporary_resource_shortage();
	}
	return dc;
}

static inline void
_dispatch_malloc_continuation_free(dispatch_continuation_t c)
{
	free(c);
}
#endif // DISPATCH_CONTINUATION_MALLOC

#pragma mark -
#pragma mark dispatch_continuation_alloc

#if DISPATCH_ALLOCATOR
#if DISPATCH_CONTINUATION_MALLOC
#if DISPATCH_USE_NANOZONE
extern boolean_t malloc_engaged_nano(void);
#else // DISPATCH_USE_NANOZONE
#define malloc_engaged_nano() false
#endif // DISPATCH_USE_NANOZONE
static int _dispatch_use_dispatch_alloc;
#else // DISPATCH_CONTINUATION_MALLOC
#define _dispatch_use_dispatch_alloc 1
#endif // DISPATCH_CONTINUATION_MALLOC
#endif // DISPATCH_ALLOCATOR

#if (DISPATCH_ALLOCATOR && (DISPATCH_CONTINUATION_MALLOC || DISPATCH_DEBUG)) \
		|| (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE)
static void
_dispatch_continuation_alloc_init(void *ctxt DISPATCH_UNUSED)
{
#if DISPATCH_ALLOCATOR
#if DISPATCH_CONTINUATION_MALLOC
	bool use_dispatch_alloc = !malloc_engaged_nano();
	char *e = getenv("LIBDISPATCH_CONTINUATION_ALLOCATOR");
	if (e) {
		use_dispatch_alloc = atoi(e);
	}
	_dispatch_use_dispatch_alloc = use_dispatch_alloc;
#endif // DISPATCH_CONTINUATION_MALLOC
	if (_dispatch_use_dispatch_alloc)
		return _dispatch_alloc_init();
#endif // DISPATCH_ALLOCATOR
#if DISPATCH_CONTINUATION_MALLOC
	return _dispatch_malloc_init();
#endif // DISPATCH_CONTINUATION_MALLOC
}

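// Usage note for the logic above: when both allocators are compiled in, the
// dispatch allocator is selected unless malloc's nano zone is engaged, and
// the LIBDISPATCH_CONTINUATION_ALLOCATOR environment variable (parsed with
// atoi) overrides the choice, e.g. LIBDISPATCH_CONTINUATION_ALLOCATOR=0
// forces the malloc path.
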
static void
_dispatch_continuation_alloc_once(void)
{
	static dispatch_once_t pred;
	dispatch_once_f(&pred, NULL, _dispatch_continuation_alloc_init);
}
#else
static inline void _dispatch_continuation_alloc_once(void) {}
#endif // DISPATCH_ALLOCATOR ... || DISPATCH_CONTINUATION_MALLOC ...

dispatch_continuation_t
_dispatch_continuation_alloc_from_heap(void)
{
	_dispatch_continuation_alloc_once();
#if DISPATCH_ALLOCATOR
	if (_dispatch_use_dispatch_alloc)
		return _dispatch_alloc_continuation_alloc();
#endif
#if DISPATCH_CONTINUATION_MALLOC
	return _dispatch_malloc_continuation_alloc();
#endif
}

void
_dispatch_continuation_free_to_heap(dispatch_continuation_t c)
{
#if DISPATCH_ALLOCATOR
	if (_dispatch_use_dispatch_alloc)
		return _dispatch_alloc_continuation_free(c);
#endif
#if DISPATCH_CONTINUATION_MALLOC
	return _dispatch_malloc_continuation_free(c);
#endif
}