// once to non-zero. They are not marked volatile. There is a small risk that
// some thread may see a stale 0 value and enter try_create_heap. It will
// waste some time in an allocate syscall, but eventually it will try to
-// cmpxchg, expecting to overwite 0 with an address. This will fail
+// cmpxchg, expecting to overwrite 0 with an address. This will fail
// (because another thread already did this); the thread will then deallocate
// the unused memory and continue with the new value.
//
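As a reference for the pattern the comment describes, here is a minimal
standalone sketch using C11 atomics, with malloc/free standing in for the real
allocation syscalls; the names are illustrative, not the actual libdispatch
helpers.

    #include <stdatomic.h>
    #include <stdlib.h>

    static _Atomic(void *) heap_ptr;    // written at most once, 0 -> non-zero

    static void *
    get_or_create_heap(void)
    {
        void *h = atomic_load_explicit(&heap_ptr, memory_order_relaxed);
        if (h) return h;                // common case: already published

        void *candidate = malloc(4096); // stand-in for the real vm allocation
        void *expected = NULL;
        if (!atomic_compare_exchange_strong_explicit(&heap_ptr, &expected,
                candidate, memory_order_relaxed, memory_order_relaxed)) {
            // Lost the race: another thread already published a heap.
            // Throw ours away and use theirs.
            free(candidate);
            return expected;
        }
        return candidate;
    }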
#if DISPATCH_DEBUG
struct dispatch_magazine_s *m = magazine_for_continuation(c);
if (slowpath(page_base < (void *)&m->conts)) {
- DISPATCH_CRASH("madvisable continuation too low");
+ DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too low");
}
if (slowpath(page_base > (void *)&m->conts[SUPERMAPS_PER_MAGAZINE-1]
[BITMAPS_PER_SUPERMAP-1][CONTINUATIONS_PER_BITMAP-1])) {
- DISPATCH_CRASH("madvisable continuation too high");
+ DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too high");
}
#endif
return page_base;
// load from it before storing, so we don't need to guard
// against reordering those loads.
dispatch_assert(sizeof(*bitmap) == sizeof(unsigned long));
- return dispatch_atomic_set_first_bit(bitmap,max_index);
+ return os_atomic_set_first_bit(bitmap, max_index);
}
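For intuition, an atomic "set the first unset bit" boils down to a
compare-exchange loop over the word. The sketch below uses C11 atomics and a
placeholder sentinel; it only illustrates the idea and is not the
os_atomic_set_first_bit implementation.

    #include <stdatomic.h>
    #include <limits.h>

    #define SKETCH_NO_BITS_WERE_UNSET UINT_MAX  // placeholder sentinel

    static unsigned int
    set_first_unset_bit(_Atomic(unsigned long) *bitmap, unsigned int max_index)
    {
        // Assumes max_index is a valid bit index within the word.
        unsigned long usable =
                (max_index + 1 == sizeof(unsigned long) * CHAR_BIT)
                ? ~0UL : (1UL << (max_index + 1)) - 1;
        unsigned long b = atomic_load_explicit(bitmap, memory_order_relaxed);
        for (;;) {
            unsigned long unset = ~b & usable;
            if (unset == 0) {
                return SKETCH_NO_BITS_WERE_UNSET;   // nothing left to hand out
            }
            unsigned int index = (unsigned int)__builtin_ctzl(unset);
            if (atomic_compare_exchange_weak_explicit(bitmap, &b,
                    b | (1UL << index),
                    memory_order_relaxed, memory_order_relaxed)) {
                return index;
            }
            // The failed CAS reloaded b; pick a new candidate bit and retry.
        }
    }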
DISPATCH_ALWAYS_INLINE
if (exclusively == CLEAR_EXCLUSIVELY) {
if (slowpath((*bitmap & mask) == 0)) {
- DISPATCH_CRASH("Corruption: failed to clear bit exclusively");
+ DISPATCH_CLIENT_CRASH(*bitmap,
+ "Corruption: failed to clear bit exclusively");
}
}
// and-and-fetch
- b = dispatch_atomic_and(bitmap, ~mask, release);
+ b = os_atomic_and(bitmap, ~mask, release);
return b == 0;
}
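In C11 terms, the release "and-and-fetch" above amounts to the following;
the function name is a placeholder and the return value mirrors the
"did the whole bitmap just become empty" check.

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool
    clear_bit_and_check_empty(_Atomic(unsigned long) *bitmap, unsigned int index)
    {
        unsigned long mask = 1UL << index;
        // C11 fetch_and returns the previous value, so apply the mask once
        // more to recover the and-and-fetch (new value) the caller wants.
        unsigned long prev = atomic_fetch_and_explicit(bitmap, ~mask,
                memory_order_release);
        return (prev & ~mask) == 0;
    }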
dispatch_assert(bitmap_index < BITMAPS_PER_SUPERMAP);
#endif
const bitmap_t mask = BITMAP_C(1) << bitmap_index;
- bitmap_t s, s_new, s_masked;
+ bitmap_t s, s_new;
- if (!bitmap_is_full(*bitmap)) {
- return;
- }
- s_new = *supermap;
- for (;;) {
- // No barriers because supermaps are only advisory, they
- // don't protect access to other memory.
- s = s_new;
- s_masked = s | mask;
- if (dispatch_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) ||
- !bitmap_is_full(*bitmap)) {
- return;
+ // No barriers because supermaps are only advisory, they
+ // don't protect access to other memory.
+ os_atomic_rmw_loop(supermap, s, s_new, relaxed, {
+ if (!bitmap_is_full(*bitmap)) {
+ os_atomic_rmw_loop_give_up(return);
}
- }
+ s_new = s | mask;
+ });
}
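Spelled out as a plain C11 CAS loop, the rmw loop above does roughly the
following (a sketch of the behaviour, not the actual macro expansion):

    #include <stdatomic.h>

    static void
    mark_in_supermap_if_bitmap_full(_Atomic(unsigned long) *supermap,
            unsigned long mask, const volatile unsigned long *bitmap)
    {
        unsigned long s = atomic_load_explicit(supermap, memory_order_relaxed);
        do {
            if (*bitmap != ~0UL) {
                // The bitmap is no longer full; nothing to record.
                // This is the os_atomic_rmw_loop_give_up(return) path.
                return;
            }
        } while (!atomic_compare_exchange_weak_explicit(supermap, &s, s | mask,
                memory_order_relaxed, memory_order_relaxed));
    }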
#pragma mark -
MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
VM_INHERIT_DEFAULT))) {
if (kr != KERN_NO_SPACE) {
- (void)dispatch_assume_zero(kr);
- DISPATCH_CLIENT_CRASH("Could not allocate heap");
+ DISPATCH_CLIENT_CRASH(kr, "Could not allocate heap");
}
_dispatch_temporary_resource_shortage();
vm_addr = vm_page_size;
#endif // DISPATCH_DEBUG
#endif // HAVE_MACH
- if (!dispatch_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
+ if (!os_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
relaxed)) {
// If we lost the race to link in the new region, unmap the whole thing.
#if DISPATCH_DEBUG
// take ownership of them all.
int last_locked = 0;
do {
- if (!dispatch_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0),
+ if (!os_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0),
BITMAP_ALL_ONES, relaxed)) {
// We didn't get one; since there is a cont allocated in
// the page, we can't madvise. Give up and unlock all.
page_bitmaps[--last_locked] = BITMAP_C(0);
}
if (last_locked) {
- dispatch_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed);
+ os_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed);
}
return;
}
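The locking scheme here is "flip every per-page bitmap from all-zero to
all-ones, or roll back and give up". A standalone sketch of that idea, with a
placeholder page size and without the madvise call itself:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define SKETCH_BITMAPS_PER_PAGE 4   // placeholder constant

    static bool
    try_lock_page_bitmaps(_Atomic(unsigned long) bm[SKETCH_BITMAPS_PER_PAGE])
    {
        int locked = 0;
        while (locked < SKETCH_BITMAPS_PER_PAGE) {
            unsigned long expected = 0UL;
            // Claim a bitmap by flipping it from all-zero (no continuations
            // live in that chunk of the page) to all-ones, so concurrent
            // allocators stay away while the page is being madvised.
            if (!atomic_compare_exchange_strong_explicit(&bm[locked],
                    &expected, ~0UL,
                    memory_order_relaxed, memory_order_relaxed)) {
                // Something is allocated here; unlock what we already took.
                while (locked > 0) {
                    atomic_store_explicit(&bm[--locked], 0UL,
                            memory_order_relaxed);
                }
                return false;
            }
            locked++;
        }
        return true;    // caller madvises the page, then stores zeros back
    }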
sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) ==
DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS
+ // Make sure our alignment will be correct: that is, that we are correctly
+ // aligning to both the bitmap alignment and the continuation size.
+ dispatch_assert(ROUND_UP_TO_BITMAP_ALIGNMENT(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
+ ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
+ dispatch_assert(ROUND_UP_TO_CONTINUATION_SIZE(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
+ ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
}
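What those two asserts guarantee, restated with hypothetical stand-in macros
(the real ROUND_UP_* macros and constants live elsewhere in the allocator):
rounding a size up to the combined bitmap-and-continuation boundary must
already satisfy each individual alignment.

    #include <assert.h>

    #define SKETCH_BITMAP_ALIGN       64u    // placeholder value
    #define SKETCH_CONTINUATION_SIZE  64u    // placeholder value
    #define SKETCH_COMBINED          128u    // placeholder, multiple of both

    // Classic power-of-two round-up; 'a' must be a power of two.
    #define SKETCH_ROUND_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    static void
    check_alignment_assumption(void)
    {
        unsigned v = SKETCH_ROUND_UP(1u, SKETCH_COMBINED);
        assert(SKETCH_ROUND_UP(v, SKETCH_BITMAP_ALIGN) == v);
        assert(SKETCH_ROUND_UP(v, SKETCH_CONTINUATION_SIZE) == v);
    }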
#elif (DISPATCH_ALLOCATOR && DISPATCH_CONTINUATION_MALLOC) \
|| (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE)
malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations");
}
#else
-static inline void _dispatch_malloc_init(void) {}
+#define _dispatch_malloc_init() ((void)0)
#endif // DISPATCH_USE_MALLOCZONE
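For context, the malloc-zone fallback relies on macOS's zone API so the
continuation cache can be identified by name in heap analysis tools. A minimal
sketch (the zone variable and helper names are placeholders; only the
malloc_zone_* calls are the real API):

    #include <malloc/malloc.h>
    #include <stddef.h>

    static malloc_zone_t *sketch_ccache_zone;

    static void
    sketch_ccache_init(void)
    {
        sketch_ccache_zone = malloc_create_zone(0, 0);
        // The zone name is what tools like heap(1) and vmmap(1) report.
        malloc_set_zone_name(sketch_ccache_zone, "DispatchContinuations");
    }

    static void *
    sketch_ccache_alloc(size_t size)
    {
        return malloc_zone_calloc(sketch_ccache_zone, 1, size);
    }

    static void
    sketch_ccache_free(void *ptr)
    {
        malloc_zone_free(sketch_ccache_zone, ptr);
    }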
static dispatch_continuation_t
return _dispatch_malloc_continuation_free(c);
#endif
}
-