diff --git a/src/allocator.c b/src/allocator.c
index 7b4c165294d775e42e6b710022fe9ac0c40da923..e6ea77217854efe0c70c232ad193bd35ac1c2db7 100644
--- a/src/allocator.c
+++ b/src/allocator.c
@@ -35,7 +35,7 @@
 // once to non-zero. They are not marked volatile. There is a small risk that
 // some thread may see a stale 0 value and enter try_create_heap. It will
 // waste some time in an allocate syscall, but eventually it will try to
-// cmpxchg, expecting to overwite 0 with an address. This will fail
+// cmpxchg, expecting to overwrite 0 with an address. This will fail
 // (because another thread already did this), the thread will deallocate the
 // unused allocated memory, and continue with the new value.
 //
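A minimal sketch of the racy one-time-initialization pattern this comment describes, using C11 atomics in place of libdispatch's os_atomic wrappers; the names `try_create_heap_sketch`, `alloc_heap_region`, and `free_heap_region` are hypothetical stand-ins:

```c
#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

static void *alloc_heap_region(void) { return malloc(4096); }
static void free_heap_region(void *p) { free(p); }

static _Atomic(void *) heap; // written once, from NULL to non-NULL

static void *try_create_heap_sketch(void)
{
	void *expected = NULL;
	void *fresh = alloc_heap_region(); // possibly wasted work
	if (!atomic_compare_exchange_strong_explicit(&heap, &expected, fresh,
			memory_order_relaxed, memory_order_relaxed)) {
		// Another thread won the race: deallocate the unused region
		// and continue with the value that thread published.
		free_heap_region(fresh);
		return expected;
	}
	return fresh;
}
```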
@@ -127,7 +127,7 @@ continuation_is_in_first_page(dispatch_continuation_t c)
        // (the base of c's magazine == the base of c's page)
        // => c is in first page of magazine
        return (((uintptr_t)c & MAGAZINE_MASK) ==
-                       ((uintptr_t)c & ~(uintptr_t)PAGE_MASK));
+                       ((uintptr_t)c & ~(uintptr_t)DISPATCH_ALLOCATOR_PAGE_MASK));
 #else
        (void)c;
        return false;
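The mask arithmetic in that check is easiest to see with concrete sizes. The values below are made up (the real ones come from allocator_internal.h), but both are powers of two, which is all the identity relies on:

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE_SK		((uintptr_t)0x1000)	// 4 KiB, hypothetical
#define MAGAZINE_SIZE_SK	((uintptr_t)0x10000)	// 64 KiB, hypothetical
#define PAGE_MASK_SK		(PAGE_SIZE_SK - 1)
#define MAGAZINE_MASK_SK	(~(MAGAZINE_SIZE_SK - 1))

static bool in_first_page(uintptr_t c)
{
	// magazine base == page base  <=>  c lies in the magazine's first page
	return (c & MAGAZINE_MASK_SK) == (c & ~PAGE_MASK_SK);
}

int main(void)
{
	uintptr_t base = (uintptr_t)0x10000000;	// magazine-aligned base
	assert(in_first_page(base + 0x800));	// offset inside page 0
	assert(!in_first_page(base + 0x1800));	// offset inside page 1
	return 0;
}
```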
@@ -173,15 +173,16 @@ madvisable_page_base_for_continuation(dispatch_continuation_t c)
        if (fastpath(continuation_is_in_first_page(c))) {
                return NULL;
        }
-       void *page_base = (void *)((uintptr_t)c & ~(uintptr_t)PAGE_MASK);
+       void *page_base = (void *)((uintptr_t)c &
+                       ~(uintptr_t)DISPATCH_ALLOCATOR_PAGE_MASK);
 #if DISPATCH_DEBUG
        struct dispatch_magazine_s *m = magazine_for_continuation(c);
        if (slowpath(page_base < (void *)&m->conts)) {
-               DISPATCH_CRASH("madvisable continuation too low");
+               DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too low");
        }
        if (slowpath(page_base > (void *)&m->conts[SUPERMAPS_PER_MAGAZINE-1]
                        [BITMAPS_PER_SUPERMAP-1][CONTINUATIONS_PER_BITMAP-1])) {
-               DISPATCH_CRASH("madvisable continuation too high");
+               DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too high");
        }
 #endif
        return page_base;
@@ -226,13 +227,8 @@ bitmap_set_first_unset_bit_upto_index(volatile bitmap_t *bitmap,
        // continuation is "uninitialized", so the caller shouldn't
        // load from it before storing, so we don't need to guard
        // against reordering those loads.
-#if defined(__x86_64__) // TODO rdar://problem/11477843
-       dispatch_assert(sizeof(*bitmap) == sizeof(uint64_t));
-       return dispatch_atomic_set_first_bit((volatile uint64_t *)bitmap,max_index);
-#else
-       dispatch_assert(sizeof(*bitmap) == sizeof(uint32_t));
-       return dispatch_atomic_set_first_bit((volatile uint32_t *)bitmap,max_index);
-#endif
+       dispatch_assert(sizeof(*bitmap) == sizeof(unsigned long));
+       return os_atomic_set_first_bit(bitmap, max_index);
 }
 
 DISPATCH_ALWAYS_INLINE
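A non-atomic sketch of the set-first-unset-bit operation being delegated above, assuming `max_index` is the largest acceptable bit index and that failure is signalled with a `UINT_MAX`-style sentinel (both assumptions); the real os_atomic_set_first_bit performs this under a compare-and-swap retry loop, and `__builtin_ctzl` is a GCC/Clang builtin:

```c
#include <limits.h>

#define NO_BITS_WERE_UNSET_SK UINT_MAX	// hypothetical failure sentinel

static unsigned set_first_unset_bit_sketch(unsigned long *word,
		unsigned max_index)
{
	unsigned long b = *word;
	if (~b == 0) return NO_BITS_WERE_UNSET_SK;	// already full
	unsigned index = (unsigned)__builtin_ctzl(~b);	// lowest zero bit
	if (index > max_index) return NO_BITS_WERE_UNSET_SK;
	*word = b | (1UL << index);
	return index;
}
```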
@@ -257,15 +253,15 @@ bitmap_clear_bit(volatile bitmap_t *bitmap, unsigned int index,
        const bitmap_t mask = BITMAP_C(1) << index;
        bitmap_t b;
 
-       b = *bitmap;
        if (exclusively == CLEAR_EXCLUSIVELY) {
-               if (slowpath((b & mask) == 0)) {
-                       DISPATCH_CRASH("Corruption: failed to clear bit exclusively");
+               if (slowpath((*bitmap & mask) == 0)) {
+                       DISPATCH_CLIENT_CRASH(*bitmap,
+                                       "Corruption: failed to clear bit exclusively");
                }
        }
 
        // and-and-fetch
-       b = dispatch_atomic_and(bitmap, ~mask, release);
+       b = os_atomic_and(bitmap, ~mask, release);
        return b == 0;
 }
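The and-and-fetch in this hunk, sketched with C11 atomics. Note that os_atomic_and returns the new value while C11's `atomic_fetch_and_explicit` returns the old one, so the mask is reapplied to recover the result:

```c
#include <stdatomic.h>
#include <stdbool.h>

typedef unsigned long bitmap_sk_t;	// stand-in for bitmap_t

static bool clear_bit_sketch(_Atomic(bitmap_sk_t) *bitmap, unsigned index)
{
	bitmap_sk_t mask = 1UL << index;
	// Release ordering: stores to the continuation must be visible
	// before the slot can be observed as free and reused.
	bitmap_sk_t old = atomic_fetch_and_explicit(bitmap, ~mask,
			memory_order_release);
	return (old & ~mask) == 0;	// true when the bitmap is now empty
}
```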
 
@@ -278,22 +274,16 @@ mark_bitmap_as_full_if_still_full(volatile bitmap_t *supermap,
        dispatch_assert(bitmap_index < BITMAPS_PER_SUPERMAP);
 #endif
        const bitmap_t mask = BITMAP_C(1) << bitmap_index;
-       bitmap_t s, s_new, s_masked;
+       bitmap_t s, s_new;
 
-       if (!bitmap_is_full(*bitmap)) {
-               return;
-       }
-       s_new = *supermap;
-       for (;;) {
-               // No barriers because supermaps are only advisory, they
-               // don't protect access to other memory.
-               s = s_new;
-               s_masked = s | mask;
-               if (dispatch_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) ||
-                               !bitmap_is_full(*bitmap)) {
-                       return;
+       // No barriers because supermaps are only advisory, they
+       // don't protect access to other memory.
+       os_atomic_rmw_loop(supermap, s, s_new, relaxed, {
+               if (!bitmap_is_full(*bitmap)) {
+                       os_atomic_rmw_loop_give_up(return);
                }
-       }
+               s_new = s | mask;
+       });
 }
 
 #pragma mark -
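Roughly what the os_atomic_rmw_loop above expands to, written as an explicit C11 compare-and-swap loop (a sketch, not the actual macro expansion):

```c
#include <stdatomic.h>
#include <stdbool.h>

typedef unsigned long bitmap_sk_t;

static bool bitmap_is_full_sk(bitmap_sk_t b) { return ~b == 0; }

static void mark_full_if_still_full_sketch(_Atomic(bitmap_sk_t) *supermap,
		const _Atomic(bitmap_sk_t) *bitmap, unsigned bitmap_index)
{
	bitmap_sk_t mask = 1UL << bitmap_index;
	bitmap_sk_t s = atomic_load_explicit(supermap, memory_order_relaxed);
	bitmap_sk_t s_new;
	do {
		// Give up as soon as another thread frees a slot; the
		// supermap is only advisory, so relaxed ordering suffices.
		if (!bitmap_is_full_sk(atomic_load_explicit(bitmap,
				memory_order_relaxed))) {
			return;
		}
		s_new = s | mask;
		// On failure, the weak CAS reloads the current value into s.
	} while (!atomic_compare_exchange_weak_explicit(supermap, &s, s_new,
			memory_order_relaxed, memory_order_relaxed));
}
```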
@@ -363,8 +353,7 @@ _dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr)
                        MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                        VM_INHERIT_DEFAULT))) {
                if (kr != KERN_NO_SPACE) {
-                       (void)dispatch_assume_zero(kr);
-                       DISPATCH_CLIENT_CRASH("Could not allocate heap");
+                       DISPATCH_CLIENT_CRASH(kr, "Could not allocate heap");
                }
                _dispatch_temporary_resource_shortage();
                vm_addr = vm_page_size;
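For context, a hedged sketch of the allocate-with-retry loop this error handling sits in. It is simplified: the real code also applies an alignment mask and a VM tag, and _dispatch_temporary_resource_shortage backs off before the retry. macOS-only, since it uses the Mach VM API:

```c
#include <mach/mach.h>
#include <mach/mach_vm.h>

static mach_vm_address_t alloc_region_sketch(mach_vm_size_t size)
{
	kern_return_t kr;
	mach_vm_address_t vm_addr = vm_page_size;
	while ((kr = mach_vm_map(mach_task_self(), &vm_addr, size, 0,
			VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE,
			VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT))) {
		if (kr != KERN_NO_SPACE) {
			return 0;	// the real code crashes with kr here
		}
		// Address space exhausted: back off (elided) and retry.
		vm_addr = vm_page_size;
	}
	return vm_addr;
}
```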
@@ -397,11 +386,13 @@ _dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr)
        }
 #if DISPATCH_DEBUG
        // Double-check our math.
-       dispatch_assert(aligned_region % PAGE_SIZE == 0);
-       dispatch_assert(aligned_region_end % PAGE_SIZE == 0);
+       dispatch_assert(aligned_region % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
+       dispatch_assert(aligned_region % vm_kernel_page_size == 0);
+       dispatch_assert(aligned_region_end % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
+       dispatch_assert(aligned_region_end % vm_kernel_page_size == 0);
        dispatch_assert(aligned_region_end > aligned_region);
-       dispatch_assert(top_slop_len % PAGE_SIZE == 0);
-       dispatch_assert(bottom_slop_len % PAGE_SIZE == 0);
+       dispatch_assert(top_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
+       dispatch_assert(bottom_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
        dispatch_assert(aligned_region_end + top_slop_len == region_end);
        dispatch_assert(region + bottom_slop_len == aligned_region);
        dispatch_assert(region_sz == bottom_slop_len + top_slop_len +
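The identities these assertions check, shown with a tiny worked example (all numbers hypothetical; the alignment is a power of two, and the aligned region is assumed to be obtained by rounding the start up and the end down):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uintptr_t align = 0x4000;	// hypothetical magazine alignment
	uintptr_t region = 0x10002000, region_sz = 0x10000;
	uintptr_t region_end = region + region_sz;

	uintptr_t aligned_region = (region + align - 1) & ~(align - 1);
	uintptr_t aligned_region_end = region_end & ~(align - 1);
	uintptr_t bottom_slop_len = aligned_region - region;
	uintptr_t top_slop_len = region_end - aligned_region_end;

	// the same identities the DISPATCH_DEBUG block asserts
	assert(region + bottom_slop_len == aligned_region);
	assert(aligned_region_end + top_slop_len == region_end);
	assert(region_sz == bottom_slop_len + top_slop_len +
			(aligned_region_end - aligned_region));
	return 0;
}
```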
@@ -425,7 +416,7 @@ _dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr)
 #endif // DISPATCH_DEBUG
 #endif // HAVE_MACH
 
-       if (!dispatch_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
+       if (!os_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
                        relaxed)) {
                // If we lost the race to link in the new region, unmap the whole thing.
 #if DISPATCH_DEBUG
@@ -553,7 +544,7 @@ _dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c)
        // take ownership of them all.
        int last_locked = 0;
        do {
-               if (!dispatch_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0),
+               if (!os_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0),
                                BITMAP_ALL_ONES, relaxed)) {
                        // We didn't get one; since there is a cont allocated in
                        // the page, we can't madvise. Give up and unlock all.
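A sketch of the page-locking step this hunk belongs to: each bitmap on the page is claimed by swinging it from 0 (fully free) to all-ones; the first failure means a live continuation exists, so the claimed bitmaps are rolled back and the page is left alone. `BITMAPS_PER_PAGE_SK` is a made-up count, and unlike the real code every store here is atomic:

```c
#include <stdatomic.h>
#include <stdbool.h>

#define BITMAPS_PER_PAGE_SK 4	// hypothetical

static bool try_lock_page_sketch(_Atomic(unsigned long) bitmaps[])
{
	for (int i = 0; i < BITMAPS_PER_PAGE_SK; i++) {
		unsigned long zero = 0;
		if (!atomic_compare_exchange_strong_explicit(&bitmaps[i],
				&zero, ~0UL, memory_order_relaxed,
				memory_order_relaxed)) {
			// A continuation is allocated here: roll back the
			// bitmaps we did claim and give up.
			while (i-- > 0) {
				atomic_store_explicit(&bitmaps[i], 0UL,
						memory_order_relaxed);
			}
			return false;
		}
	}
	return true;
}
```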
@@ -566,16 +557,17 @@ _dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c)
        //              last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]);
        // Scribble to expose use-after-free bugs
        // madvise (syscall) flushes these stores
-       memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, PAGE_SIZE);
+       memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE);
 #endif
-       (void)dispatch_assume_zero(madvise(page, PAGE_SIZE, MADV_FREE));
+       (void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE,
+                       MADV_FREE));
 
 unlock:
        while (last_locked > 1) {
                page_bitmaps[--last_locked] = BITMAP_C(0);
        }
        if (last_locked) {
-               dispatch_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed);
+               os_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed);
        }
        return;
 }
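A minimal illustration of the MADV_FREE call itself, on an anonymous page-sized mapping: the kernel may reclaim the advised pages lazily, and their contents are undefined until rewritten, which is why the scribble above only needs to survive until the madvise. MADV_FREE is available on Darwin and on Linux 4.5+:

```c
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = (size_t)getpagesize();
	void *page = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_ANON | MAP_PRIVATE, -1, 0);
	if (page == MAP_FAILED) return 1;
	// Tell the kernel the page's contents are disposable.
	if (madvise(page, len, MADV_FREE) != 0) return 1;
	return munmap(page, len);
}
```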
@@ -631,19 +623,29 @@ _dispatch_alloc_init(void)
        // self-aligned.
        dispatch_assert(offsetof(struct dispatch_magazine_s, conts) %
                        (CONTINUATIONS_PER_BITMAP * DISPATCH_CONTINUATION_SIZE) == 0);
-       dispatch_assert(offsetof(struct dispatch_magazine_s, conts) == PAGE_SIZE);
+       dispatch_assert(offsetof(struct dispatch_magazine_s, conts) ==
+                       DISPATCH_ALLOCATOR_PAGE_SIZE);
 
 #if PACK_FIRST_PAGE_WITH_CONTINUATIONS
        // The continuations in the first page should actually fit within the first
        // page.
-       dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) < PAGE_SIZE);
+       dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) <
+                       DISPATCH_ALLOCATOR_PAGE_SIZE);
        dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) %
                        DISPATCH_CONTINUATION_SIZE == 0);
        dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) +
-                       sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) == PAGE_SIZE);
+                       sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) ==
+                                       DISPATCH_ALLOCATOR_PAGE_SIZE);
 #endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS
-}
-#else
+       // Make sure our alignment will be correct: that is, that we are correctly
+       // aligning to both.
+       dispatch_assert(ROUND_UP_TO_BITMAP_ALIGNMENT(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
+                       ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
+       dispatch_assert(ROUND_UP_TO_CONTINUATION_SIZE(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
+                       ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
+}
+#elif (DISPATCH_ALLOCATOR && DISPATCH_CONTINUATION_MALLOC) \
+               || (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE)
 static inline void _dispatch_alloc_init(void) {}
 #endif
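What the two new assertions verify, in isolation: rounding up to the combined alignment already satisfies each individual alignment, so re-rounding is a no-op. The macro and sizes below are illustrative stand-ins, not the definitions from allocator_internal.h:

```c
#include <assert.h>
#include <stdint.h>

#define ROUND_UP_SK(x, a) (((x) + (a) - 1) & ~(uintptr_t)((a) - 1))

int main(void)
{
	uintptr_t continuation_size = 64;	// hypothetical
	uintptr_t bitmap_alignment = 4096;	// hypothetical
	// Rounding to the larger power of two aligns to both at once.
	uintptr_t combined = ROUND_UP_SK(1, bitmap_alignment);
	assert(ROUND_UP_SK(combined, bitmap_alignment) == combined);
	assert(ROUND_UP_SK(combined, continuation_size) == combined);
	return 0;
}
```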
 
@@ -668,7 +670,7 @@ _dispatch_malloc_init(void)
        malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations");
 }
 #else
-static inline void _dispatch_malloc_init(void) {}
+#define _dispatch_malloc_init() ((void)0)
 #endif // DISPATCH_USE_MALLOCZONE
 
 static dispatch_continuation_t
@@ -761,4 +763,3 @@ _dispatch_continuation_free_to_heap(dispatch_continuation_t c)
        return _dispatch_malloc_continuation_free(c);
 #endif
 }
-