+ block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
+ index = TINY_INDEX_FOR_PTR(ptr);
+ byte_index = index >> 3;
+
+ block_header += byte_index;
+ index &= 7;
+ *is_free = 0;
+ if (!BITMAP32_BIT(*block_header, index))
+ return 0;
+ in_use = TINY_INUSE_FOR_HEADER(block_header);
+ if (!BITMAP32_BIT(*in_use, index)) {
+ *is_free = 1;
+ return get_tiny_free_size(ptr);
+ }
+ uint32_t *addr = (uint32_t *)((uintptr_t)block_header & ~3);
+ uint32_t word0 = OSReadLittleInt32(addr, 0) >> index;
+ uint32_t word1 = OSReadLittleInt32(addr, 4) << (8 - index);
+ uint32_t bits = (((uintptr_t)block_header & 3) * 8); // precision loss on LP64 OK here
+ uint32_t word = (word0 >> bits) | (word1 << (24 - bits));
+ uint32_t result = ffs(word >> 1);
+ return result;
+}
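+
+/*
+ * Worked example for the bit scan above (sketch, for illustration): once the
+ * header bits are shifted so that this block's start bit sits at bit 0 of
+ * word, ffs(word >> 1) is the distance in quanta to the next block-start bit,
+ * which is exactly the in-use msize.
+ */
+#if 0
+ uint32_t word = 0x9; // bits 0 and 3 set: this block starts here, the next block 3 quanta later
+ msize_t result = ffs(word >> 1); // == 3, an in-use msize of 3 quanta
+#endif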
+
+static INLINE void
+set_tiny_meta_header_in_use(const void *ptr, msize_t msize)
+{
+ unsigned char *block_header;
+ unsigned char *in_use;
+ msize_t index;
+ unsigned byte_index;
+ msize_t clr_msize;
+ unsigned end_bit;
+
+ block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
+ index = TINY_INDEX_FOR_PTR(ptr);
+ byte_index = index >> 3;
+
+#if DEBUG_MALLOC
+ if (msize >= 32)
+ malloc_printf("set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize);
+ if ((unsigned)index + (unsigned)msize > 0x10000)
+ malloc_printf("set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize);
+#endif
+ block_header += byte_index;
+ index &= 7;
+ BITMAP32_SET(*block_header, index);
+ in_use = TINY_INUSE_FOR_HEADER(block_header);
+ BITMAP32_SET(*in_use, index);
+ index++;
+ clr_msize = msize-1;
+ if (clr_msize) {
+ byte_index = index >> 3;
+ block_header += byte_index; in_use += byte_index;
+ index &= 7;
+ end_bit = index + clr_msize;
+ bitarray_mclr(block_header, index, end_bit);
+ bitarray_mclr(in_use, index, end_bit);
+ }
+ BITARRAY_SET(block_header, index+clr_msize); // we set the block_header bit for the following block to reaffirm next block is a block
+#if DEBUG_MALLOC
+ {
+ boolean_t ff;
+ msize_t mf;
+
+ mf = get_tiny_meta_header(ptr, &ff);
+ if (msize != mf) {
+ malloc_printf("setting header for tiny in_use %p : %d\n", ptr, msize);
+ malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
+ }
+ }
+#endif
+}
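+
+/*
+ * Bitmap sketch for the routine above (illustration, assuming a hypothetical
+ * block whose index is 8 and whose msize is 3): the block-start and in-use
+ * bits are set at index 8, indices 9-10 are cleared in both bitmaps, and the
+ * block-start bit at index 11 is re-set so the following block stays delimited.
+ */
+#if 0
+ set_tiny_meta_header_in_use(ptr, 3); // TINY_INDEX_FOR_PTR(ptr) == 8
+ // block_header bits at indices 8..11: 1 0 0 1
+ // in_use bits at indices 8..10: 1 0 0 (index 11 left unchanged)
+#endif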
+
+static INLINE void
+set_tiny_meta_header_middle(const void *ptr)
+{
+ // indicates this block is in the middle of an in use block
+ unsigned char *block_header;
+ unsigned char *in_use;
+ msize_t index;
+
+ block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
+ in_use = TINY_INUSE_FOR_HEADER(block_header);
+ index = TINY_INDEX_FOR_PTR(ptr);
+
+ BITARRAY_CLR(block_header, index);
+ BITARRAY_CLR(in_use, index);
+}
+
+static INLINE void
+set_tiny_meta_header_free(const void *ptr, msize_t msize)
+{
+ // !msize is acceptable and means 65536
+ unsigned char *block_header;
+ unsigned char *in_use;
+ msize_t index;
+
+ block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
+ in_use = TINY_INUSE_FOR_HEADER(block_header);
+ index = TINY_INDEX_FOR_PTR(ptr);
+
+#if DEBUG_MALLOC
+ if ((unsigned)index + (unsigned)msize > 0x10000) {
+ malloc_printf("setting header for tiny free %p msize too large: %d\n", ptr, msize);
+ }
+#endif
+ BITARRAY_SET(block_header, index);
+ BITARRAY_CLR(in_use, index);
+ // mark the end of this block if msize is > 1. For msize == 0, the whole
+ // region is free, so there is no following block. For msize == 1, there is
+ // no space to write the size on 64 bit systems. The size for 1 quantum
+ // blocks is computed from the metadata bitmaps.
+ if (msize > 1) {
+ void *follower = FOLLOWING_TINY_PTR(ptr, msize);
+ TINY_PREVIOUS_MSIZE(follower) = msize;
+ TINY_FREE_SIZE(ptr) = msize;
+ }
+ if (msize == 0) {
+ TINY_FREE_SIZE(ptr) = msize;
+ }
+#if DEBUG_MALLOC
+ boolean_t ff;
+ msize_t mf = get_tiny_meta_header(ptr, &ff);
+ if ((msize != mf) || !ff) {
+ malloc_printf("setting header for tiny free %p : %u\n", ptr, msize);
+ malloc_printf("reading header for tiny %p : %u %u\n", ptr, mf, ff);
+ }
+#endif
+}
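+
+/*
+ * Sketch of the free-block bookkeeping above (illustration, assuming
+ * TINY_QUANTUM == 16): a 4-quantum free block records its size both at its
+ * start and just past its end, which is what tiny_previous_preceding_free()
+ * relies on when the following block is freed and wants to coalesce backwards.
+ */
+#if 0
+ msize_t msize = 4; // 4 quanta == 64 bytes; ptr is a 16-byte aligned block start
+ set_tiny_meta_header_free(ptr, msize);
+ // TINY_FREE_SIZE(ptr) == 4
+ // TINY_PREVIOUS_MSIZE(FOLLOWING_TINY_PTR(ptr, msize)) == 4, stored at ptr + 64
+#endif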
+
+static INLINE boolean_t
+tiny_meta_header_is_free(const void *ptr)
+{
+ unsigned char *block_header;
+ unsigned char *in_use;
+ msize_t index;
+
+ block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
+ in_use = TINY_INUSE_FOR_HEADER(block_header);
+ index = TINY_INDEX_FOR_PTR(ptr);
+ if (!BITARRAY_BIT(block_header, index))
+ return 0;
+ return !BITARRAY_BIT(in_use, index);
+}
+
+static INLINE void *
+tiny_previous_preceding_free(void *ptr, msize_t *prev_msize)
+{
+ // returns the previous block, assuming and verifying it's free
+ unsigned char *block_header;
+ unsigned char *in_use;
+ msize_t index;
+ msize_t previous_msize;
+ msize_t previous_index;
+ void *previous_ptr;
+
+ block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
+ in_use = TINY_INUSE_FOR_HEADER(block_header);
+ index = TINY_INDEX_FOR_PTR(ptr);
+
+ if (!index)
+ return NULL;
+ if ((previous_msize = get_tiny_previous_free_msize(ptr)) > index)
+ return NULL;
+
+ previous_index = index - previous_msize;
+ previous_ptr = (void *)(TINY_REGION_FOR_PTR(ptr) + TINY_BYTES_FOR_MSIZE(previous_index));
+ if (!BITARRAY_BIT(block_header, previous_index))
+ return NULL;
+ if (BITARRAY_BIT(in_use, previous_index))
+ return NULL;
+ if (get_tiny_free_size(previous_ptr) != previous_msize)
+ return NULL;
+
+ // conservative check did match true check
+ *prev_msize = previous_msize;
+ return previous_ptr;
+}
+
+/*
+ * Adds an item to the proper free list, and also marks the meta-header of the
+ * block properly.
+ * Assumes szone has been locked
+ */
+static void
+tiny_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize)
+{
+ grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
+ free_list_t *free_ptr = ptr;
+ free_list_t *free_head = szone->tiny_free_list[slot];
+
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
+ }
+ if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
+ szone_error(szone, "tiny_free_list_add_ptr: Unaligned ptr", ptr, NULL);
+ }
+#endif
+ set_tiny_meta_header_free(ptr, msize);
+ if (free_head) {
+ free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
+#if DEBUG_MALLOC
+ if (free_list_unchecksum_ptr(free_head->previous)) {
+ szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr,
+ "ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, free_head, free_head->previous.p);
+ }
+ if (! tiny_meta_header_is_free(free_head)) {
+ szone_error(szone, "tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr,
+ "ptr=%p slot=%d free_head=%p\n", ptr, slot, free_head);
+ }
+#endif
+ free_head->previous.u = free_list_checksum_ptr(free_ptr);
+ } else {
+ BITMAP32_SET(szone->tiny_bitmap, slot);
+ }
+ free_ptr->previous.p = NULL;
+ free_ptr->next.p = free_head;
+ free_list_set_checksum(szone, free_ptr);
+ szone->tiny_free_list[slot] = free_ptr;
+}
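+
+/*
+ * Free-list push sketch (illustration; a and b are hypothetical 3-quantum
+ * blocks): blocks are pushed LIFO onto the head of the slot's list, so the
+ * most recently freed block of a given size is handed out first by
+ * tiny_malloc_from_free_list().
+ */
+#if 0
+ tiny_free_list_add_ptr(szone, a, 3); // slot 2 list: a
+ tiny_free_list_add_ptr(szone, b, 3); // slot 2 list: b -> a
+ // b->next holds a (checksummed), a->previous holds b (checksummed), and
+ // bit 2 of szone->tiny_bitmap stays set while the list is non-empty.
+#endif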
+
+/*
+ * Removes the item pointed to by ptr in the proper free list.
+ * Assumes szone has been locked
+ */
+static INLINE void
+tiny_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize)
+{
+ grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
+ free_list_t *free_ptr = ptr, *next, *previous;
+ free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
+
+ next = free_list_unchecksum_ptr(free_ptr->next);
+ previous = free_list_unchecksum_ptr(free_ptr->previous);
+
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
+ }
+#endif
+ if (!previous) {
+ // The block to remove is the head of the free list
+#if DEBUG_MALLOC
+ if (szone->tiny_free_list[slot] != ptr) {
+ szone_error(szone, "tiny_free_list_remove_ptr: Internal invariant broken (szone->tiny_free_list[slot])", ptr,
+ "ptr=%p slot=%d msize=%d szone->tiny_free_list[slot]=%p\n",
+ ptr, slot, msize, szone->tiny_free_list[slot]);
+ return;
+ }
+#endif
+ szone->tiny_free_list[slot] = next;
+ if (!next) BITMAP32_CLR(szone->tiny_bitmap, slot);
+ } else {
+ // We know free_ptr is already checksummed, so we don't need to do it
+ // again.
+ previous->next = free_ptr->next;
+ }
+ if (next) {
+ // We know free_ptr is already checksummed, so we don't need to do it
+ // again.
+ next->previous = free_ptr->previous;
+ }
+}
+
+/*
+ * tiny_region_for_ptr_no_lock - Returns the tiny region containing the pointer,
+ * or NULL if not found.
+ */
+static INLINE region_t *
+tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
+{
+ return hash_lookup_region_no_lock(szone->tiny_regions,
+ szone->num_tiny_regions_allocated,
+ TINY_REGION_FOR_PTR(ptr));
+}
+
+static INLINE void
+tiny_free_no_lock(szone_t *szone, region_t *region, void *ptr, msize_t msize)
+{
+ size_t original_size = TINY_BYTES_FOR_MSIZE(msize);
+ void *next_block = ((char *)ptr + original_size);
+ msize_t previous_msize;
+ void *previous;
+ msize_t next_msize;
+ free_list_t *big_free_block;
+ free_list_t *after_next_block;
+ free_list_t *before_next_block;
+
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
+ }
+ if (! msize) {
+ szone_error(szone, "trying to free tiny block that is too small", ptr,
+ "in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
+ }
+#endif
+ // We try to coalesce this block with the preceding one
+ previous = tiny_previous_preceding_free(ptr, &previous_msize);
+ if (previous) {
+#if DEBUG_MALLOC
+ if (LOG(szone, ptr) || LOG(szone,previous)) {
+ malloc_printf("in tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
+ }
+#endif
+ // clear the meta_header since this is no longer the start of a block
+ set_tiny_meta_header_middle(ptr);
+ tiny_free_list_remove_ptr(szone, previous, previous_msize);
+ ptr = previous;
+ msize += previous_msize;
+ }
+ // We try to coalesce with the next block
+ if ((next_block < TINY_REGION_END(*region)) && tiny_meta_header_is_free(next_block)) {
+ next_msize = get_tiny_free_size(next_block);
+#if DEBUG_MALLOC
+ if (LOG(szone, ptr) || LOG(szone, next_block)) {
+ malloc_printf("in tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n",
+ ptr, msize, next_block, next_msize);
+ }
+#endif
+ // If we are coalescing with the next block, and the next block is in
+ // the last slot of the free list, then we optimize this case here to
+ // avoid removing next_block from slot 31 and then adding ptr back
+ // to slot 31.
+ if (next_msize >= NUM_TINY_SLOTS) {
+ msize += next_msize;
+ big_free_block = (free_list_t *)next_block;
+ free_list_checksum(szone, big_free_block, __PRETTY_FUNCTION__);
+ after_next_block = free_list_unchecksum_ptr(big_free_block->next);
+ before_next_block = free_list_unchecksum_ptr(big_free_block->previous);
+ if (!before_next_block) {
+ szone->tiny_free_list[NUM_TINY_SLOTS-1] = ptr;
+ } else {
+ before_next_block->next.u = free_list_checksum_ptr(ptr);
+ }
+ if (after_next_block) {
+ after_next_block->previous.u = free_list_checksum_ptr(ptr);
+ }
+ // we don't need to checksum these since they are already checksummed
+ ((free_list_t *)ptr)->previous = big_free_block->previous;
+ ((free_list_t *)ptr)->next = big_free_block->next;
+
+ // clear the meta_header to enable coalescing backwards
+ set_tiny_meta_header_middle(big_free_block);
+ set_tiny_meta_header_free(ptr, msize);
+ goto tiny_free_ending;
+ }
+ tiny_free_list_remove_ptr(szone, next_block, next_msize);
+ set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
+ msize += next_msize;
+ }
+#if !TINY_CACHE
+ // When the tiny cache is enabled, free blocks are scribbled as they pass
+ // through the cache, so this scribble is only compiled in when TINY_CACHE is off.
+ if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize) {
+ memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize));
+ }
+#endif
+ tiny_free_list_add_ptr(szone, ptr, msize);
+ tiny_free_ending:
+ // When in proper debug mode we write on the memory to help debug memory smashers
+ szone->num_tiny_objects--;
+ szone->num_bytes_in_tiny_objects -= original_size; // we use original_size and not msize to avoid double counting the coalesced blocks
+}
+
+// Allocates from the last region or a freshly allocated region
+static void *
+tiny_malloc_from_region_no_lock(szone_t *szone, msize_t msize)
+{
+ void *last_block, *ptr, *aligned_address;
+ unsigned char *last_header;
+ msize_t last_msize, last_index;
+
+ // Before anything we transform any remaining tiny_bytes_free_at_end into a
+ // regular free block. We take special care here to update the bitfield
+ // information, since we are bypassing the normal free codepath. If there
+ // is more than one quanta worth of memory in tiny_bytes_free_at_end, then
+ // there will be two block headers:
+ // 1) header for the free space at the end, msize = 1
+ // 2) header inserted by set_tiny_meta_header_in_use after the block
+ // We must clear the second one so that when the free block's size is
+ // queried, we do not think the block is only 1 quantum in size because
+ // of the second set header bit.
+ if (szone->tiny_bytes_free_at_end) {
+ last_block = TINY_REGION_END(szone->last_tiny_region) - szone->tiny_bytes_free_at_end;
+ last_msize = TINY_MSIZE_FOR_BYTES(szone->tiny_bytes_free_at_end);
+ last_header = TINY_BLOCK_HEADER_FOR_PTR(last_block);
+ last_index = TINY_INDEX_FOR_PTR(last_block);
+
+ if (last_index != (NUM_TINY_BLOCKS - 1))
+ BITARRAY_CLR(last_header, last_index + 1);
+
+ tiny_free_list_add_ptr(szone, last_block, last_msize);
+ szone->tiny_bytes_free_at_end = 0;
+ }
+ // time to create a new region
+ aligned_address = allocate_pages(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, 0, VM_MEMORY_MALLOC_TINY);
+ if (!aligned_address) // out of memory!
+ return NULL;
+ // We set the padding after block_header to be all 1
+ ((uint32_t *)(aligned_address + TINY_HEADER_START + (NUM_TINY_BLOCKS >> 3)))[0] = ~0;
+
+ // Check to see if the hash ring of tiny regions needs to grow. Try to
+ // avoid the hash ring becoming too dense.
+ if (szone->num_tiny_regions_allocated < (2 * szone->num_tiny_regions)) {
+ region_t *new_regions;
+ size_t new_size;
+ new_regions = hash_regions_grow_no_lock(szone, szone->tiny_regions,
+ szone->num_tiny_regions_allocated,
+ &new_size);
+ // Do not deallocate the current tiny_regions allocation since someone may
+ // be iterating it. Instead, just leak it.
+ szone->tiny_regions = new_regions;
+ szone->num_tiny_regions_allocated = new_size;
+ }
+ // Insert the new region into the hash ring, and update malloc statistics
+ hash_region_insert_no_lock(szone->tiny_regions,
+ szone->num_tiny_regions_allocated,
+ aligned_address);
+ szone->last_tiny_region = aligned_address;
+
+ szone->num_tiny_regions++;
+ ptr = aligned_address;
+ set_tiny_meta_header_in_use(ptr, msize);
+ szone->num_tiny_objects++;
+ szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(msize);
+
+ // Put an in-use header on the block following the allocation so that it serves as a boundary (for coalescing, etc...)
+ set_tiny_meta_header_in_use(ptr + TINY_BYTES_FOR_MSIZE(msize), 1);
+ szone->tiny_bytes_free_at_end = TINY_BYTES_FOR_MSIZE(NUM_TINY_BLOCKS - msize);
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
+ }
+#endif
+ return ptr;
+}
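+
+/*
+ * Worked example for the fresh-region carve above (sketch, assuming
+ * TINY_QUANTUM == 16 and NUM_TINY_BLOCKS == 65536): the first allocation
+ * comes from the region start, a 1-quantum in-use "blocker" header follows
+ * it, and the remainder is accounted for in tiny_bytes_free_at_end.
+ */
+#if 0
+ msize_t msize = 4; // 64-byte request
+ // ptr == aligned_address; bytes [0, 64) are the allocation
+ // blocker header set at aligned_address + 64
+ // szone->tiny_bytes_free_at_end == (65536 - 4) * 16 == 1048512
+#endif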
+
+static INLINE boolean_t
+try_realloc_tiny_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
+{
+ // returns 1 on success
+ msize_t index;
+ msize_t old_msize;
+ unsigned next_index;
+ void *next_block;
+ boolean_t is_free;
+ msize_t next_msize, coalesced_msize, leftover_msize;
+ void *leftover;
+
+ index = TINY_INDEX_FOR_PTR(ptr);
+ old_msize = TINY_MSIZE_FOR_BYTES(old_size);
+ next_index = index + old_msize;
+
+ if (next_index >= NUM_TINY_BLOCKS) {
+ return 0;
+ }
+ next_block = (char *)ptr + old_size;
+ SZONE_LOCK(szone);
+ is_free = tiny_meta_header_is_free(next_block);
+ if (!is_free) {
+ SZONE_UNLOCK(szone);
+ return 0; // next_block is in use;
+ }
+ next_msize = get_tiny_free_size(next_block);
+ if (old_size + TINY_BYTES_FOR_MSIZE(next_msize) < new_size) {
+ SZONE_UNLOCK(szone);
+ return 0; // even with next block, not enough
+ }
+ tiny_free_list_remove_ptr(szone, next_block, next_msize);
+ set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
+ coalesced_msize = TINY_MSIZE_FOR_BYTES(new_size - old_size + TINY_QUANTUM - 1);
+ leftover_msize = next_msize - coalesced_msize;
+ if (leftover_msize) {
+ leftover = next_block + TINY_BYTES_FOR_MSIZE(coalesced_msize);
+ tiny_free_list_add_ptr(szone, leftover, leftover_msize);
+ }
+ set_tiny_meta_header_in_use(ptr, old_msize + coalesced_msize);
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in try_realloc_tiny_in_place(), ptr=%p, msize=%d\n", ptr, old_msize + coalesced_msize);
+ }
+#endif
+ szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(coalesced_msize);
+ SZONE_UNLOCK(szone);
+ CHECK(szone, __PRETTY_FUNCTION__);
+ return 1;
+}
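+
+/*
+ * Worked example for the in-place grow above (sketch, assuming
+ * TINY_QUANTUM == 16): growing a 48-byte block to 80 bytes when a free
+ * 4-quantum (64-byte) block immediately follows it.
+ */
+#if 0
+ size_t old_size = 48, new_size = 80;
+ msize_t next_msize = 4; // 64 free bytes follow, so 48 + 64 >= 80 and the grow proceeds
+ msize_t coalesced_msize = TINY_MSIZE_FOR_BYTES(new_size - old_size + TINY_QUANTUM - 1); // (32 + 15) >> 4 == 2
+ msize_t leftover_msize = next_msize - coalesced_msize; // 2 quanta go back on the free list
+#endif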
+
+static boolean_t
+tiny_check_region(szone_t *szone, region_t region)
+{
+ uintptr_t start, ptr, region_end;
+ boolean_t prev_free = 0;
+ boolean_t is_free;
+ msize_t msize;
+ free_list_t *free_head;
+ void *follower, *previous, *next;
+
+ /* establish region limits */
+ start = (uintptr_t)TINY_REGION_ADDRESS(region);
+ ptr = start;
+ region_end = (uintptr_t)TINY_REGION_END(region);
+
+ /*
+ * The last region may have a trailing chunk which has not been converted into inuse/freelist
+ * blocks yet.
+ */
+ if (region == szone->last_tiny_region)
+ region_end -= szone->tiny_bytes_free_at_end;
+
+
+ /*
+ * Scan blocks within the region.
+ */
+ while (ptr < region_end) {
+ /*
+ * If the first block is free, and its size is 65536 (msize = 0) then the entire region is
+ * free.
+ */
+ msize = get_tiny_meta_header((void *)ptr, &is_free);
+ if (is_free && !msize && (ptr == start)) {
+ return 1;
+ }
+
+ /*
+ * If the block's size is 65536 (msize = 0) and this is not the first entry, then the
+ * size is corrupt.
+ */
+ if (!msize) {
+ malloc_printf("*** invariant broken for tiny block %p this msize=%d - size is too small\n",
+ ptr, msize);
+ return 0;
+ }
+
+ if (!is_free) {
+ /*
+ * In use blocks cannot be more than 31 quanta large.
+ */
+ prev_free = 0;
+ if (msize > (NUM_TINY_SLOTS - 1)) {
+ malloc_printf("*** invariant broken for %p this tiny msize=%d - size is too large\n",
+ ptr, msize);
+ return 0;
+ }
+ /* move to next block */
+ ptr += TINY_BYTES_FOR_MSIZE(msize);
+ } else {
+ /*
+ * Free blocks must have been coalesced, we cannot have a free block following another
+ * free block.
+ */
+ if (prev_free) {
+ malloc_printf("*** invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n",
+ ptr, msize);
+ return 0;
+ }
+ prev_free = 1;
+ /*
+ * Check the integrity of this block's entry in its freelist.
+ */
+ free_head = (free_list_t *)ptr;
+ free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
+ previous = free_list_unchecksum_ptr(free_head->previous);
+ next = free_list_unchecksum_ptr(free_head->next);
+ if (previous && !tiny_meta_header_is_free(previous)) {
+ malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n",
+ ptr, previous);
+ return 0;
+ }
+ if (next && !tiny_meta_header_is_free(next)) {
+ malloc_printf("*** invariant broken for %p (next in free list %p is not a free pointer)\n",
+ ptr, next);
+ return 0;
+ }
+ /*
+ * Check the free block's trailing size value.
+ */
+ follower = FOLLOWING_TINY_PTR(ptr, msize);
+ if (((uintptr_t)follower != region_end) && (get_tiny_previous_free_msize(follower) != msize)) {
+ malloc_printf("*** invariant broken for tiny free %p followed by %p in region [%p-%p] "
+ "(end marker incorrect) should be %d; in fact %d\n",
+ ptr, follower, TINY_REGION_ADDRESS(region), region_end, msize, get_tiny_previous_free_msize(follower));
+ return 0;
+ }
+ /* move to next block */
+ ptr = (uintptr_t)follower;
+ }
+ }
+ /*
+ * Ensure that we scanned the entire region
+ */
+ if (ptr != region_end) {
+ malloc_printf("*** invariant broken for region end %p - %p\n", ptr, region_end);
+ return 0;
+ }
+ /*
+ * Check the trailing block's integrity.
+ */
+ if (region == szone->last_tiny_region) {
+ if (szone->tiny_bytes_free_at_end) {
+ msize = get_tiny_meta_header((void *)ptr, &is_free);
+ if (is_free || (msize != 1)) {
+ malloc_printf("*** invariant broken for blocker block %p - %d %d\n", ptr, msize, is_free);
+ }
+ }
+ }
+ return 1;
+}
+
+static kern_return_t
+tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone, memory_reader_t reader, vm_range_recorder_t recorder)
+{
+ size_t num_regions = szone->num_tiny_regions_allocated;
+ void *last_tiny_free = szone->last_tiny_free;
+ size_t index;
+ region_t *regions;
+ vm_range_t buffer[MAX_RECORDER_BUFFER];
+ unsigned count = 0;
+ kern_return_t err;
+ region_t region;
+ vm_range_t range;
+ vm_range_t admin_range;
+ vm_range_t ptr_range;
+ unsigned char *mapped_region;
+ unsigned char *block_header;
+ unsigned char *in_use;
+ unsigned block_index;
+ unsigned block_limit;
+ boolean_t is_free;
+ msize_t msize;
+ void *mapped_ptr;
+ unsigned bit;
+ vm_address_t last_tiny_free_ptr = 0;
+ msize_t last_tiny_free_msize = 0;
+
+ if (last_tiny_free) {
+ last_tiny_free_ptr = (uintptr_t) last_tiny_free & ~(TINY_QUANTUM - 1);
+ last_tiny_free_msize = (uintptr_t) last_tiny_free & (TINY_QUANTUM - 1);
+ }
+
+ err = reader(task, (vm_address_t)szone->tiny_regions, sizeof(region_t) * num_regions, (void **)&regions);
+ if (err) return err;
+ for (index = 0; index < num_regions; ++index) {
+ region = regions[index];
+ if (region) {
+ range.address = (vm_address_t)TINY_REGION_ADDRESS(region);
+ range.size = (vm_size_t)TINY_REGION_SIZE;
+ if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
+ admin_range.address = range.address + TINY_HEADER_START;
+ admin_range.size = TINY_HEADER_SIZE;
+ recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
+ }
+ if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
+ ptr_range.address = range.address;
+ ptr_range.size = NUM_TINY_BLOCKS * TINY_QUANTUM;
+ recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
+ }
+ if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
+ err = reader(task, range.address, range.size, (void **)&mapped_region);
+ if (err)
+ return err;
+
+ block_header = (unsigned char *)(mapped_region + TINY_HEADER_START);
+ in_use = TINY_INUSE_FOR_HEADER(block_header);
+ block_index = 0;
+ block_limit = NUM_TINY_BLOCKS;
+ if (region == szone->last_tiny_region)
+ block_limit -= TINY_MSIZE_FOR_BYTES(szone->tiny_bytes_free_at_end);
+
+ while (block_index < block_limit) {
+ vm_size_t block_offset = TINY_BYTES_FOR_MSIZE(block_index);
+ is_free = !BITARRAY_BIT(in_use, block_index);
+ if (is_free) {
+ mapped_ptr = mapped_region + block_offset;
+
+ // mapped_region, the address at which 'range' in 'task' has been
+ // mapped into our process, is not necessarily aligned to
+ // TINY_BLOCKS_ALIGN.
+ //
+ // Since the code in get_tiny_free_size() assumes the pointer came
+ // from a properly aligned tiny region, and mapped_region is not
+ // necessarily aligned, then do the size calculation directly.
+ // If the next bit is set in the header bitmap, then the size is one
+ // quantum. Otherwise, read the size field.
+ if (!BITARRAY_BIT(block_header, block_index+1))
+ msize = TINY_FREE_SIZE(mapped_ptr);
+ else
+ msize = 1;
+
+ if (!msize)
+ break;
+ } else if (range.address + block_offset != last_tiny_free_ptr) {
+ msize = 1;
+ bit = block_index + 1;
+ while (! BITARRAY_BIT(block_header, bit)) {
+ bit++;
+ msize ++;
+ }
+ buffer[count].address = range.address + block_offset;
+ buffer[count].size = TINY_BYTES_FOR_MSIZE(msize);
+ count++;
+ if (count >= MAX_RECORDER_BUFFER) {
+ recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
+ count = 0;
+ }
+ } else {
+ // Block is not free but it matches last_tiny_free_ptr so even
+ // though it is not marked free in the bitmap, we treat it as if
+ // it is and move on
+ msize = last_tiny_free_msize;
+ }
+ block_index += msize;
+ }
+ }
+ }
+ }
+ if (count) {
+ recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
+ }
+ return 0;
+}
+
+static void *
+tiny_malloc_from_free_list(szone_t *szone, msize_t msize)
+{
+ // Assumes we've locked the region
+ free_list_t *ptr;
+ msize_t this_msize;
+ grain_t slot = msize - 1;
+ free_list_t **free_list = szone->tiny_free_list;
+ free_list_t **the_slot = free_list + slot;
+ free_list_t *next;
+ free_list_t **limit;
+ unsigned bitmap;
+ msize_t leftover_msize;
+ free_list_t *leftover_ptr;
+
+ // Assumes locked
+ CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
+
+ // Look for an exact match by checking the freelist for this msize.
+ //
+ ptr = *the_slot;
+ if (ptr) {
+ next = free_list_unchecksum_ptr(ptr->next);
+ if (next) {
+ next->previous = ptr->previous;
+ } else {
+ BITMAP32_CLR(szone->tiny_bitmap, slot);
+ }
+ *the_slot = next;
+ this_msize = msize;
+#if DEBUG_MALLOC
+ if (LOG(szone, ptr)) {
+ malloc_printf("in tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize);
+ }
+#endif
+ goto return_tiny_alloc;
+ }
+
+ // Mask off the bits representing slots holding free blocks smaller than the
+ // size we need. If there are no larger free blocks, try allocating from
+ // the free space at the end of the tiny region.
+ bitmap = szone->tiny_bitmap & ~ ((1 << slot) - 1);
+ if (!bitmap)
+ goto try_tiny_malloc_from_end;
+
+ slot = BITMAP32_CTZ(bitmap);
+ limit = free_list + NUM_TINY_SLOTS - 1;
+ free_list += slot;
+
+ // Iterate over freelists looking for free blocks, starting at first list
+ // which is not empty, and contains blocks which are large enough to satisfy
+ // our request.
+ while (free_list < limit) {
+ ptr = *free_list;
+ if (ptr) {
+ next = free_list_unchecksum_ptr(ptr->next);
+ *free_list = next;
+ this_msize = get_tiny_free_size(ptr);
+ if (next) {
+ next->previous = ptr->previous;
+ } else {
+ BITMAP32_CLR(szone->tiny_bitmap, this_msize - 1);
+ }
+ goto add_leftover_and_proceed;
+ }
+ free_list++;
+ }
+
+ // We are now looking at the last slot, which contains blocks equal to, or
+ // due to coalescing of free blocks, larger than 31 * tiny quantum size.
+ // If the last freelist is not empty, and the head contains a block that is
+ // larger than our request, then the remainder is put back on the free list.
+ ptr = *limit;
+ if (ptr) {
+ free_list_checksum(szone, ptr, __PRETTY_FUNCTION__);
+ this_msize = get_tiny_free_size(ptr);
+ next = free_list_unchecksum_ptr(ptr->next);
+ if (this_msize - msize >= NUM_TINY_SLOTS) {
+ // the leftover will go back to the free list, so we optimize by
+ // modifying the free list rather than a pop and push of the head
+ leftover_msize = this_msize - msize;
+ leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
+ *limit = leftover_ptr;
+ if (next) {
+ next->previous.u = free_list_checksum_ptr(leftover_ptr);
+ }
+ leftover_ptr->previous = ptr->previous;
+ leftover_ptr->next = ptr->next;
+ set_tiny_meta_header_free(leftover_ptr, leftover_msize);
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
+ }
+#endif
+ this_msize = msize;
+ goto return_tiny_alloc;
+ }
+ if (next) {
+ next->previous = ptr->previous;
+ }
+ *limit = next;
+ goto add_leftover_and_proceed;
+ }
+
+try_tiny_malloc_from_end:
+ // Let's see if we can use szone->tiny_bytes_free_at_end
+ if (szone->tiny_bytes_free_at_end >= TINY_BYTES_FOR_MSIZE(msize)) {
+ ptr = (free_list_t *)(TINY_REGION_END(szone->last_tiny_region) - szone->tiny_bytes_free_at_end);
+ szone->tiny_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(msize);
+ if (szone->tiny_bytes_free_at_end) {
+ // let's add an in use block after ptr to serve as boundary
+ set_tiny_meta_header_in_use((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize), 1);
+ }
+ this_msize = msize;
+#if DEBUG_MALLOC
+ if (LOG(szone, ptr)) {
+ malloc_printf("in tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize);
+ }
+#endif
+ goto return_tiny_alloc;
+ }
+ return NULL;
+
+add_leftover_and_proceed:
+ if (!this_msize || (this_msize > msize)) {
+ leftover_msize = this_msize - msize;
+ leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
+ }
+#endif
+ tiny_free_list_add_ptr(szone, leftover_ptr, leftover_msize);
+ this_msize = msize;
+ }
+
+return_tiny_alloc:
+ szone->num_tiny_objects++;
+ szone->num_bytes_in_tiny_objects += TINY_BYTES_FOR_MSIZE(this_msize);
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
+ }
+#endif
+ set_tiny_meta_header_in_use(ptr, this_msize);
+ return ptr;
+}
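+
+/*
+ * Worked example for the slot search above (illustration): a request for
+ * msize == 5 (slot 4) when only slots 2 and 9 hold free blocks.
+ */
+#if 0
+ unsigned bitmap = (1 << 2) | (1 << 9); // szone->tiny_bitmap
+ bitmap &= ~((1 << 4) - 1); // mask off slots 0..3, leaving only bit 9
+ // BITMAP32_CTZ(bitmap) == 9, so a 10-quantum block is split: 5 quanta are
+ // returned and the 5-quantum leftover is re-added via add_leftover_and_proceed.
+#endif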
+
+static INLINE void *
+tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested)
+{
+ boolean_t locked = 0;
+ void *ptr;
+
+#if DEBUG_MALLOC
+ if (!msize) {
+ szone_error(szone, "invariant broken (!msize) in allocation (region)", NULL, NULL);
+ return(NULL);
+ }
+#endif
+#if TINY_CACHE
+ ptr = szone->last_tiny_free;
+ if ((((uintptr_t)ptr) & (TINY_QUANTUM - 1)) == msize) {
+ // we have a candidate - let's lock to make sure
+ LOCK_AND_NOTE_LOCKED(szone, locked);
+ if (ptr == szone->last_tiny_free) {
+ szone->last_tiny_free = NULL;
+ SZONE_UNLOCK(szone);
+ CHECK(szone, __PRETTY_FUNCTION__);
+ ptr = (void *)((uintptr_t)ptr & ~ (TINY_QUANTUM - 1));
+ if (cleared_requested) {
+ memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
+ }
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize);
+ }
+#endif
+ return ptr;
+ }
+ }
+#endif
+ // Except in the rare case where we need to add a new region, we are going to
+ // end up locking anyway, so lock right away to avoid unnecessary optimistic probes.
+ if (!locked) LOCK_AND_NOTE_LOCKED(szone, locked);
+ ptr = tiny_malloc_from_free_list(szone, msize);
+ if (ptr) {
+ SZONE_UNLOCK(szone);
+ CHECK(szone, __PRETTY_FUNCTION__);
+ if (cleared_requested) {
+ memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
+ }
+ return ptr;
+ }
+ ptr = tiny_malloc_from_region_no_lock(szone, msize);
+ // we don't clear because this freshly allocated space is pristine
+ SZONE_UNLOCK(szone);
+ CHECK(szone, __PRETTY_FUNCTION__);
+ return ptr;
+}
+
+static INLINE void
+free_tiny(szone_t *szone, void *ptr, region_t *tiny_region)
+{
+ msize_t msize;
+ boolean_t is_free;
+#if TINY_CACHE
+ void *ptr2;
+#endif
+
+ // ptr is known to be in tiny_region
+ SZONE_LOCK(szone);
+#if TINY_CACHE
+ ptr2 = szone->last_tiny_free;
+ /* check that we don't already have this pointer in the cache */
+ if (ptr == (void *)((uintptr_t)ptr2 & ~ (TINY_QUANTUM - 1))) {
+ szone_error(szone, "double free", ptr, NULL);
+ return;
+ }
+#endif /* TINY_CACHE */
+ msize = get_tiny_meta_header(ptr, &is_free);
+ if (is_free) {
+ szone_error(szone, "double free", ptr, NULL);
+ return;
+ }
+#if DEBUG_MALLOC
+ if (!msize) {
+ malloc_printf("*** szone_free() block in use is too large: %p\n", ptr);
+ return;
+ }
+#endif
+#if TINY_CACHE
+ if (msize < TINY_QUANTUM) { // msize must fit in the low 4 bits of the cached pointer
+ if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize)
+ memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize));
+ szone->last_tiny_free = (void *)(((uintptr_t)ptr) | msize);
+ if (!ptr2) {
+ SZONE_UNLOCK(szone);
+ CHECK(szone, __PRETTY_FUNCTION__);
+ return;
+ }
+ msize = (uintptr_t)ptr2 & (TINY_QUANTUM - 1);
+ ptr = (void *)(((uintptr_t)ptr2) & ~(TINY_QUANTUM - 1));
+ tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
+ if (!tiny_region) {
+ szone_error(szone, "double free (tiny cache)", ptr, NULL);
+ }
+ }
+#endif
+ tiny_free_no_lock(szone, tiny_region, ptr, msize);
+ SZONE_UNLOCK(szone);
+ CHECK(szone, __PRETTY_FUNCTION__);
+}
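+
+/*
+ * Sketch of the last_tiny_free encoding used above (illustration): tiny
+ * blocks are TINY_QUANTUM (16-byte) aligned, so the low 4 bits of the cached
+ * pointer are free to carry the block's msize (which is < 16 here).
+ */
+#if 0
+ void *cached = (void *)(((uintptr_t)ptr) | msize); // pack
+ void *block = (void *)((uintptr_t)cached & ~(TINY_QUANTUM - 1)); // unpack the pointer
+ msize_t cached_msize = (uintptr_t)cached & (TINY_QUANTUM - 1); // unpack the msize
+#endif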
+
+static void
+print_tiny_free_list(szone_t *szone)
+{
+ grain_t slot = 0;
+ free_list_t *ptr;
+ _SIMPLE_STRING b = _simple_salloc();
+
+ if (b) {
+ _simple_sappend(b, "tiny free sizes: ");
+ while (slot < NUM_TINY_SLOTS) {
+ ptr = szone->tiny_free_list[slot];
+ if (ptr) {
+ _simple_sprintf(b, "%s%y[%d]; ", (slot == NUM_TINY_SLOTS-1) ? ">=" : "", (slot+1)*TINY_QUANTUM, free_list_count(ptr));
+ }
+ slot++;
+ }
+ _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
+ _simple_sfree(b);
+ }
+}
+
+static void
+print_tiny_region(boolean_t verbose, region_t region, size_t bytes_at_end)
+{
+ unsigned counts[1024];
+ unsigned in_use = 0;
+ uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(region);
+ uintptr_t current = start;
+ uintptr_t limit = (uintptr_t)TINY_REGION_END(region) - bytes_at_end;
+ boolean_t is_free;
+ msize_t msize;
+ unsigned ci;
+ _SIMPLE_STRING b;
+
+ memset(counts, 0, 1024 * sizeof(unsigned));
+ while (current < limit) {
+ msize = get_tiny_meta_header((void *)current, &is_free);
+ if (is_free && !msize && (current == start)) {
+ // first block is all free
+ break;
+ }
+ if (!msize) {
+ malloc_printf("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize);
+ break;
+ }
+ if (!is_free) {
+ // block in use
+ if (msize > 32)
+ malloc_printf("*** error at %p msize for in_use is %d\n", (void *)current, msize);
+ if (msize < 1024)
+ counts[msize]++;
+ in_use++;
+ }
+ current += TINY_BYTES_FOR_MSIZE(msize);
+ }
+ if ((b = _simple_salloc()) != NULL) {
+ _simple_sprintf(b, "Tiny region [%p-%p, %y]\t", (void *)start, TINY_REGION_END(region), (int)TINY_REGION_SIZE);
+ _simple_sprintf(b, "In_use=%d ", in_use);
+ if (bytes_at_end) _simple_sprintf(b, "untouched=%ly", bytes_at_end);
+ if (verbose && in_use) {
+ _simple_sappend(b, "\n\tSizes in use: ");
+ for (ci = 0; ci < 1024; ci++)
+ if (counts[ci])
+ _simple_sprintf(b, "%d[%d]", TINY_BYTES_FOR_MSIZE(ci), counts[ci]);
+ }
+ _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
+ _simple_sfree(b);
+ }
+}
+
+static boolean_t
+tiny_free_list_check(szone_t *szone, grain_t slot)
+{
+ unsigned count = 0;
+ free_list_t *ptr = szone->tiny_free_list[slot];
+ free_list_t *previous = NULL;
+ boolean_t is_free;
+
+ CHECK_LOCKED(szone, __PRETTY_FUNCTION__);
+ while (ptr) {
+ free_list_checksum(szone, ptr, __PRETTY_FUNCTION__);
+ is_free = tiny_meta_header_is_free(ptr);
+ if (! is_free) {
+ malloc_printf("*** in-use ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
+ return 0;
+ }
+ if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
+ malloc_printf("*** unaligned ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
+ return 0;
+ }
+ if (!tiny_region_for_ptr_no_lock(szone, ptr)) {
+ malloc_printf("*** ptr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr);
+ return 0;
+ }
+ if (free_list_unchecksum_ptr(ptr->previous) != previous) {
+ malloc_printf("*** previous incorrectly set slot=%d count=%d ptr=%p\n", slot, count, ptr);
+ return 0;
+ }
+ previous = ptr;
+ ptr = free_list_unchecksum_ptr(ptr->next);
+ count++;
+ }
+ return 1;
+}
+
+/********************* SMALL FREE LIST UTILITIES ************************/
+
+/*
+ * Mark a block as free. Only the first quantum of a block is marked thusly,
+ * the remainder are marked "middle".
+ */
+static INLINE void
+small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize)
+{
+ meta_headers[index] = msize | SMALL_IS_FREE;
+}
+
+/*
+ * Mark a block as in use. Only the first quantum of a block is marked thusly,
+ * the remainder are marked "middle".
+ */
+static INLINE void
+small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize)
+{
+ meta_headers[index] = msize;
+}
+
+/*
+ * Mark a quantum as being the second or later in a block.
+ */
+static INLINE void
+small_meta_header_set_middle(msize_t *meta_headers, msize_t index)
+{
+ meta_headers[index] = 0;
+}
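+
+/*
+ * Sketch of the small meta-header encoding (illustration; meta_headers is the
+ * region's per-quantum header array): only the first quantum of each block
+ * carries the size, and the SMALL_IS_FREE bit distinguishes free blocks from
+ * in-use ones.
+ */
+#if 0
+ small_meta_header_set_in_use(meta_headers, 0, 3); // [0] = 3: in-use block of 3 quanta
+ small_meta_header_set_middle(meta_headers, 1); // [1] = 0
+ small_meta_header_set_middle(meta_headers, 2); // [2] = 0
+ small_meta_header_set_is_free(meta_headers, 3, 2); // [3] = 2 | SMALL_IS_FREE: free block of 2 quanta
+#endif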
+
+// Adds an item to the proper free list
+// Also marks the header of the block properly
+// Assumes szone has been locked
+static void
+small_free_list_add_ptr(szone_t *szone, void *ptr, msize_t msize)
+{
+ grain_t slot = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
+ free_list_t *free_ptr = ptr;
+ free_list_t *free_head = szone->small_free_list[slot];
+ void *follower;
+
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
+ }
+ if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) {
+ szone_error(szone, "small_free_list_add_ptr: Unaligned ptr", ptr, NULL);
+ }
+#endif
+ if (free_head) {
+ free_list_checksum(szone, free_head, __PRETTY_FUNCTION__);
+#if DEBUG_MALLOC
+ if (free_list_unchecksum_ptr(free_head->previous)) {
+ szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr,
+ "ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, free_head, free_head->previous.p);
+ }
+ if (!SMALL_PTR_IS_FREE(free_head)) {
+ szone_error(szone, "small_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr,
+ "ptr=%p slot=%d free_head=%p\n", ptr, slot, free_head);
+ }
+#endif
+ free_head->previous.u = free_list_checksum_ptr(free_ptr);
+ } else {
+ BITMAP32_SET(szone->small_bitmap, slot);
+ }
+ free_ptr->previous.p = NULL;
+ free_ptr->next.p = free_head;
+ free_list_set_checksum(szone, free_ptr);
+ szone->small_free_list[slot] = free_ptr;
+ follower = ptr + SMALL_BYTES_FOR_MSIZE(msize);
+ SMALL_PREVIOUS_MSIZE(follower) = msize;
+}
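+
+/*
+ * Note on the trailing size written above (sketch; p is a hypothetical
+ * 6-quantum free block): like tiny free blocks, a small free block stores its
+ * msize just past its end, so the next block can find it for backward coalescing.
+ */
+#if 0
+ small_free_list_add_ptr(szone, p, 6);
+ // SMALL_PREVIOUS_MSIZE(p + SMALL_BYTES_FOR_MSIZE(6)) == 6
+#endif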
+
+// Removes item in the proper free list
+// msize could be read, but all callers have it so we pass it in
+// Assumes szone has been locked
+static void
+small_free_list_remove_ptr(szone_t *szone, void *ptr, msize_t msize)
+{
+ grain_t slot = (msize <= NUM_SMALL_SLOTS) ? msize - 1 : NUM_SMALL_SLOTS - 1;
+ free_list_t *free_ptr = ptr, *next, *previous;
+ free_list_checksum(szone, free_ptr, __PRETTY_FUNCTION__);
+
+ next = free_list_unchecksum_ptr(free_ptr->next);
+ previous = free_list_unchecksum_ptr(free_ptr->previous);
+
+#if DEBUG_MALLOC
+ if (LOG(szone,ptr)) {
+ malloc_printf("In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
+ }
+#endif
+ if (!previous) {
+ // The block to remove is the head of the free list
+#if DEBUG_MALLOC
+ if (szone->small_free_list[slot] != ptr) {
+ szone_error(szone, "small_free_list_remove_ptr: Internal invariant broken (szone->small_free_list[slot])", ptr,
+ "ptr=%p slot=%d msize=%d szone->small_free_list[slot]=%p\n",
+ ptr, slot, msize, szone->small_free_list[slot]);
+ return;
+ }
+#endif
+ szone->small_free_list[slot] = next;
+ if (!next) BITMAP32_CLR(szone->small_bitmap, slot);
+ } else {
+ // We know free_ptr is already checksummed, so we don't need to do it
+ // again.
+ previous->next = free_ptr->next;
+ }
+ if (next) {
+ // We know free_ptr is already checksummed, so we don't need to do it
+ // again.
+ next->previous = free_ptr->previous;