-    tiny_region_t *regions;
-    unsigned index = 0;
-    vm_range_t buffer[MAX_RECORDER_BUFFER];
-    unsigned count = 0;
-    kern_return_t err;
-    tiny_region_t region;
-    vm_range_t range;
-    vm_range_t admin_range;
-    vm_range_t ptr_range;
-    unsigned char *mapped_region;
-    unsigned char *block_header;
-    unsigned char *in_use;
-    unsigned block_index;
-    unsigned block_limit;
-    boolean_t is_free;
-    msize_t msize;
-    void *mapped_ptr;
-    unsigned bit;
-
-    err = reader(task, region_address, sizeof(tiny_region_t) * num_regions, (void **)&regions);
-    if (err) return err;
-    while (index < num_regions) {
-        // unsigned num_in_use = 0;
-        // unsigned num_free = 0;
-        region = regions[index];
-        range.address = (vm_address_t)TINY_REGION_ADDRESS(region);
-        range.size = (vm_size_t)TINY_REGION_SIZE;
-        if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
-            admin_range.address = range.address + (1 << TINY_BLOCKS_ALIGN);
-            admin_range.size = range.size - (1 << TINY_BLOCKS_ALIGN);
-            recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
-        }
-        if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
-            ptr_range.address = range.address;
-            ptr_range.size = 1 << TINY_BLOCKS_ALIGN;
-            recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
-        }
-        if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
-            err = reader(task, range.address, range.size, (void **)&mapped_region);
-            if (err)
-                return err;
-            block_header = (unsigned char *)(mapped_region + (1 << TINY_BLOCKS_ALIGN));
-            in_use = block_header + (NUM_TINY_BLOCKS >> 3) + 4;
-            block_index = 0;
-            block_limit = NUM_TINY_BLOCKS;
-            if (index == num_regions - 1)
-                block_limit -= TINY_MSIZE_FOR_BYTES(tiny_bytes_free_at_end);
-            while (block_index < block_limit) {
-                is_free = !BITARRAY_BIT(in_use, block_index);
-                if (is_free) {
-                    mapped_ptr = mapped_region + TINY_BYTES_FOR_MSIZE(block_index);
-                    msize = TINY_FREE_SIZE(mapped_ptr);
-                    if (!msize)
-                        break;
-                } else {
-                    msize = 1;
-                    bit = block_index + 1;
-                    while (! BITARRAY_BIT(block_header, bit)) {
-                        bit++;
-                        msize ++;
-                    }
-                    buffer[count].address = range.address + TINY_BYTES_FOR_MSIZE(block_index);
-                    buffer[count].size = TINY_BYTES_FOR_MSIZE(msize);
-                    count++;
-                    if (count >= MAX_RECORDER_BUFFER) {
-                        recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
-                        count = 0;
-                    }
-                }
-                block_index += msize;
-            }
-        }
-        index++;
-    }
-    if (count) {
-        recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
-    }
-    return 0;
+    size_t num_regions = szone->num_tiny_regions_allocated;
+    void *last_tiny_free = szone->last_tiny_free;
+    size_t index;
+    region_t *regions;
+    vm_range_t buffer[MAX_RECORDER_BUFFER];
+    unsigned count = 0;
+    kern_return_t err;
+    region_t region;
+    vm_range_t range;
+    vm_range_t admin_range;
+    vm_range_t ptr_range;
+    unsigned char *mapped_region;
+    unsigned char *block_header;
+    unsigned char *in_use;
+    unsigned block_index;
+    unsigned block_limit;
+    boolean_t is_free;
+    msize_t msize;
+    void *mapped_ptr;
+    unsigned bit;
+    vm_address_t last_tiny_free_ptr = 0;
+    msize_t last_tiny_free_msize = 0;
+
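+    // last_tiny_free packs the cached free block's msize into the low
+    // (sub-TINY_QUANTUM) bits of its quantum-aligned pointer; split it back out
+    // here so that block can be treated as free even though the in-use bitmap
+    // still marks it allocated.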
+    if (last_tiny_free) {
+        last_tiny_free_ptr = (uintptr_t) last_tiny_free & ~(TINY_QUANTUM - 1);
+        last_tiny_free_msize = (uintptr_t) last_tiny_free & (TINY_QUANTUM - 1);
+    }
+
+    err = reader(task, (vm_address_t)szone->tiny_regions, sizeof(region_t) * num_regions, (void **)&regions);
+    if (err) return err;
+    for (index = 0; index < num_regions; ++index) {
+        region = regions[index];
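+        // Skip unused (NULL) slots in the regions array.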
+        if (region) {
+            range.address = (vm_address_t)TINY_REGION_ADDRESS(region);
+            range.size = (vm_size_t)TINY_REGION_SIZE;
+            if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
+                admin_range.address = range.address + TINY_HEADER_START;
+                admin_range.size = TINY_HEADER_SIZE;
+                recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
+            }
+            if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
+                ptr_range.address = range.address;
+                ptr_range.size = NUM_TINY_BLOCKS * TINY_QUANTUM;
+                recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
+            }
+            if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
+                err = reader(task, range.address, range.size, (void **)&mapped_region);
+                if (err)
+                    return err;
+
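+                // Locate the block header and in-use bitmaps inside our locally
+                // mapped copy of the region.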
+                block_header = (unsigned char *)(mapped_region + TINY_HEADER_START);
+                in_use = TINY_INUSE_FOR_HEADER(block_header);
+                block_index = 0;
+                block_limit = NUM_TINY_BLOCKS;
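+                // The last region may end with an unclaimed run of bytes
+                // (tiny_bytes_free_at_end); there are no blocks to enumerate there.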
+                if (region == szone->last_tiny_region)
+                    block_limit -= TINY_MSIZE_FOR_BYTES(szone->tiny_bytes_free_at_end);
+
+                while (block_index < block_limit) {
+                    vm_size_t block_offset = TINY_BYTES_FOR_MSIZE(block_index);
+                    is_free = !BITARRAY_BIT(in_use, block_index);
+                    if (is_free) {
+                        mapped_ptr = mapped_region + block_offset;
+
+                        // mapped_region, the address at which 'range' in 'task' has been
+                        // mapped into our process, is not necessarily aligned to
+                        // TINY_BLOCKS_ALIGN.
+                        //
+                        // Since the code in get_tiny_free_size() assumes the pointer came
+                        // from a properly aligned tiny region, and mapped_region is not
+                        // necessarily aligned, do the size calculation directly: if the
+                        // next bit is set in the header bitmap, the size is one quantum;
+                        // otherwise, read the size field.
+                        if (!BITARRAY_BIT(block_header, block_index+1))
+                            msize = TINY_FREE_SIZE(mapped_ptr);
+                        else
+                            msize = 1;
+
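+                        // A zero size means there is nothing further to enumerate
+                        // in this region.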
+                        if (!msize)
+                            break;
+                    } else if (range.address + block_offset != last_tiny_free_ptr) {
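+                        // An in-use block: its size runs to the next set bit in the
+                        // header bitmap. Record it, flushing the buffer in batches of
+                        // MAX_RECORDER_BUFFER.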
+                        msize = 1;
+                        bit = block_index + 1;
+                        while (! BITARRAY_BIT(block_header, bit)) {
+                            bit++;
+                            msize ++;
+                        }
+                        buffer[count].address = range.address + block_offset;
+                        buffer[count].size = TINY_BYTES_FOR_MSIZE(msize);
+                        count++;
+                        if (count >= MAX_RECORDER_BUFFER) {
+                            recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
+                            count = 0;
+                        }
+                    } else {
+                        // The block is not marked free in the bitmap, but it matches
+                        // last_tiny_free_ptr, so treat it as free and step over it.
+                        msize = last_tiny_free_msize;
+                    }
+                    block_index += msize;
+                }
+            }
+        }
+    }
+    if (count) {
+        recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
+    }
+    return 0;