- for (idx = 0; idx < max_zones; idx++)
- {
- if (!strncmp(zoneName, zone_array[idx].zone_name, nameLen)) break;
- }
- if (idx >= max_zones) return (KERN_INVALID_NAME);
- zone = &zone_array[idx];
-
- elemSize = (uint32_t) zone->elem_size;
- maxElems = ptoa(zone->page_count) / elemSize;
-
- if ((zone->alloc_size % elemSize)
- && !leak_scan_debug_flag) return (KERN_INVALID_CAPABILITY);
-
- kr = kmem_alloc_kobject(kernel_map, (vm_offset_t *) &array,
- maxElems * sizeof(uintptr_t), VM_KERN_MEMORY_DIAG);
- if (KERN_SUCCESS != kr) return (kr);
-
- lock_zone(zone);
-
- next = array;
- next = zone_copy_all_allocations_inqueue(zone, &zone->pages.any_free_foreign, next);
- next = zone_copy_all_allocations_inqueue(zone, &zone->pages.intermediate, next);
- next = zone_copy_all_allocations_inqueue(zone, &zone->pages.all_used, next);
- count = (uint32_t)(next - array);
-
- unlock_zone(zone);
-
- zone_leaks_scan(array, count, (uint32_t)zone->elem_size, &found);
- assert(found <= count);
-
- for (idx = 0; idx < count; idx++)
- {
- element = array[idx];
- if (kInstanceFlagReferenced & element) continue;
- element = INSTANCE_PUT(element) & ~kInstanceFlags;
- }
-
- if (zone->zlog_btlog && !corruption_debug_flag)
- {
- // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
- btlog_copy_backtraces_for_elements(zone->zlog_btlog, array, &count, elemSize, proc, refCon);
- }
-
- for (nobtcount = idx = 0; idx < count; idx++)
- {
- element = array[idx];
- if (!element) continue;
- if (kInstanceFlagReferenced & element) continue;
- element = INSTANCE_PUT(element) & ~kInstanceFlags;
-
- // see if we can find any backtrace left in the element
- btcount = (typeof(btcount)) (zone->elem_size / sizeof(uintptr_t));
- if (btcount >= MAX_ZTRACE_DEPTH) btcount = MAX_ZTRACE_DEPTH - 1;
- for (btfound = btidx = 0; btidx < btcount; btidx++)
- {
- bt = ((uintptr_t *)element)[btcount - 1 - btidx];
- if (!VM_KERNEL_IS_SLID(bt)) break;
- zbt[btfound++] = bt;
- }
- if (btfound) (*proc)(refCon, 1, elemSize, &zbt[0], btfound);
- else nobtcount++;
- }
- if (nobtcount)
- {
- // fake backtrace when we found nothing
- zbt[0] = (uintptr_t) &zalloc;
- (*proc)(refCon, nobtcount, elemSize, &zbt[0], 1);
- }
-
- kmem_free(kernel_map, (vm_offset_t) array, maxElems * sizeof(uintptr_t));
-
- return (KERN_SUCCESS);
+ for (idx = 0; idx < max_zones; idx++) {
+ if (!strncmp(zoneName, zone_array[idx].zone_name, nameLen)) {
+ break;
+ }
+ }
+ if (idx >= max_zones) {
+ return KERN_INVALID_NAME;
+ }
+ zone = &zone_array[idx];
+
+ elemSize = (uint32_t) zone->elem_size;
+ maxElems = ptoa(zone->page_count) / elemSize;
+
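+ // If the zone's allocation size is not an even multiple of the element
+ // size, element boundaries in the pages are ambiguous to the scan below,
+ // so bail out unless leak-scan debugging is explicitly enabled.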
+ if ((zone->alloc_size % elemSize)
+ && !leak_scan_debug_flag) {
+ return KERN_INVALID_CAPABILITY;
+ }
+
+ kr = kmem_alloc_kobject(kernel_map, (vm_offset_t *) &array,
+ maxElems * sizeof(uintptr_t), VM_KERN_MEMORY_DIAG);
+ if (KERN_SUCCESS != kr) {
+ return kr;
+ }
+
+ lock_zone(zone);
+
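+ // With the zone locked, snapshot the address of every outstanding
+ // allocation from each of the zone's page queues into the scratch array.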
+ next = array;
+ next = zone_copy_all_allocations_inqueue(zone, &zone->pages.any_free_foreign, next);
+ next = zone_copy_all_allocations_inqueue(zone, &zone->pages.intermediate, next);
+ next = zone_copy_all_allocations_inqueue(zone, &zone->pages.all_used, next);
+ count = (uint32_t)(next - array);
+
+ unlock_zone(zone);
+
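+ // zone_leaks_scan marks each element that is still referenced from kernel
+ // memory by setting kInstanceFlagReferenced in the array; the unmarked
+ // entries are the leak candidates.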
+ zone_leaks_scan(array, count, (uint32_t)zone->elem_size, &found);
+ assert(found <= count);
+
+ for (idx = 0; idx < count; idx++) {
+ element = array[idx];
+ if (kInstanceFlagReferenced & element) {
+ continue;
+ }
+ element = INSTANCE_PUT(element) & ~kInstanceFlags;
+ }
+
+ if (zone->zlog_btlog && !corruption_debug_flag) {
+ // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it finds
+ btlog_copy_backtraces_for_elements(zone->zlog_btlog, array, &count, elemSize, proc, refCon);
+ }
+
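+ // Report the remaining leak candidates; entries flagged as referenced,
+ // whether by the scan or by the btlog pass above, are skipped.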
+ for (nobtcount = idx = 0; idx < count; idx++) {
+ element = array[idx];
+ if (!element) {
+ continue;
+ }
+ if (kInstanceFlagReferenced & element) {
+ continue;
+ }
+ element = INSTANCE_PUT(element) & ~kInstanceFlags;
+
+ // see if we can find any backtrace left in the element
+ btcount = (typeof(btcount))(zone->elem_size / sizeof(uintptr_t));
+ if (btcount >= MAX_ZTRACE_DEPTH) {
+ btcount = MAX_ZTRACE_DEPTH - 1;
+ }
+ for (btfound = btidx = 0; btidx < btcount; btidx++) {
+ bt = ((uintptr_t *)element)[btcount - 1 - btidx];
+ if (!VM_KERNEL_IS_SLID(bt)) {
+ break;
+ }
+ zbt[btfound++] = bt;
+ }
+ if (btfound) {
+ (*proc)(refCon, 1, elemSize, &zbt[0], btfound);
+ } else {
+ nobtcount++;
+ }
+ }
+ if (nobtcount) {
+ // fake backtrace when we found nothing
+ zbt[0] = (uintptr_t) &zalloc;
+ (*proc)(refCon, nobtcount, elemSize, &zbt[0], 1);
+ }
+
+ kmem_free(kernel_map, (vm_offset_t) array, maxElems * sizeof(uintptr_t));
+
+ return KERN_SUCCESS;