+ IOKitDiagnostics * diags;
+
+ diags = new IOKitDiagnostics;
+ if (diags && !diags->init()) {
+ diags->release();
+ diags = NULL;
+ }
+
+ return diags;
+}
+
+// Store `value` into `dict` under key `name` as a 64-bit OSNumber.
+// Silently a no-op if the OSNumber cannot be created.
+void
+IOKitDiagnostics::updateOffset( OSDictionary * dict,
+ UInt64 value, const char * name )
+{
+ OSNumber * num = OSNumber::withNumber( value, 64 );
+
+ if (num) {
+ // Dictionary retains the value; drop our creation reference.
+ dict->setObject( name, num );
+ num->release();
+ }
+}
+
+// Serialize a snapshot of the global IOKit allocation counters, plus the
+// per-class instance counts from OSMetaClass, into `s` as a dictionary.
+// Returns false on allocation or serialization failure.
+bool
+IOKitDiagnostics::serialize(OSSerialize *s) const
+{
+ OSDictionary * dict;
+ bool ok;
+
+ dict = OSDictionary::withCapacity( 5 );
+ if (!dict) {
+ return false;
+ }
+
+ // Global debug counters maintained by the kernel allocators
+ // (presumably declared in OSCPPDebug.h — confirm).
+ updateOffset( dict, debug_ivars_size, "Instance allocation" );
+ updateOffset( dict, debug_container_malloc_size, "Container allocation" );
+ updateOffset( dict, debug_iomalloc_size, "IOMalloc allocation" );
+ updateOffset( dict, debug_iomallocpageable_size, "Pageable allocation" );
+
+ // Adds per-metaclass instance counts into the same dictionary.
+ OSMetaClass::serializeClassDictionary(dict);
+
+ ok = dict->serialize( s );
+
+ dict->release();
+
+ return ok;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#if IOTRACKING
+
+#include <libkern/c++/OSCPPDebug.h>
+#include <libkern/c++/OSKext.h>
+#include <kern/zalloc.h>
+
+// Fix: `__private_extern__ "C"` is not valid C++ — __private_extern__
+// expands to a visibility attribute and cannot introduce a linkage
+// specification; a string-literal linkage spec requires `extern "C"`
+// (matching the pmap declarations below).
+extern "C" void qsort(
+ void * array,
+ size_t nmembers,
+ size_t member_size,
+ int (*)(const void *, const void *));
+
+extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
+extern "C" ppnum_t pmap_valid_page(ppnum_t pn);
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// Recursive mutex built on lck_mtx_t: the owning thread may re-acquire,
+// with `count` tracking the recursion depth.
+struct IOTRecursiveLock {
+ lck_mtx_t * mutex;
+ thread_t thread; // current owner; NULL when unlocked
+ UInt32 count; // recursion depth held by `thread`
+};
+
+// One registered tracking domain; lives on gIOTrackingQ.
+struct IOTrackingQueue {
+ queue_chain_t link; // linkage on gIOTrackingQ
+ IOTRecursiveLock lock; // guards sites[] and siteCount
+ const char * name; // label (pointer stored, not copied)
+ uintptr_t btEntry;
+ size_t allocSize; // fixed object size for this queue
+ size_t minCaptureSize; // smaller allocations are not captured
+ uint32_t siteCount; // entries across all sites[] buckets
+ uint32_t type; // kIOTrackingQueueType* flags
+ uint32_t numSiteQs; // number of hash buckets in sites[]
+ uint8_t captureOn; // nonzero: record new allocations
+ queue_head_t sites[]; // flexible array of call-site buckets
+};
+
+// Aggregated record for one (backtrace crc, vm tag) allocation site.
+struct IOTrackingCallSite {
+ queue_chain_t link; // linkage in its sites[] hash bucket
+ IOTrackingQueue * queue; // owning queue
+ uint32_t crc; // fasthash32 of the captured backtrace
+
+ vm_tag_t tag;
+ uint32_t count; // live instances attributed to this site
+ size_t size[2]; // byte totals (two counters; index 0 used on add/remove)
+ uintptr_t bt[kIOTrackingCallSiteBTs]; // captured backtrace, zero-padded
+
+ queue_head_t instances; // live IOTracking records for this site
+ IOTracking * addresses; // first address-style entry in `instances`
+};
+
+// Scratch state passed through the leak-scan (instances being searched
+// for, plus match counters). NOTE(review): semantics of the fields are
+// inferred from names; the scan code is outside this chunk — confirm.
+struct IOTrackingLeaksRef {
+ uintptr_t * instances;
+ uint32_t zoneSize;
+ uint32_t count;
+ uint32_t found;
+ uint32_t foundzlen;
+ size_t bytes;
+};
+
+// Global registry of all tracking queues; gIOTrackingLock guards gIOTrackingQ.
+lck_mtx_t * gIOTrackingLock;
+queue_head_t gIOTrackingQ;
+
+enum{
+ kTrackingAddressFlagAllocated = 0x00000001
+};
+
+// Accessor for the per-address flag word. NOTE(review): on LP64 the flags
+// appear to live directly in the address record, on 32-bit inside its
+// embedded `tracking` member — confirm against the struct definitions.
+#if defined(__LP64__)
+#define IOTrackingAddressFlags(ptr) (ptr->flags)
+#else
+#define IOTrackingAddressFlags(ptr) (ptr->tracking.flags)
+#endif
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// Acquire `lock`; the owning thread may acquire it recursively.
+static void
+IOTRecursiveLockLock(IOTRecursiveLock * lock)
+{
+ if (lock->thread != current_thread()) {
+ // Not the owner: block on the mutex, then record ownership.
+ lck_mtx_lock(lock->mutex);
+ assert(lock->thread == NULL);
+ assert(lock->count == 0);
+ lock->thread = current_thread();
+ lock->count = 1;
+ } else {
+ // Re-entry by the current owner: just bump the depth.
+ lock->count++;
+ }
+}
+
+// Release one level of `lock`; the mutex is dropped when the
+// recursion depth reaches zero. Caller must be the owner.
+static void
+IOTRecursiveLockUnlock(IOTRecursiveLock * lock)
+{
+ assert(lock->thread == current_thread());
+ lock->count--;
+ if (lock->count == 0) {
+ lock->thread = NULL;
+ lck_mtx_unlock(lock->mutex);
+ }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// One-time initialization of the global tracking registry and its lock.
+void
+IOTrackingInit(void)
+{
+ queue_init(&gIOTrackingQ);
+ gIOTrackingLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// Allocate, initialize and globally register a tracking queue.
+//  name           - diagnostic label (pointer stored, not copied; must stay valid)
+//  btEntry        - backtrace entry point, stored for later use
+//  allocSize      - fixed allocation size for this queue's objects
+//  minCaptureSize - allocations smaller than this are not captured
+//  type           - kIOTrackingQueueType* flags
+//  numSiteQs      - call-site hash bucket count (clamped to at least 1)
+// Returns the new queue, or NULL on allocation failure.
+IOTrackingQueue *
+IOTrackingQueueAlloc(const char * name, uintptr_t btEntry,
+ size_t allocSize, size_t minCaptureSize,
+ uint32_t type, uint32_t numSiteQs)
+{
+ IOTrackingQueue * queue;
+ uint32_t idx;
+
+ if (!numSiteQs) {
+ numSiteQs = 1;
+ }
+ queue = (typeof(queue))kalloc(sizeof(IOTrackingQueue) + numSiteQs * sizeof(queue->sites[0]));
+ if (!queue) {
+ // Fix: the result was previously bzero'ed and dereferenced with no
+ // NULL check, panicking on allocation failure.
+ return NULL;
+ }
+ // Zero only the fixed header; the trailing sites[] buckets are
+ // initialized one by one below.
+ bzero(queue, sizeof(IOTrackingQueue));
+
+ queue->name = name;
+ queue->btEntry = btEntry;
+ queue->allocSize = allocSize;
+ queue->minCaptureSize = minCaptureSize;
+ queue->lock.mutex = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
+ queue->numSiteQs = numSiteQs;
+ queue->type = type;
+ enum { kFlags = (kIOTracking | kIOTrackingBoot) };
+ // Capture from boot when both debug flags are set, or when the queue
+ // type defaults capture to on.
+ queue->captureOn = (kFlags == (kFlags & gIOKitDebug))
+ || (kIOTrackingQueueTypeDefaultOn & type);
+
+ for (idx = 0; idx < numSiteQs; idx++) {
+ queue_init(&queue->sites[idx]);
+ }
+
+ lck_mtx_lock(gIOTrackingLock);
+ queue_enter(&gIOTrackingQ, queue, IOTrackingQueue *, link);
+ lck_mtx_unlock(gIOTrackingLock);
+
+ return queue;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// Unregister `queue`, drop all of its tracked entries, and free it.
+// Must not be called while trackings are still being added on it.
+void
+IOTrackingQueueFree(IOTrackingQueue * queue)
+{
+ lck_mtx_lock(gIOTrackingLock);
+ // Empty the queue's site buckets before unlinking from the registry.
+ IOTrackingReset(queue);
+ remque(&queue->link);
+ lck_mtx_unlock(gIOTrackingLock);
+
+ lck_mtx_free(queue->lock.mutex, IOLockGroup);
+
+ // Size must match the flexible-array allocation in IOTrackingQueueAlloc.
+ kfree(queue, sizeof(IOTrackingQueue) + queue->numSiteQs * sizeof(queue->sites[0]));
+};
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* fasthash
+ * The MIT License
+ *
+ * Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com)
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+// Compression function for Merkle-Damgard construction.
+// This function is generated using the framework provided.
+// xorshift-multiply-xorshift avalanche step used by fasthash64;
+// statement-expression so it both mutates and yields (h).
+#define mix(h) ({ \
+ (h) ^= (h) >> 23; \
+ (h) *= 0x2127599bf4325c37ULL; \
+ (h) ^= (h) >> 47; })
+
+// fasthash64: hash `len` bytes at `buf` with `seed`, consuming the input
+// in 64-bit words and folding the 1..7 byte tail little-endian.
+// Deterministic for given (buf contents, len, seed).
+static uint64_t
+fasthash64(const void *buf, size_t len, uint64_t seed)
+{
+ const uint64_t m = 0x880355f21e6d1965ULL;
+ const uint64_t *pos = (const uint64_t *)buf;
+ const uint64_t *end = pos + (len / 8);
+ const unsigned char *pos2;
+ uint64_t h = seed ^ (len * m);
+ uint64_t v;
+
+ // Whole 64-bit words.
+ while (pos != end) {
+ v = *pos++;
+ h ^= mix(v);
+ h *= m;
+ }
+
+ pos2 = (const unsigned char*)pos;
+ v = 0;
+
+ // Remaining tail bytes, packed into v from high byte down.
+ switch (len & 7) {
+ case 7: v ^= (uint64_t)pos2[6] << 48;
+ [[clang::fallthrough]];
+ case 6: v ^= (uint64_t)pos2[5] << 40;
+ [[clang::fallthrough]];
+ case 5: v ^= (uint64_t)pos2[4] << 32;
+ [[clang::fallthrough]];
+ case 4: v ^= (uint64_t)pos2[3] << 24;
+ [[clang::fallthrough]];
+ case 3: v ^= (uint64_t)pos2[2] << 16;
+ [[clang::fallthrough]];
+ case 2: v ^= (uint64_t)pos2[1] << 8;
+ [[clang::fallthrough]];
+ case 1: v ^= (uint64_t)pos2[0];
+ h ^= mix(v);
+ h *= m;
+ }
+
+ // Final avalanche.
+ return mix(h);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// 32-bit front end for fasthash64. Folds the 64-bit hash to 32 bits as
+// a Fermat residue so information from both halves of the hashcode is
+// retained.
+static uint32_t
+fasthash32(const void *buf, size_t len, uint32_t seed)
+{
+ const uint64_t h = fasthash64(buf, len, seed);
+ return (uint32_t)(h - (h >> 32));
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// Record a user-visible tracking entry `mem` of `size` bytes on `queue`,
+// capturing both a kernel backtrace and (when not the kernel task) a
+// user-space backtrace plus the owning pid.
+void
+IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size)
+{
+ uint32_t num;
+ proc_t self;
+
+ if (!queue->captureOn) {
+ return;
+ }
+ if (size < queue->minCaptureSize) {
+ return;
+ }
+
+ assert(!mem->link.next);
+
+ // Kernel backtrace is stored in mem->bt; num is then deliberately
+ // reset because userCount below counts only user frames.
+ num = backtrace(&mem->bt[0], kIOTrackingCallSiteBTs, NULL);
+ num = 0;
+ if ((kernel_task != current_task()) && (self = proc_self())) {
+ bool user_64 = false;
+ mem->btPID = proc_pid(self);
+ // One slot fewer than the kernel bt — NOTE(review): presumably to
+ // reserve space/terminator; confirm against the consumer.
+ num = backtrace_user(&mem->btUser[0], kIOTrackingCallSiteBTs - 1, NULL,
+ &user_64, NULL);
+ mem->user32 = !user_64;
+ proc_rele(self);
+ }
+ assert(num <= kIOTrackingCallSiteBTs);
+ mem->userCount = num;
+
+ // User entries all live in bucket 0 (no per-site hashing here).
+ IOTRecursiveLockLock(&queue->lock);
+ queue_enter/*last*/ (&queue->sites[0], mem, IOTrackingUser *, link);
+ queue->siteCount++;
+ IOTRecursiveLockUnlock(&queue->lock);
+}
+
+// Remove a user tracking entry added by IOTrackingAddUser.
+// Safe to call for an entry that was never enqueued.
+void
+IOTrackingRemoveUser(IOTrackingQueue * queue, IOTrackingUser * mem)
+{
+ // Cheap unlocked pre-check: never enqueued, nothing to do.
+ if (!mem->link.next) {
+ return;
+ }
+
+ IOTRecursiveLockLock(&queue->lock);
+ // Re-check under the lock in case it was removed concurrently.
+ if (mem->link.next) {
+ remque(&mem->link);
+ assert(queue->siteCount);
+ queue->siteCount--;
+ }
+ IOTRecursiveLockUnlock(&queue->lock);
+}
+
+// NOTE(review): declared but not updated anywhere in this chunk —
+// presumably maintained by callers or instrumentation elsewhere; confirm.
+uint64_t gIOTrackingAddTime;
+
+// Attribute tracking record `mem` (size bytes, vm tag `tag`) to the call
+// site that allocated it, creating the site record on first sight.
+// `address` selects address-style bookkeeping (tail insert + `addresses`
+// pointer maintenance) vs. plain instance tracking (head insert).
+void
+IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool address, vm_tag_t tag)
+{
+ IOTrackingCallSite * site;
+ uint32_t crc, num;
+ uintptr_t bt[kIOTrackingCallSiteBTs + 1];
+ queue_head_t * que;
+
+ if (mem->site) {
+ return;
+ }
+ if (!queue->captureOn) {
+ return;
+ }
+ if (size < queue->minCaptureSize) {
+ return;
+ }
+
+ assert(!mem->link.next);
+
+ // Capture one extra frame, then drop bt[0] (this function's own
+ // caller) so the crc identifies the allocation site.
+ num = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1, NULL);
+ if (!num) {
+ return;
+ }
+ num--;
+ crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7);
+
+ IOTRecursiveLockLock(&queue->lock);
+ // Look up the (crc, tag) site in its hash bucket.
+ que = &queue->sites[crc % queue->numSiteQs];
+ queue_iterate(que, site, IOTrackingCallSite *, link)
+ {
+ if (tag != site->tag) {
+ continue;
+ }
+ if (crc == site->crc) {
+ break;
+ }
+ }
+
+ // Not found: create a new site record.
+ // NOTE(review): kalloc result is not NULL-checked before use — would
+ // deref NULL on allocation failure; confirm kalloc policy here.
+ if (queue_end(que, (queue_entry_t) site)) {
+ site = (typeof(site))kalloc(sizeof(IOTrackingCallSite));
+
+ queue_init(&site->instances);
+ // Sentinel: `addresses` at queue head means "no address entries yet".
+ site->addresses = (IOTracking *) &site->instances;
+ site->queue = queue;
+ site->crc = crc;
+ site->count = 0;
+ site->tag = tag;
+ memset(&site->size[0], 0, sizeof(site->size));
+ bcopy(&bt[1], &site->bt[0], num * sizeof(site->bt[0]));
+ assert(num <= kIOTrackingCallSiteBTs);
+ // Zero-pad unused backtrace slots.
+ bzero(&site->bt[num], (kIOTrackingCallSiteBTs - num) * sizeof(site->bt[0]));
+
+ queue_enter_first(que, site, IOTrackingCallSite *, link);
+ queue->siteCount++;
+ }
+
+ if (address) {
+ // Address entries append at the tail; if `addresses` still points
+ // at the sentinel/end, this entry becomes the first address entry.
+ queue_enter/*last*/ (&site->instances, mem, IOTracking *, link);
+ if (queue_end(&site->instances, (queue_entry_t)site->addresses)) {
+ site->addresses = mem;
+ }
+ } else {
+ // Plain instance entries prepend, staying ahead of address entries.
+ queue_enter_first(&site->instances, mem, IOTracking *, link);
+ }
+
+ mem->site = site;
+ site->size[0] += size;
+ site->count++;
+
+ IOTRecursiveLockUnlock(&queue->lock);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// Undo IOTrackingAdd for `mem`: unlink it from its call site, adjust the
+// site's counters by `size`, and free the site when it becomes empty.
+// Safe to call for an entry that was never tracked.
+void
+IOTrackingRemove(IOTrackingQueue * queue, IOTracking * mem, size_t size)
+{
+ // Cheap unlocked pre-check: never enqueued, nothing to do.
+ if (!mem->link.next) {
+ return;
+ }
+
+ IOTRecursiveLockLock(&queue->lock);
+ // Re-check under the lock in case of concurrent removal.
+ if (mem->link.next) {
+ assert(mem->site);
+
+ // Keep the site's first-address pointer valid if we remove it.
+ if (mem == mem->site->addresses) {
+ mem->site->addresses = (IOTracking *) queue_next(&mem->link);
+ }
+ remque(&mem->link);
+
+ assert(mem->site->count);
+ mem->site->count--;
+ assert(mem->site->size[0] >= size);
+ mem->site->size[0] -= size;
+ // Last instance gone: retire the whole call-site record.
+ if (!mem->site->count) {
+ assert(queue_empty(&mem->site->instances));
+ assert(!mem->site->size[0]);
+ assert(!mem->site->size[1]);
+
+ remque(&mem->site->link);
+ assert(queue->siteCount);
+ queue->siteCount--;
+ kfree(mem->site, sizeof(IOTrackingCallSite));
+ }
+ mem->site = NULL;
+ }
+ IOTRecursiveLockUnlock(&queue->lock);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+void
+IOTrackingAlloc(IOTrackingQueue * queue, uintptr_t address, size_t size)
+{
+ IOTrackingAddress * tracking;
+
+ if (!queue->captureOn) {
+ return;
+ }
+ if (size < queue->minCaptureSize) {
+ return;
+ }