+void
+IOTrackingReset(IOTrackingQueue * queue)
+{
+ IOTrackingCallSite * site;
+ IOTrackingUser * user;
+ IOTracking * tracking;
+ IOTrackingAddress * trackingAddress;
+ uint32_t idx;
+ bool addresses;
+
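+ // Drain every site bucket under the queue lock: unlink user records for
+ // map-type queues; for allocation queues, free any heap-allocated
+ // IOTrackingAddress records and then the call site itself.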
+ IOTRecursiveLockLock(&queue->lock);
+ for (idx = 0; idx < queue->numSiteQs; idx++) {
+ while (!queue_empty(&queue->sites[idx])) {
+ if (kIOTrackingQueueTypeMap & queue->type) {
+ queue_remove_first(&queue->sites[idx], user, IOTrackingUser *, link);
+ user->link.next = user->link.prev = NULL;
+ } else {
+ queue_remove_first(&queue->sites[idx], site, IOTrackingCallSite *, link);
+ addresses = false;
+ while (!queue_empty(&site->instances)) {
+ queue_remove_first(&site->instances, tracking, IOTracking *, link);
+ if (tracking == site->addresses) {
+ addresses = true;
+ }
+ if (addresses) {
+ trackingAddress = (typeof(trackingAddress))tracking;
+ if (kTrackingAddressFlagAllocated & IOTrackingAddressFlags(trackingAddress)) {
+ kfree(tracking, sizeof(IOTrackingAddress));
+ }
+ }
+ }
+ kfree(site, sizeof(IOTrackingCallSite));
+ }
+ }
+ }
+ queue->siteCount = 0;
+ IOTRecursiveLockUnlock(&queue->lock);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
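+// qsort comparator: orders IOTrackingCallSiteInfo records by total size
+// (size[0] + size[1]), largest first.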
+static int
+IOTrackingCallSiteInfoCompare(const void * left, const void * right)
+{
+ IOTrackingCallSiteInfo * l = (typeof(l))left;
+ IOTrackingCallSiteInfo * r = (typeof(r))right;
+ size_t lsize, rsize;
+
+ rsize = r->size[0] + r->size[1];
+ lsize = l->size[0] + l->size[1];
+
+ return (rsize > lsize) ? 1 : ((rsize == lsize) ? 0 : -1);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
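+// qsort comparator for the tagged instance-pointer array built for leak scanning.
+// Entries may carry kInstanceFlags in their low bits; ordering is by the start
+// address of the tracked allocation (IOTrackingAddress stores the address
+// bit-inverted, hence the ~).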
+static int
+IOTrackingAddressCompare(const void * left, const void * right)
+{
+ IOTracking * instance;
+ uintptr_t inst, laddr, raddr;
+
+ inst = ((typeof(inst) *)left)[0];
+ instance = (typeof(instance))INSTANCE_GET(inst);
+ if (kInstanceFlagAddress & inst) {
+ laddr = ~((IOTrackingAddress *)instance)->address;
+ } else {
+ laddr = (uintptr_t) (instance + 1);
+ }
+
+ inst = ((typeof(inst) *)right)[0];
+ instance = (typeof(instance))(inst & ~kInstanceFlags);
+ if (kInstanceFlagAddress & inst) {
+ raddr = ~((IOTrackingAddress *)instance)->address;
+ } else {
+ raddr = (uintptr_t) (instance + 1);
+ }
+
+ return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
+}
+
+
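+// qsort comparator for raw zone element addresses; INSTANCE_PUT forces the low
+// flag bits set on both sides so the comparison ignores them.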
+static int
+IOTrackingZoneElementCompare(const void * left, const void * right)
+{
+ uintptr_t inst, laddr, raddr;
+
+ inst = ((typeof(inst) *)left)[0];
+ laddr = INSTANCE_PUT(inst);
+ inst = ((typeof(inst) *)right)[0];
+ raddr = INSTANCE_PUT(inst);
+
+ return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
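+// Copy a call site's saved kernel backtrace into siteInfo, unsliding each frame.
+// If the owning queue records a fixed entry frame (btEntry), it is substituted
+// for the first empty slot or, failing that, the last slot.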
+static void
+CopyOutKernelBacktrace(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteInfo)
+{
+ uint32_t j;
+ mach_vm_address_t bt, btEntry;
+
+ btEntry = site->queue->btEntry;
+ for (j = 0; j < kIOTrackingCallSiteBTs; j++) {
+ bt = site->bt[j];
+ if (btEntry
+ && (!bt || (j == (kIOTrackingCallSiteBTs - 1)))) {
+ bt = btEntry;
+ btEntry = 0;
+ }
+ siteInfo->bt[0][j] = VM_KERNEL_UNSLIDE(bt);
+ }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
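+// Conservative scan for references to the candidate allocations in ref->instances
+// (sorted by address). Every present kernel page is read through its physical
+// alias and each pointer-sized word is binary-searched against the candidates;
+// a pointer from outside an allocation marks it kInstanceFlagReferenced.
+// The walk runs with interrupts disabled in short timed slices.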
+static void
+IOTrackingLeakScan(void * refcon)
+{
+ IOTrackingLeaksRef * ref = (typeof(ref))refcon;
+ uintptr_t * instances;
+ IOTracking * instance;
+ uint64_t vaddr, vincr;
+ ppnum_t ppn;
+ uintptr_t ptr, addr, vphysaddr, inst;
+ size_t size, origsize;
+ uint32_t baseIdx, lim, ptrIdx, count;
+ boolean_t is;
+ AbsoluteTime deadline;
+
+ instances = ref->instances;
+ count = ref->count;
+ size = origsize = ref->zoneSize;
+
+ for (deadline = 0, vaddr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
+ ;
+ vaddr += vincr) {
+ if ((mach_absolute_time() > deadline) || (vaddr >= VM_MAX_KERNEL_ADDRESS)) {
+ if (deadline) {
+ ml_set_interrupts_enabled(is);
+ IODelay(10);
+ }
+ if (vaddr >= VM_MAX_KERNEL_ADDRESS) {
+ break;
+ }
+ is = ml_set_interrupts_enabled(false);
+ clock_interval_to_deadline(10, kMillisecondScale, &deadline);
+ }
+
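+ // find the next present mapping; vincr is the span to advance by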
+ ppn = kernel_pmap_present_mapping(vaddr, &vincr, &vphysaddr);
+ // check noencrypt to avoid VM structs (map entries) with pointers
+ if (ppn && (!pmap_valid_page(ppn) || (!ref->zoneSize && pmap_is_noencrypt(ppn)))) {
+ ppn = 0;
+ }
+ if (!ppn) {
+ continue;
+ }
+
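+ // binary-search every pointer-sized word in this page against the candidates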
+ for (ptrIdx = 0; ptrIdx < (page_size / sizeof(uintptr_t)); ptrIdx++) {
+ ptr = ((uintptr_t *)vphysaddr)[ptrIdx];
+
+ for (lim = count, baseIdx = 0; lim; lim >>= 1) {
+ inst = instances[baseIdx + (lim >> 1)];
+ instance = (typeof(instance))INSTANCE_GET(inst);
+
+ if (ref->zoneSize) {
+ addr = INSTANCE_PUT(inst) & ~kInstanceFlags;
+ } else if (kInstanceFlagAddress & inst) {
+ addr = ~((IOTrackingAddress *)instance)->address;
+ origsize = size = ((IOTrackingAddress *)instance)->size;
+ if (!size) {
+ size = 1;
+ }
+ } else {
+ addr = (uintptr_t) (instance + 1);
+ origsize = size = instance->site->queue->allocSize;
+ }
+ // a hit: ptr points into [addr, addr + size) and the word holding ptr
+ // lies outside that range (an allocation referencing itself is ignored)
+ if ((ptr >= addr) && (ptr < (addr + size))
+ && (((vaddr + ptrIdx * sizeof(uintptr_t)) < addr)
+ || ((vaddr + ptrIdx * sizeof(uintptr_t)) >= (addr + size)))) {
+ if (!(kInstanceFlagReferenced & inst)) {
+ inst |= kInstanceFlagReferenced;
+ instances[baseIdx + (lim >> 1)] = inst;
+ ref->found++;
+ if (!origsize) {
+ ref->foundzlen++;
+ }
+ }
+ break;
+ }
+ if (ptr > addr) {
+ // move right
+ baseIdx += (lim >> 1) + 1;
+ lim--;
+ }
+ // else move left
+ }
+ }
+ ref->bytes += page_size;
+ }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
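+// Entry point for the zone allocator's leak detection: instances[] holds
+// candidate zone element addresses. Sort them, run up to two scan passes,
+// and report how many elements were found to be referenced.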
+extern "C" void
+zone_leaks_scan(uintptr_t * instances, uint32_t count, uint32_t zoneSize, uint32_t * found)
+{
+ IOTrackingLeaksRef ref;
+ IOTrackingCallSiteInfo siteInfo;
+ uint32_t idx;
+
+ qsort(instances, count, sizeof(*instances), &IOTrackingZoneElementCompare);
+
+ bzero(&siteInfo, sizeof(siteInfo));
+ bzero(&ref, sizeof(ref));
+ ref.instances = instances;
+ ref.count = count;
+ ref.zoneSize = zoneSize;
+
+ for (idx = 0; idx < 2; idx++) {
+ ref.bytes = 0;
+ IOTrackingLeakScan(&ref);
+ IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d\n", idx, ref.bytes / 1024 / 1024, count, ref.found);
+ if (count <= ref.found) {
+ break;
+ }
+ }
+
+ *found = ref.found;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
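+// zone_leaks() callback: refCon is the OSData being built; append one
+// IOTrackingCallSiteInfo per leaked zone backtrace, with frames unslid.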
+static void
+ZoneSiteProc(void * refCon, uint32_t siteCount, uint32_t zoneSize,
+ uintptr_t * backtrace, uint32_t btCount)
+{
+ IOTrackingCallSiteInfo siteInfo;
+ OSData * leakData;
+ uint32_t idx;
+
+ leakData = (typeof(leakData))refCon;
+
+ bzero(&siteInfo, sizeof(siteInfo));
+ siteInfo.count = siteCount;
+ siteInfo.size[0] = zoneSize * siteCount;
+
+ for (idx = 0; (idx < btCount) && (idx < kIOTrackingCallSiteBTs); idx++) {
+ siteInfo.bt[0][idx] = VM_KERNEL_UNSLIDE(backtrace[idx]);
+ }
+
+ leakData->appendBytes(&siteInfo, sizeof(siteInfo));
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
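+// Consumes the passed-in OSData of tagged instance pointers (gathered with the
+// queue locks held): sorts by address, runs the leak scan (up to two passes),
+// then coalesces the instances still unreferenced by call site and returns one
+// IOTrackingCallSiteInfo per site in a new OSData.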
+static OSData *
+IOTrackingLeaks(LIBKERN_CONSUMED OSData * data)
+{
+ IOTrackingLeaksRef ref;
+ IOTrackingCallSiteInfo siteInfo;
+ IOTrackingCallSite * site;
+ OSData * leakData;
+ uintptr_t * instances;
+ IOTracking * instance;
+ uintptr_t inst;
+ uint32_t count, idx, numSites, dups, siteCount;
+
+ instances = (typeof(instances))data->getBytesNoCopy();
+ count = (data->getLength() / sizeof(*instances));
+ qsort(instances, count, sizeof(*instances), &IOTrackingAddressCompare);
+
+ bzero(&siteInfo, sizeof(siteInfo));
+ bzero(&ref, sizeof(ref));
+ ref.instances = instances;
+ ref.count = count;
+ for (idx = 0; idx < 2; idx++) {
+ ref.bytes = 0;
+ IOTrackingLeakScan(&ref);
+ IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d (zlen %d)\n", idx, ref.bytes / 1024 / 1024, count, ref.found, ref.foundzlen);
+ if (count <= ref.found) {
+ break;
+ }
+ }
+
+ leakData = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));
+
+ for (numSites = 0, idx = 0; idx < count; idx++) {
+ inst = instances[idx];
+ if (kInstanceFlagReferenced & inst) {
+ continue;
+ }
+ instance = (typeof(instance))INSTANCE_GET(inst);
+ site = instance->site;
+ instances[numSites] = (uintptr_t) site;
+ numSites++;
+ }
+
+ for (idx = 0; idx < numSites; idx++) {
+ inst = instances[idx];
+ if (!inst) {
+ continue;
+ }
+ site = (typeof(site))inst;
+ for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
+ if (instances[dups] == (uintptr_t) site) {
+ siteCount++;
+ instances[dups] = 0;
+ }
+ }
+ siteInfo.count = siteCount;
+ siteInfo.size[0] = (site->size[0] * site->count) / siteCount;
+ siteInfo.size[1] = (site->size[1] * site->count) / siteCount;
+ CopyOutKernelBacktrace(site, &siteInfo);
+ leakData->appendBytes(&siteInfo, sizeof(siteInfo));
+ }
+ data->release();
+
+ return leakData;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
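+// names is a packed list of length-prefixed, unterminated strings. Returns true
+// when the queue named 'name' should be skipped, honoring kIOTrackingExcludeNames.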
+static bool
+SkipName(uint32_t options, const char * name, size_t namesLen, const char * names)
+{
+ const char * scan;
+ const char * next;
+ bool exclude, found;
+ size_t qLen, sLen;
+
+ if (!namesLen || !names) {
+ return false;
+ }
+ // <len><name>...<len><name><0>
+ exclude = (0 != (kIOTrackingExcludeNames & options));
+ qLen = strlen(name);
+ scan = names;
+ found = false;
+ do{
+ sLen = scan[0];
+ scan++;
+ next = scan + sLen;
+ if (next >= (names + namesLen)) {
+ break;
+ }
+ found = ((sLen == qLen) && !strncmp(scan, name, sLen));
+ scan = next;
+ }while (!found && (scan < (names + namesLen)));
+
+ return !(exclude ^ found);
+}
+
+#endif /* IOTRACKING */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
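+// Dispatcher for the diagnostics selectors (reset, start/stop capture, leaks,
+// get tracking, get mappings). Walks every registered tracking queue under
+// gIOTrackingLock and accumulates results into an OSData returned via *result.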
+static kern_return_t
+IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value,
+ uint32_t intag, uint32_t inzsize,
+ const char * names, size_t namesLen,
+ size_t size, OSObject ** result)
+{
+ kern_return_t ret;
+ OSData * data;
+
+ if (result) {
+ *result = NULL;
+ }
+ data = NULL;
+ ret = kIOReturnNotReady;
+
+#if IOTRACKING
+
+ kern_return_t kr;
+ IOTrackingQueue * queue;
+ IOTracking * instance;
+ IOTrackingCallSite * site;
+ IOTrackingCallSiteInfo siteInfo;
+ IOTrackingUser * user;
+ task_t mapTask;
+ mach_vm_address_t mapAddress;
+ mach_vm_size_t mapSize;
+ uint32_t num, idx, qIdx;
+ uintptr_t instFlags;
+ proc_t proc;
+ bool addresses;
+
+ ret = kIOReturnNotFound;
+ proc = NULL;
+ if (kIOTrackingGetMappings == selector) {
+ if (value != -1ULL) {
+ proc = proc_find(value);
+ if (!proc) {
+ return kIOReturnNotFound;
+ }
+ }
+ }
+
+ bzero(&siteInfo, sizeof(siteInfo));
+ lck_mtx_lock(gIOTrackingLock);
+ queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
+ {
+ if (SkipName(options, queue->name, namesLen, names)) {
+ continue;
+ }
+
+ if (!(kIOTracking & gIOKitDebug) && (kIOTrackingQueueTypeAlloc & queue->type)) {
+ continue;
+ }
+
+ switch (selector) {
+ case kIOTrackingResetTracking:
+ {
+ IOTrackingReset(queue);
+ ret = kIOReturnSuccess;
+ break;
+ }
+
+ case kIOTrackingStartCapture:
+ case kIOTrackingStopCapture:
+ {
+ queue->captureOn = (kIOTrackingStartCapture == selector);
+ ret = kIOReturnSuccess;
+ break;
+ }
+
+ case kIOTrackingSetMinCaptureSize:
+ {
+ queue->minCaptureSize = size;
+ ret = kIOReturnSuccess;
+ break;
+ }
+
+ case kIOTrackingLeaks:
+ {
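+ // collect every live instance pointer, tagging those on the addresses list;
+ // the queue lock is intentionally kept held until after the scan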
+ if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
+ break;
+ }
+
+ if (!data) {
+ data = OSData::withCapacity(1024 * sizeof(uintptr_t));
+ }
+
+ IOTRecursiveLockLock(&queue->lock);
+ for (idx = 0; idx < queue->numSiteQs; idx++) {
+ queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
+ {
+ addresses = false;
+ queue_iterate(&site->instances, instance, IOTracking *, link)
+ {
+ if (instance == site->addresses) {
+ addresses = true;
+ }
+ instFlags = (typeof(instFlags))instance;
+ if (addresses) {
+ instFlags |= kInstanceFlagAddress;
+ }
+ data->appendBytes(&instFlags, sizeof(instFlags));
+ }
+ }
+ }
+ // queue->lock intentionally left held; released after IOTrackingLeaks() below
+ ret = kIOReturnSuccess;
+ break;
+ }
+
+
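+ // copy out per-call-site counts, sizes and backtraces, optionally filtered
+ // by VM tag and/or zone size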
+ case kIOTrackingGetTracking:
+ {
+ if (kIOTrackingQueueTypeMap & queue->type) {
+ break;
+ }
+
+ if (!data) {
+ data = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));
+ }
+
+ IOTRecursiveLockLock(&queue->lock);
+ num = queue->siteCount;
+ idx = 0;
+ for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
+ queue_iterate(&queue->sites[qIdx], site, IOTrackingCallSite *, link)
+ {
+ assert(idx < num);
+ idx++;
+
+ size_t tsize[2];
+ uint32_t count = site->count;
+ tsize[0] = site->size[0];
+ tsize[1] = site->size[1];
+
+ if (intag || inzsize) {
+ uintptr_t addr;
+ vm_size_t size, zoneSize;
+ vm_tag_t tag;
+
+ if (kIOTrackingQueueTypeAlloc & queue->type) {
+ addresses = false;
+ count = 0;
+ tsize[0] = tsize[1] = 0;
+ queue_iterate(&site->instances, instance, IOTracking *, link)
+ {
+ if (instance == site->addresses) {
+ addresses = true;
+ }
+
+ if (addresses) {
+ addr = ~((IOTrackingAddress *)instance)->address;
+ } else {
+ addr = (uintptr_t) (instance + 1);
+ }
+
+ kr = vm_kern_allocation_info(addr, &size, &tag, &zoneSize);
+ if (KERN_SUCCESS != kr) {
+ continue;
+ }
+
+ if ((VM_KERN_MEMORY_NONE != intag) && (intag != tag)) {
+ continue;
+ }
+ if (inzsize && (inzsize != zoneSize)) {
+ continue;
+ }
+
+ count++;
+ tsize[0] += size;
+ }
+ } else {
+ if (!intag || inzsize || (intag != site->tag)) {
+ continue;
+ }
+ }
+ }
+
+ if (!count) {
+ continue;
+ }
+ if (size && ((tsize[0] + tsize[1]) < size)) {
+ continue;
+ }
+
+ siteInfo.count = count;
+ siteInfo.size[0] = tsize[0];
+ siteInfo.size[1] = tsize[1];
+
+ CopyOutKernelBacktrace(site, &siteInfo);
+ data->appendBytes(&siteInfo, sizeof(siteInfo));
+ }
+ }
+ assert(idx == num);
+ IOTRecursiveLockUnlock(&queue->lock);
+ ret = kIOReturnSuccess;
+ break;
+ }
+
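+ // report tracked memory mappings: target task, address, size, and the
+ // saved kernel and user backtraces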
+ case kIOTrackingGetMappings:
+ {
+ if (!(kIOTrackingQueueTypeMap & queue->type)) {
+ break;
+ }
+ if (!data) {
+ data = OSData::withCapacity(page_size);
+ }
+
+ IOTRecursiveLockLock(&queue->lock);
+ num = queue->siteCount;
+ idx = 0;
+ for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
+ queue_iterate(&queue->sites[qIdx], user, IOTrackingUser *, link)
+ {
+ assert(idx < num);
+ idx++;
+
+ kr = IOMemoryMapTracking(user, &mapTask, &mapAddress, &mapSize);
+ if (kIOReturnSuccess != kr) {
+ continue;
+ }
+ if (proc && (mapTask != proc_task(proc))) {
+ continue;
+ }
+ if (size && (mapSize < size)) {
+ continue;
+ }
+
+ siteInfo.count = 1;
+ siteInfo.size[0] = mapSize;
+ siteInfo.address = mapAddress;
+ siteInfo.addressPID = task_pid(mapTask);
+ siteInfo.btPID = user->btPID;
+
+ for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
+ siteInfo.bt[0][j] = VM_KERNEL_UNSLIDE(user->bt[j]);
+ }
+ uint32_t * bt32 = (typeof(bt32)) & user->btUser[0];
+ uint64_t * bt64 = (typeof(bt64))((void *) &user->btUser[0]);
+ for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
+ if (j >= user->userCount) {
+ siteInfo.bt[1][j] = 0;
+ } else if (user->user32) {
+ siteInfo.bt[1][j] = bt32[j];
+ } else {
+ siteInfo.bt[1][j] = bt64[j];
+ }
+ }
+ data->appendBytes(&siteInfo, sizeof(siteInfo));
+ }
+ }
+ assert(idx == num);
+ IOTRecursiveLockUnlock(&queue->lock);
+ ret = kIOReturnSuccess;
+ break;
+ }
+
+ default:
+ ret = kIOReturnUnsupported;
+ break;
+ }
+ }
+
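+ // for the leaks selector, the per-queue locks were left held above;
+ // run the leak analysis, then drop them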
+ if ((kIOTrackingLeaks == selector) && data) {
+ data = IOTrackingLeaks(data);
+ queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
+ {
+ if (SkipName(options, queue->name, namesLen, names)) {
+ continue;
+ }
+ if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
+ continue;
+ }
+ IOTRecursiveLockUnlock(&queue->lock);
+ }
+ }
+
+ lck_mtx_unlock(gIOTrackingLock);
+
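+ // also run the zone allocator's leak detection for any zone names supplied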
+ if ((kIOTrackingLeaks == selector) && namesLen && names) {
+ const char * scan;
+ const char * next;
+ size_t sLen;
+
+ if (!data) {
+ data = OSData::withCapacity(4096 * sizeof(uintptr_t));
+ }
+
+ // <len><name>...<len><name><0>
+ scan = names;
+ do{
+ sLen = scan[0];
+ scan++;
+ next = scan + sLen;
+ if (next >= (names + namesLen)) {
+ break;
+ }
+ kr = zone_leaks(scan, sLen, &ZoneSiteProc, data);
+ if (KERN_SUCCESS == kr) {
+ ret = kIOReturnSuccess;
+ } else if (KERN_INVALID_NAME != kr) {
+ ret = kIOReturnVMError;
+ }
+ scan = next;
+ }while (scan < (names + namesLen));
+ }
+
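+ // sort the accumulated records largest-first before returning them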
+ if (data) {
+ switch (selector) {
+ case kIOTrackingLeaks:
+ case kIOTrackingGetTracking:
+ case kIOTrackingGetMappings:
+ {
+ IOTrackingCallSiteInfo * siteInfos;
+ siteInfos = (typeof(siteInfos))data->getBytesNoCopy();
+ num = (data->getLength() / sizeof(*siteInfos));
+ qsort(siteInfos, num, sizeof(*siteInfos), &IOTrackingCallSiteInfoCompare);
+ break;
+ }
+ default: assert(false); break;
+ }
+ }
+
+ *result = data;
+ if (proc) {
+ proc_rele(proc);
+ }
+
+#endif /* IOTRACKING */
+
+ return ret;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <IOKit/IOKitDiagnosticsUserClient.h>
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#undef super
+#define super IOUserClient
+
+OSDefineMetaClassAndStructors(IOKitDiagnosticsClient, IOUserClient)
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
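+// Factory for the diagnostics user client; the owning task is not needed here.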
+IOUserClient * IOKitDiagnosticsClient::withTask(task_t owningTask)
+{
+ IOKitDiagnosticsClient * inst;
+
+ inst = new IOKitDiagnosticsClient;
+ if (inst && !inst->init()) {
+ inst->release();
+ inst = NULL;
+ }
+
+ return inst;
+}
+
+IOReturn
+IOKitDiagnosticsClient::clientClose(void)
+{
+ terminate();
+ return kIOReturnSuccess;
+}
+
+IOReturn
+IOKitDiagnosticsClient::setProperties(OSObject * properties)
+{
+ IOReturn kr = kIOReturnUnsupported;
+ return kr;
+}
+
+IOReturn
+IOKitDiagnosticsClient::externalMethod(uint32_t selector, IOExternalMethodArguments * args,
+ IOExternalMethodDispatch * dispatch, OSObject * target, void * reference)
+{
+ IOReturn ret = kIOReturnBadArgument;
+ const IOKitDiagnosticsParameters * params;
+ const char * names;
+ size_t namesLen;
+ OSObject * result;