// apple/xnu (xnu-4903.270.47) — iokit/Kernel/IOKitDebug.cpp
1 /*
2 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <sys/sysctl.h>
31 extern "C" {
32 #include <vm/vm_kern.h>
33 #include <kern/task.h>
34 #include <kern/debug.h>
35 }
36
37 #include <libkern/c++/OSContainers.h>
38 #include <libkern/OSDebug.h>
39 #include <libkern/c++/OSCPPDebug.h>
40 #include <kern/backtrace.h>
41
42 #include <IOKit/IOKitDebug.h>
43 #include <IOKit/IOLib.h>
44 #include <IOKit/assert.h>
45 #include <IOKit/IODeviceTreeSupport.h>
46 #include <IOKit/IOService.h>
47
48 #include "IOKitKernelInternal.h"
49
/* Initial value for gIOKitDebug; may be overridden at build time via IOKITDEBUG. */
#ifdef IOKITDEBUG
#define DEBUG_INIT_VALUE IOKITDEBUG
#else
#define DEBUG_INIT_VALUE 0
#endif

SInt64 gIOKitDebug = DEBUG_INIT_VALUE;  // global IOKit debug flag bits
SInt64 gIOKitTrace = 0;                 // global IOKit trace flag bits

/* The debug.iokit sysctl is writable only on DEVELOPMENT/DEBUG kernels. */
#if DEVELOPMENT || DEBUG
#define IODEBUG_CTLFLAGS CTLFLAG_RW
#else
#define IODEBUG_CTLFLAGS CTLFLAG_RD
#endif

SYSCTL_QUAD(_debug, OID_AUTO, iotrace, CTLFLAG_RW | CTLFLAG_LOCKED, &gIOKitTrace, "trace io");
66
67 static int
68 sysctl_debug_iokit
69 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
70 {
71 SInt64 newValue;
72 int changed, error = sysctl_io_number(req, gIOKitDebug, sizeof(gIOKitDebug), &newValue, &changed);
73 if (changed) {
74 gIOKitDebug = ((gIOKitDebug & ~kIOKitDebugUserOptions) | (newValue & kIOKitDebugUserOptions));
75 }
76 return error;
77 }
78
/* debug.iokit: 64-bit IOKit debug flags ("io" boot-arg); writes filtered by the handler above. */
SYSCTL_PROC(_debug, OID_AUTO, iokit,
    CTLTYPE_QUAD | IODEBUG_CTLFLAGS | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &gIOKitDebug, 0, sysctl_debug_iokit, "Q", "boot_arg io");
82
// Allocation accounting counters, read by IOPrintMemory()/IOKitDiagnostics below.
// (Updated by the allocators elsewhere — not in this file.)
int debug_malloc_size;
int debug_iomalloc_size;

vm_size_t debug_iomallocpageable_size;
int debug_container_malloc_size;
// int debug_ivars_size; // in OSObject.cpp
89
90 extern "C" {
// DEBG: debug output macro; routed to IOLog (flip the #if to 1 for kprintf instead).
#if 0
#define DEBG(fmt, args...) { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) { IOLog(fmt, ## args); }
#endif
96
/*
 * Dump an indented listing of the given registry plane to the debug log:
 * one line per entry with name[@location], class name and, for IOService
 * entries, the busy state. Also logs the total entry count up front.
 */
void
IOPrintPlane( const IORegistryPlane * plane )
{
	IORegistryEntry * next;
	IORegistryIterator * iter;
	OSOrderedSet * all;
	char format[] = "%xxxs";   // scratch buffer rewritten per entry, see below
	IOService * service;

	iter = IORegistryIterator::iterateOver( plane );
	assert( iter );
	all = iter->iterateAll();
	if (all) {
		DEBG("Count %d\n", all->getCount());
		all->release();
	} else {
		DEBG("Empty\n");
	}

	iter->reset();
	while ((next = iter->getNextObjectRecursive())) {
		// Rewrite the buffer to "%<2*depth>s" so printing "" indents by depth.
		snprintf(format + 1, sizeof(format) - 1, "%ds", 2 * next->getDepth( plane ));
		DEBG( format, "");
		DEBG( "\033[33m%s", next->getName( plane ));
		if ((next->getLocation( plane ))) {
			DEBG("@%s", next->getLocation( plane ));
		}
		DEBG("\033[0m <class %s", next->getMetaClass()->getClassName());
		if ((service = OSDynamicCast(IOService, next))) {
			DEBG(", busy %ld", (long) service->getBusyState());
		}
		DEBG( ">\n");
		// IOSleep(250);
	}
	iter->release();
}
133
// No-op stub — presumably kept for debugger/symbol compatibility; verify before removing.
void
db_piokjunk(void)
{
}
138
// No-op stub — presumably kept for debugger/symbol compatibility; verify before removing.
void
db_dumpiojunk( const IORegistryPlane * plane __unused )
{
}
143
/*
 * Log the global allocation accounting counters (debug_ivars_size is
 * declared in OSObject.cpp; the rest are defined above).
 */
void
IOPrintMemory( void )
{
//    OSMetaClass::printInstanceCounts();

	IOLog("\n"
	    "ivar kalloc() 0x%08x\n"
	    "malloc() 0x%08x\n"
	    "containers kalloc() 0x%08x\n"
	    "IOMalloc() 0x%08x\n"
	    "----------------------------------------\n",
	    debug_ivars_size,
	    debug_malloc_size,
	    debug_container_malloc_size,
	    debug_iomalloc_size
	    );
}
161 } /* extern "C" */
162
163 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
164
165 #define super OSObject
166 OSDefineMetaClassAndStructors(IOKitDiagnostics, OSObject)
167
168 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
169
170 OSObject * IOKitDiagnostics::diagnostics( void )
171 {
172 IOKitDiagnostics * diags;
173
174 diags = new IOKitDiagnostics;
175 if (diags && !diags->init()) {
176 diags->release();
177 diags = 0;
178 }
179
180 return diags;
181 }
182
183 void
184 IOKitDiagnostics::updateOffset( OSDictionary * dict,
185 UInt64 value, const char * name )
186 {
187 OSNumber * off;
188
189 off = OSNumber::withNumber( value, 64 );
190 if (!off) {
191 return;
192 }
193
194 dict->setObject( name, off );
195 off->release();
196 }
197
/*
 * Serialize the allocation counters, plus the per-class instance-count
 * dictionary, into the supplied OSSerialize stream.
 * Returns false if the working dictionary cannot be allocated.
 */
bool
IOKitDiagnostics::serialize(OSSerialize *s) const
{
	OSDictionary * dict;
	bool ok;

	dict = OSDictionary::withCapacity( 5 );
	if (!dict) {
		return false;
	}

	updateOffset( dict, debug_ivars_size, "Instance allocation" );
	updateOffset( dict, debug_container_malloc_size, "Container allocation" );
	updateOffset( dict, debug_iomalloc_size, "IOMalloc allocation" );
	updateOffset( dict, debug_iomallocpageable_size, "Pageable allocation" );

	OSMetaClass::serializeClassDictionary(dict);

	ok = dict->serialize( s );

	dict->release();

	return ok;
}
222
223 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
224
225 #if IOTRACKING
226
227 #include <libkern/c++/OSCPPDebug.h>
228 #include <libkern/c++/OSKext.h>
229 #include <kern/zalloc.h>
230
// NOTE(review): __private_extern__ is expected to expand to plain 'extern' in
// kernel builds (sys/cdefs.h), making this an ordinary extern "C" prototype for
// the kernel's qsort — confirm against the cdefs used by this target.
__private_extern__ "C" void qsort(
    void * array,
    size_t nmembers,
    size_t member_size,
    int (*)(const void *, const void *));

// pmap helpers used by the leak scanner below.
extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern "C" ppnum_t pmap_valid_page(ppnum_t pn);
239
240 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
241
/*
 * Minimal recursive mutex: 'thread' is the current owner and 'count' the
 * recursion depth; both are written only by the owner (see the Lock/Unlock
 * helpers below).
 */
struct IOTRecursiveLock {
	lck_mtx_t * mutex;
	thread_t thread;
	UInt32 count;
};
247
/*
 * One tracking domain. Call sites (or user mappings) are hashed into the
 * 'numSiteQs' buckets of the trailing flexible 'sites' array.
 */
struct IOTrackingQueue {
	queue_chain_t link;       // membership in the global gIOTrackingQ list
	IOTRecursiveLock lock;    // protects sites[] and siteCount
	const char * name;        // queue name, used for name-based filtering
	uintptr_t btEntry;        // substitute backtrace entry used on copy-out
	size_t allocSize;         // fixed allocation size, 0 when variable
	size_t minCaptureSize;    // captures smaller than this are ignored
	uint32_t siteCount;       // total entries across all buckets
	uint32_t type;            // kIOTrackingQueueType* flags
	uint32_t numSiteQs;       // number of hash buckets in sites[]
	uint8_t captureOn;        // nonzero while capture is enabled
	queue_head_t sites[];     // hash buckets
};
261
/*
 * One allocating call site, identified by (tag, crc-of-backtrace) within a
 * queue bucket. Holds the list of live instances attributed to it.
 */
struct IOTrackingCallSite {
	queue_chain_t link;       // chain within queue->sites[bucket]
	IOTrackingQueue * queue;  // owning queue
	uint32_t crc;             // fasthash32 of the captured backtrace

	vm_tag_t tag;
	uint32_t count;           // number of live instances
	size_t size[2];           // [0] allocated bytes, [1] accumulated deltas
	uintptr_t bt[kIOTrackingCallSiteBTs];   // captured kernel backtrace

	queue_head_t instances;   // live IOTracking instances
	IOTracking * addresses;   // first address-style entry within 'instances'
};
275
/* Shared state for IOTrackingLeakScan(): the sorted instance array plus results. */
struct IOTrackingLeaksRef {
	uintptr_t * instances;    // sorted instance pointers (low bits carry flags)
	uint32_t zoneSize;        // nonzero when scanning zone elements
	uint32_t count;
	uint32_t found;           // instances seen referenced during the scan
	uint32_t foundzlen;       // referenced instances that had zero size
	size_t bytes;             // bytes of memory scanned
};
284
lck_mtx_t * gIOTrackingLock;    // guards gIOTrackingQ
queue_head_t gIOTrackingQ;      // list of all IOTrackingQueue instances

enum{
	kTrackingAddressFlagAllocated    = 0x00000001   // IOTrackingAddress was kalloc'ed here
};

/* The flags field lives in a different spot in the 32-bit layout. */
#if defined(__LP64__)
#define IOTrackingAddressFlags(ptr)     (ptr->flags)
#else
#define IOTrackingAddressFlags(ptr)     (ptr->tracking.flags)
#endif
297
298 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
299
/*
 * Acquire the recursive lock. Reading lock->thread without the mutex is safe
 * because only the owning thread ever sets it to current_thread().
 */
static void
IOTRecursiveLockLock(IOTRecursiveLock * lock)
{
	if (lock->thread == current_thread()) {
		lock->count++;          // re-entry by the owner
	} else {
		lck_mtx_lock(lock->mutex);
		assert(lock->thread == 0);
		assert(lock->count == 0);
		lock->thread = current_thread();
		lock->count = 1;
	}
}
313
/* Release one level of the recursive lock; drops the mutex on the last unlock. */
static void
IOTRecursiveLockUnlock(IOTRecursiveLock * lock)
{
	assert(lock->thread == current_thread());
	if (0 == (--lock->count)) {
		lock->thread = 0;       // clear ownership before releasing the mutex
		lck_mtx_unlock(lock->mutex);
	}
}
323
324 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
325
/* One-time init of the global tracking queue list and its lock. */
void
IOTrackingInit(void)
{
	queue_init(&gIOTrackingQ);
	gIOTrackingLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}
332
333 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
334
/*
 * Allocate and register a tracking queue with 'numSiteQs' hash buckets
 * (at least one). Capture starts enabled when both kIOTracking and
 * kIOTrackingBoot are set in gIOKitDebug, or when the queue type requests
 * default-on.
 */
IOTrackingQueue *
IOTrackingQueueAlloc(const char * name, uintptr_t btEntry,
    size_t allocSize, size_t minCaptureSize,
    uint32_t type, uint32_t numSiteQs)
{
	IOTrackingQueue * queue;
	uint32_t idx;

	if (!numSiteQs) {
		numSiteQs = 1;
	}
	queue = (typeof(queue))kalloc(sizeof(IOTrackingQueue) + numSiteQs * sizeof(queue->sites[0]));
	// Only the fixed header is zeroed; sites[] is initialized by the loop below.
	bzero(queue, sizeof(IOTrackingQueue));

	queue->name = name;
	queue->btEntry = btEntry;
	queue->allocSize = allocSize;
	queue->minCaptureSize = minCaptureSize;
	queue->lock.mutex = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
	queue->numSiteQs = numSiteQs;
	queue->type = type;
	enum { kFlags = (kIOTracking | kIOTrackingBoot) };
	queue->captureOn = (kFlags == (kFlags & gIOKitDebug))
	    || (kIOTrackingQueueTypeDefaultOn & type);

	for (idx = 0; idx < numSiteQs; idx++) {
		queue_init(&queue->sites[idx]);
	}

	lck_mtx_lock(gIOTrackingLock);
	queue_enter(&gIOTrackingQ, queue, IOTrackingQueue *, link);
	lck_mtx_unlock(gIOTrackingLock);

	return queue;
};
370
371 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
372
/* Drain, unregister and free a tracking queue (reset runs under the global lock). */
void
IOTrackingQueueFree(IOTrackingQueue * queue)
{
	lck_mtx_lock(gIOTrackingLock);
	IOTrackingReset(queue);
	remque(&queue->link);
	lck_mtx_unlock(gIOTrackingLock);

	lck_mtx_free(queue->lock.mutex, IOLockGroup);

	kfree(queue, sizeof(IOTrackingQueue) + queue->numSiteQs * sizeof(queue->sites[0]));
};
385
386 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
387
388 /* fasthash
389 * The MIT License
390 *
391 * Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com)
392 *
393 * Permission is hereby granted, free of charge, to any person
394 * obtaining a copy of this software and associated documentation
395 * files (the "Software"), to deal in the Software without
396 * restriction, including without limitation the rights to use, copy,
397 * modify, merge, publish, distribute, sublicense, and/or sell copies
398 * of the Software, and to permit persons to whom the Software is
399 * furnished to do so, subject to the following conditions:
400 *
401 * The above copyright notice and this permission notice shall be
402 * included in all copies or substantial portions of the Software.
403 *
404 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
405 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
406 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
407 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
408 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
409 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
410 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
411 * SOFTWARE.
412 */
413
414
415 // Compression function for Merkle-Damgard construction.
416 // This function is generated using the framework provided.
#define mix(h) ({ \
	(h) ^= (h) >> 23; \
	(h) *= 0x2127599bf4325c37ULL; \
	(h) ^= (h) >> 47; })

/*
 * fasthash64: hash 'len' bytes of 'buf' with the given seed.
 * Whole 64-bit words are mixed in first; the remaining 0-7 tail bytes are
 * assembled little-endian into one final word before the closing mix.
 */
static uint64_t
fasthash64(const void *buf, size_t len, uint64_t seed)
{
	const uint64_t kMul = 0x880355f21e6d1965ULL;
	const uint64_t *word = (const uint64_t *)buf;
	const uint64_t *wordEnd = word + (len / 8);
	uint64_t hash = seed ^ (len * kMul);

	while (word != wordEnd) {
		uint64_t chunk = *word++;
		hash ^= mix(chunk);
		hash *= kMul;
	}

	const unsigned char *tail = (const unsigned char *)word;
	const size_t remain = len & 7;
	if (remain) {
		uint64_t chunk = 0;
		for (size_t i = 0; i < remain; i++) {
			chunk ^= (uint64_t)tail[i] << (8 * i);
		}
		hash ^= mix(chunk);
		hash *= kMul;
	}

	return mix(hash);
}
461
462 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
463
464 static uint32_t
465 fasthash32(const void *buf, size_t len, uint32_t seed)
466 {
467 // the following trick converts the 64-bit hashcode to Fermat
468 // residue, which shall retain information from both the higher
469 // and lower parts of hashcode.
470 uint64_t h = fasthash64(buf, len, seed);
471 return h - (h >> 32);
472 }
473
474 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
475
/*
 * Record a user-mapping tracking entry: captures a kernel backtrace into
 * mem->bt, then (for non-kernel tasks) a user backtrace into mem->btUser.
 * The entry is appended to the queue's single bucket sites[0].
 */
void
IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size)
{
	uint32_t num;
	proc_t self;

	if (!queue->captureOn) {
		return;
	}
	if (size < queue->minCaptureSize) {
		return;
	}

	assert(!mem->link.next);

	num = backtrace(&mem->bt[0], kIOTrackingCallSiteBTs);
	// Reset: 'num' now counts user frames only (kernel frames already stored).
	num = 0;
	if ((kernel_task != current_task()) && (self = proc_self())) {
		bool user_64;
		mem->btPID = proc_pid(self);
		(void)backtrace_user(&mem->btUser[0], kIOTrackingCallSiteBTs - 1, &num,
		    &user_64);
		mem->user32 = !user_64;
		proc_rele(self);
	}
	assert(num <= kIOTrackingCallSiteBTs);
	mem->userCount = num;

	IOTRecursiveLockLock(&queue->lock);
	queue_enter/*last*/ (&queue->sites[0], mem, IOTrackingUser *, link);
	queue->siteCount++;
	IOTRecursiveLockUnlock(&queue->lock);
}
509
/*
 * Unlink a user tracking entry if it is enqueued. The link is re-checked
 * under the lock because the unlocked first test is only an optimization.
 */
void
IOTrackingRemoveUser(IOTrackingQueue * queue, IOTrackingUser * mem)
{
	if (!mem->link.next) {
		return;
	}

	IOTRecursiveLockLock(&queue->lock);
	if (mem->link.next) {
		remque(&mem->link);
		assert(queue->siteCount);
		queue->siteCount--;
	}
	IOTRecursiveLockUnlock(&queue->lock);
}
525
uint64_t gIOTrackingAddTime;

/*
 * Attribute a live tracked instance to its allocating call site. The current
 * kernel backtrace (minus this frame) identifies the site; sites are looked
 * up by (tag, crc) in one hash bucket and created on first use. 'address'
 * selects ordering within the site's instance list: address-style entries
 * are appended after plain ones, with site->addresses marking the first.
 */
void
IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool address, vm_tag_t tag)
{
	IOTrackingCallSite * site;
	uint32_t crc, num;
	uintptr_t bt[kIOTrackingCallSiteBTs + 1];
	queue_head_t * que;

	if (mem->site) {
		return;         // already tracked
	}
	if (!queue->captureOn) {
		return;
	}
	if (size < queue->minCaptureSize) {
		return;
	}

	assert(!mem->link.next);

	num = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1);
	if (!num) {
		return;
	}
	num--;
	// Hash from bt[1] on, skipping this function's own frame.
	crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7);

	IOTRecursiveLockLock(&queue->lock);
	que = &queue->sites[crc % queue->numSiteQs];
	queue_iterate(que, site, IOTrackingCallSite *, link)
	{
		if (tag != site->tag) {
			continue;
		}
		if (crc == site->crc) {
			break;
		}
	}

	if (queue_end(que, (queue_entry_t) site)) {
		// No matching site: create one for this (tag, backtrace).
		site = (typeof(site))kalloc(sizeof(IOTrackingCallSite));

		queue_init(&site->instances);
		// Empty sentinel: addresses points at the queue head itself.
		site->addresses = (IOTracking *) &site->instances;
		site->queue = queue;
		site->crc = crc;
		site->count = 0;
		site->tag = tag;
		memset(&site->size[0], 0, sizeof(site->size));
		bcopy(&bt[1], &site->bt[0], num * sizeof(site->bt[0]));
		assert(num <= kIOTrackingCallSiteBTs);
		bzero(&site->bt[num], (kIOTrackingCallSiteBTs - num) * sizeof(site->bt[0]));

		queue_enter_first(que, site, IOTrackingCallSite *, link);
		queue->siteCount++;
	}

	if (address) {
		// Append; if there were no address entries yet, this becomes the first.
		queue_enter/*last*/ (&site->instances, mem, IOTracking *, link);
		if (queue_end(&site->instances, (queue_entry_t)site->addresses)) {
			site->addresses = mem;
		}
	} else {
		queue_enter_first(&site->instances, mem, IOTracking *, link);
	}

	mem->site = site;
	site->size[0] += size;
	site->count++;

	IOTRecursiveLockUnlock(&queue->lock);
}
600
601 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
602
/*
 * Detach a tracked instance from its call site, updating the site's counters
 * and freeing the site when its last instance goes away. The link is
 * re-checked under the lock (the unlocked test is only an optimization).
 */
void
IOTrackingRemove(IOTrackingQueue * queue, IOTracking * mem, size_t size)
{
	if (!mem->link.next) {
		return;
	}

	IOTRecursiveLockLock(&queue->lock);
	if (mem->link.next) {
		assert(mem->site);

		// Keep site->addresses pointing at the first address-style entry.
		if (mem == mem->site->addresses) {
			mem->site->addresses = (IOTracking *) queue_next(&mem->link);
		}
		remque(&mem->link);

		assert(mem->site->count);
		mem->site->count--;
		assert(mem->site->size[0] >= size);
		mem->site->size[0] -= size;
		if (!mem->site->count) {
			assert(queue_empty(&mem->site->instances));
			assert(!mem->site->size[0]);
			assert(!mem->site->size[1]);

			remque(&mem->site->link);
			assert(queue->siteCount);
			queue->siteCount--;
			kfree(mem->site, sizeof(IOTrackingCallSite));
		}
		mem->site = NULL;
	}
	IOTRecursiveLockUnlock(&queue->lock);
}
637
638 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
639
/*
 * Track a raw allocation by address. The address is stored one's-complemented
 * (consumers un-complement it) — presumably so the stored copy never matches
 * a live pointer during the leak scan; confirm against IOTrackingLeakScan.
 */
void
IOTrackingAlloc(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
	IOTrackingAddress * tracking;

	if (!queue->captureOn) {
		return;
	}
	if (size < queue->minCaptureSize) {
		return;
	}

	address = ~address;
	tracking = (typeof(tracking))kalloc(sizeof(IOTrackingAddress));
	bzero(tracking, sizeof(IOTrackingAddress));
	IOTrackingAddressFlags(tracking) |= kTrackingAddressFlagAllocated;
	tracking->address = address;
	tracking->size = size;

	IOTrackingAdd(queue, &tracking->tracking, size, true, VM_KERN_MEMORY_NONE);
}
661
662 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
663
/*
 * Find and free the IOTrackingAddress recorded for 'address'. Searches every
 * bucket, walking only each site's address-style entries (starting at
 * site->addresses). Compares against the complemented stored address.
 */
void
IOTrackingFree(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
	IOTrackingCallSite * site;
	IOTrackingAddress * tracking;
	uint32_t idx;
	bool done;

	address = ~address;     // stored complemented by IOTrackingAlloc
	IOTRecursiveLockLock(&queue->lock);
	done = false;
	for (idx = 0; idx < queue->numSiteQs; idx++) {
		queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
		{
			tracking = (IOTrackingAddress *) site->addresses;
			while (!queue_end(&site->instances, &tracking->tracking.link)) {
				if ((done = (address == tracking->address))) {
					IOTrackingRemove(queue, &tracking->tracking, size);
					kfree(tracking, sizeof(IOTrackingAddress));
					break;
				} else {
					tracking = (IOTrackingAddress *) queue_next(&tracking->tracking.link);
				}
			}
			if (done) {
				break;
			}
		}
		if (done) {
			break;
		}
	}
	IOTRecursiveLockUnlock(&queue->lock);
}
698
699 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
700
/*
 * Adjust the accumulated size (size[1]) of a tracked instance's site.
 * 'size' may carry a negative delta passed through size_t. No-op when the
 * instance isn't enqueued.
 */
void
IOTrackingAccumSize(IOTrackingQueue * queue, IOTracking * mem, size_t size)
{
	IOTRecursiveLockLock(&queue->lock);
	if (mem->link.next) {
		assert(mem->site);
		// NOTE(review): with size_t, (size > 0) is true for any nonzero value
		// (including wrapped negative deltas), so this assert only ever fires
		// for size == 0 — confirm intended semantics.
		assert((size > 0) || (mem->site->size[1] >= -size));
		mem->site->size[1] += size;
	}
	;
	IOTRecursiveLockUnlock(&queue->lock);
}
713
714 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
715
/*
 * Empty a queue. Map-type queues unlink their user entries (entries are
 * caller-owned); other queues free each call site and any address-style
 * IOTrackingAddress instances that were kalloc'ed (flag-checked), relying on
 * the invariant that address-style entries follow site->addresses in order.
 */
void
IOTrackingReset(IOTrackingQueue * queue)
{
	IOTrackingCallSite * site;
	IOTrackingUser * user;
	IOTracking * tracking;
	IOTrackingAddress * trackingAddress;
	uint32_t idx;
	bool addresses;

	IOTRecursiveLockLock(&queue->lock);
	for (idx = 0; idx < queue->numSiteQs; idx++) {
		while (!queue_empty(&queue->sites[idx])) {
			if (kIOTrackingQueueTypeMap & queue->type) {
				queue_remove_first(&queue->sites[idx], user, IOTrackingUser *, link);
				user->link.next = user->link.prev = NULL;
			} else {
				queue_remove_first(&queue->sites[idx], site, IOTrackingCallSite *, link);
				addresses = false;
				while (!queue_empty(&site->instances)) {
					queue_remove_first(&site->instances, tracking, IOTracking *, link);
					// Everything from site->addresses onward is address-style.
					if (tracking == site->addresses) {
						addresses = true;
					}
					if (addresses) {
						trackingAddress = (typeof(trackingAddress))tracking;
						if (kTrackingAddressFlagAllocated & IOTrackingAddressFlags(trackingAddress)) {
							kfree(tracking, sizeof(IOTrackingAddress));
						}
					}
				}
				kfree(site, sizeof(IOTrackingCallSite));
			}
		}
	}
	queue->siteCount = 0;
	IOTRecursiveLockUnlock(&queue->lock);
}
754
755 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
756
757 static int
758 IOTrackingCallSiteInfoCompare(const void * left, const void * right)
759 {
760 IOTrackingCallSiteInfo * l = (typeof(l))left;
761 IOTrackingCallSiteInfo * r = (typeof(r))right;
762 size_t lsize, rsize;
763
764 rsize = r->size[0] + r->size[1];
765 lsize = l->size[0] + l->size[1];
766
767 return (rsize > lsize) ? 1 : ((rsize == lsize) ? 0 : -1);
768 }
769
770 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
771
772 static int
773 IOTrackingAddressCompare(const void * left, const void * right)
774 {
775 IOTracking * instance;
776 uintptr_t inst, laddr, raddr;
777
778 inst = ((typeof(inst) *)left)[0];
779 instance = (typeof(instance))INSTANCE_GET(inst);
780 if (kInstanceFlagAddress & inst) {
781 laddr = ~((IOTrackingAddress *)instance)->address;
782 } else {
783 laddr = (uintptr_t) (instance + 1);
784 }
785
786 inst = ((typeof(inst) *)right)[0];
787 instance = (typeof(instance))(inst & ~kInstanceFlags);
788 if (kInstanceFlagAddress & inst) {
789 raddr = ~((IOTrackingAddress *)instance)->address;
790 } else {
791 raddr = (uintptr_t) (instance + 1);
792 }
793
794 return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
795 }
796
797
798 static int
799 IOTrackingZoneElementCompare(const void * left, const void * right)
800 {
801 uintptr_t inst, laddr, raddr;
802
803 inst = ((typeof(inst) *)left)[0];
804 laddr = INSTANCE_PUT(inst);
805 inst = ((typeof(inst) *)right)[0];
806 raddr = INSTANCE_PUT(inst);
807
808 return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
809 }
810
811 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
812
/*
 * Copy a site's kernel backtrace into siteInfo, unsliding each entry.
 * If the queue has a fixed btEntry, substitute it for the first empty slot
 * (or the final slot when the trace is full).
 */
static void
CopyOutKernelBacktrace(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteInfo)
{
	uint32_t j;
	mach_vm_address_t bt, btEntry;

	btEntry = site->queue->btEntry;
	for (j = 0; j < kIOTrackingCallSiteBTs; j++) {
		bt = site->bt[j];
		if (btEntry
		    && (!bt || (j == (kIOTrackingCallSiteBTs - 1)))) {
			bt = btEntry;
			btEntry = 0;    // substitute only once
		}
		siteInfo->bt[0][j] = VM_KERNEL_UNSLIDE(bt);
	}
}
830
831 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
832
/*
 * Conservative leak scan: walk all mapped kernel virtual memory a page at a
 * time and, for every pointer-sized word, binary-search the sorted instance
 * array for an allocation containing that value. Matches mark the instance
 * kInstanceFlagReferenced (self-references within the same allocation are
 * ignored). Runs with interrupts disabled in 10ms slices, re-enabling them
 * briefly between slices to bound latency.
 */
static void
IOTrackingLeakScan(void * refcon)
{
	IOTrackingLeaksRef * ref = (typeof(ref))refcon;
	uintptr_t * instances;
	IOTracking * instance;
	uint64_t vaddr, vincr;
	ppnum_t ppn;
	uintptr_t ptr, addr, vphysaddr, inst;
	size_t size, origsize;
	uint32_t baseIdx, lim, ptrIdx, count;
	boolean_t is;
	AbsoluteTime deadline;

	instances = ref->instances;
	count = ref->count;
	size = origsize = ref->zoneSize;

	for (deadline = 0, vaddr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
	    ;
	    vaddr += vincr) {
		// Slice boundary: 'is' is only consumed after the first slice
		// has set it (deadline != 0 guards the first pass).
		if ((mach_absolute_time() > deadline) || (vaddr >= VM_MAX_KERNEL_ADDRESS)) {
			if (deadline) {
				ml_set_interrupts_enabled(is);
				IODelay(10);
			}
			if (vaddr >= VM_MAX_KERNEL_ADDRESS) {
				break;
			}
			is = ml_set_interrupts_enabled(false);
			clock_interval_to_deadline(10, kMillisecondScale, &deadline);
		}

		ppn = kernel_pmap_present_mapping(vaddr, &vincr, &vphysaddr);
		// check noencrypt to avoid VM structs (map entries) with pointers
		if (ppn && (!pmap_valid_page(ppn) || (!ref->zoneSize && pmap_is_noencrypt(ppn)))) {
			ppn = 0;
		}
		if (!ppn) {
			continue;
		}

		for (ptrIdx = 0; ptrIdx < (page_size / sizeof(uintptr_t)); ptrIdx++) {
			ptr = ((uintptr_t *)vphysaddr)[ptrIdx];

			// Binary search for an instance whose range contains 'ptr'.
			for (lim = count, baseIdx = 0; lim; lim >>= 1) {
				inst = instances[baseIdx + (lim >> 1)];
				instance = (typeof(instance))INSTANCE_GET(inst);

				if (ref->zoneSize) {
					addr = INSTANCE_PUT(inst) & ~kInstanceFlags;
				} else if (kInstanceFlagAddress & inst) {
					addr = ~((IOTrackingAddress *)instance)->address;
					origsize = size = ((IOTrackingAddress *)instance)->size;
					if (!size) {
						size = 1;       // zero-length still occupies one byte for matching
					}
				} else {
					addr = (uintptr_t) (instance + 1);
					origsize = size = instance->site->queue->allocSize;
				}
				// Hit only if the referencing word lies outside the allocation itself.
				if ((ptr >= addr) && (ptr < (addr + size))

				    && (((vaddr + ptrIdx * sizeof(uintptr_t)) < addr)
				    || ((vaddr + ptrIdx * sizeof(uintptr_t)) >= (addr + size)))) {
					if (!(kInstanceFlagReferenced & inst)) {
						inst |= kInstanceFlagReferenced;
						instances[baseIdx + (lim >> 1)] = inst;
						ref->found++;
						if (!origsize) {
							ref->foundzlen++;
						}
					}
					break;
				}
				if (ptr > addr) {
					// move right
					baseIdx += (lim >> 1) + 1;
					lim--;
				}
				// else move left
			}
		}
		ref->bytes += page_size;
	}
}
919
920 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
921
/*
 * Entry point for zalloc leak detection: sorts the element array, then runs
 * the conservative scan up to twice (a second pass catches references created
 * by pages mapped during the first). Writes the referenced count to *found.
 */
extern "C" void
zone_leaks_scan(uintptr_t * instances, uint32_t count, uint32_t zoneSize, uint32_t * found)
{
	IOTrackingLeaksRef ref;
	IOTrackingCallSiteInfo siteInfo;        // NOTE(review): unused here — confirm before removing
	uint32_t idx;

	qsort(instances, count, sizeof(*instances), &IOTrackingZoneElementCompare);

	bzero(&siteInfo, sizeof(siteInfo));
	bzero(&ref, sizeof(ref));
	ref.instances = instances;
	ref.count = count;
	ref.zoneSize = zoneSize;

	for (idx = 0; idx < 2; idx++) {
		ref.bytes = 0;
		IOTrackingLeakScan(&ref);
		IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d\n", idx, ref.bytes / 1024 / 1024, count, ref.found);
		if (count <= ref.found) {
			break;  // everything already referenced; second pass unnecessary
		}
	}

	*found = ref.found;
}
948
949 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
950
951 static void
952 ZoneSiteProc(void * refCon, uint32_t siteCount, uint32_t zoneSize,
953 uintptr_t * backtrace, uint32_t btCount)
954 {
955 IOTrackingCallSiteInfo siteInfo;
956 OSData * leakData;
957 uint32_t idx;
958
959 leakData = (typeof(leakData))refCon;
960
961 bzero(&siteInfo, sizeof(siteInfo));
962 siteInfo.count = siteCount;
963 siteInfo.size[0] = zoneSize * siteCount;
964
965 for (idx = 0; (idx < btCount) && (idx < kIOTrackingCallSiteBTs); idx++) {
966 siteInfo.bt[0][idx] = VM_KERNEL_UNSLIDE(backtrace[idx]);
967 }
968
969 leakData->appendBytes(&siteInfo, sizeof(siteInfo));
970 }
971
972
973 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
974
/*
 * Given an OSData of flagged instance pointers (consumed), run the leak scan
 * and return a new OSData of IOTrackingCallSiteInfo records for instances
 * that were never referenced, aggregated per call site. Per-site sizes are
 * apportioned by the fraction of the site's instances that leaked.
 */
static OSData *
IOTrackingLeaks(LIBKERN_CONSUMED OSData * data)
{
	IOTrackingLeaksRef ref;
	IOTrackingCallSiteInfo siteInfo;
	IOTrackingCallSite * site;
	OSData * leakData;
	uintptr_t * instances;
	IOTracking * instance;
	uintptr_t inst;
	uint32_t count, idx, numSites, dups, siteCount;

	instances = (typeof(instances))data->getBytesNoCopy();
	count = (data->getLength() / sizeof(*instances));
	qsort(instances, count, sizeof(*instances), &IOTrackingAddressCompare);

	bzero(&siteInfo, sizeof(siteInfo));
	bzero(&ref, sizeof(ref));
	ref.instances = instances;
	ref.count = count;
	for (idx = 0; idx < 2; idx++) {
		ref.bytes = 0;
		IOTrackingLeakScan(&ref);
		IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d (zlen %d)\n", idx, ref.bytes / 1024 / 1024, count, ref.found, ref.foundzlen);
		if (count <= ref.found) {
			break;
		}
	}

	leakData = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));

	// Compact: keep only the call sites of unreferenced (leaked) instances.
	for (numSites = 0, idx = 0; idx < count; idx++) {
		inst = instances[idx];
		if (kInstanceFlagReferenced & inst) {
			continue;
		}
		instance = (typeof(instance))INSTANCE_GET(inst);
		site = instance->site;
		instances[numSites] = (uintptr_t) site;
		numSites++;
	}

	// Aggregate duplicate sites (zeroing the duplicates as we go).
	for (idx = 0; idx < numSites; idx++) {
		inst = instances[idx];
		if (!inst) {
			continue;
		}
		site = (typeof(site))inst;
		for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
			if (instances[dups] == (uintptr_t) site) {
				siteCount++;
				instances[dups] = 0;
			}
		}
		siteInfo.count = siteCount;
		siteInfo.size[0] = (site->size[0] * site->count) / siteCount;
		siteInfo.size[1] = (site->size[1] * site->count) / siteCount;;
		CopyOutKernelBacktrace(site, &siteInfo);
		leakData->appendBytes(&siteInfo, sizeof(siteInfo));
	}
	data->release();

	return leakData;
}
1039
1040 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1041
1042 static bool
1043 SkipName(uint32_t options, const char * name, size_t namesLen, const char * names)
1044 {
1045 const char * scan;
1046 const char * next;
1047 bool exclude, found;
1048 size_t qLen, sLen;
1049
1050 if (!namesLen || !names) {
1051 return false;
1052 }
1053 // <len><name>...<len><name><0>
1054 exclude = (0 != (kIOTrackingExcludeNames & options));
1055 qLen = strlen(name);
1056 scan = names;
1057 found = false;
1058 do{
1059 sLen = scan[0];
1060 scan++;
1061 next = scan + sLen;
1062 if (next >= (names + namesLen)) {
1063 break;
1064 }
1065 found = ((sLen == qLen) && !strncmp(scan, name, sLen));
1066 scan = next;
1067 }while (!found && (scan < (names + namesLen)));
1068
1069 return !(exclude ^ found);
1070 }
1071
1072 #endif /* IOTRACKING */
1073
1074 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1075
/*
 * IOTrackingDebug - backend for the IOKitDiagnostics user client.
 *
 * Walks every registered IOTrackingQueue and performs the operation named by
 * 'selector' (reset / start / stop capture, set minimum capture size, collect
 * leak candidates, dump allocation call sites, or dump memory-map records).
 *
 * selector   one of the kIOTracking* operation codes.
 * options    kIOTrackingExclude* flags; forwarded to SkipName() for queue
 *            name filtering against the packed 'names' list.
 * value      selector-specific; for kIOTrackingGetMappings it is a pid
 *            (-1ULL means "all processes").
 * intag      if non-zero, restrict results to allocations with this VM tag.
 * inzsize    if non-zero, restrict results to allocations from a zone of
 *            this element size.
 * names      packed <len><name>...<len><name><0> list of queue names
 *            (may be NULL); namesLen is its byte length.
 * size       minimum allocation/mapping size filter (0 == no filter).
 * result     out: on success for the "get" selectors, an OSData the caller
 *            owns and must release; set to 0 up front when non-NULL.
 *
 * Returns kIOReturnSuccess when at least one queue was acted on,
 * kIOReturnNotFound / kIOReturnUnsupported / kIOReturnNotReady otherwise.
 */
static kern_return_t
IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value,
    uint32_t intag, uint32_t inzsize,
    const char * names, size_t namesLen,
    size_t size, OSObject ** result)
{
	kern_return_t ret;
	OSData * data;

	if (result) {
		*result = 0;
	}
	data = 0;
	ret = kIOReturnNotReady;

#if IOTRACKING

	kern_return_t kr;
	IOTrackingQueue * queue;
	IOTracking * instance;
	IOTrackingCallSite * site;
	IOTrackingCallSiteInfo siteInfo;
	IOTrackingUser * user;
	task_t mapTask;
	mach_vm_address_t mapAddress;
	mach_vm_size_t mapSize;
	uint32_t num, idx, qIdx;
	uintptr_t instFlags;
	proc_t proc;
	bool addresses;

	ret = kIOReturnNotFound;
	proc = NULL;
	// For mapping dumps, resolve the target process up front; proc_find()
	// takes a reference that is dropped via proc_rele() on exit.
	if (kIOTrackingGetMappings == selector) {
		if (value != -1ULL) {
			proc = proc_find(value);
			if (!proc) {
				return kIOReturnNotFound;
			}
		}
	}

	bzero(&siteInfo, sizeof(siteInfo));
	lck_mtx_lock(gIOTrackingLock);
	queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
	{
		// Honor the caller's include/exclude name list.
		if (SkipName(options, queue->name, namesLen, names)) {
			continue;
		}

		// Allocation-type queues are only meaningful when kIOTracking
		// was enabled in the boot-args; skip them otherwise.
		if (!(kIOTracking & gIOKitDebug) && (kIOTrackingQueueTypeAlloc & queue->type)) {
			continue;
		}

		switch (selector) {
		case kIOTrackingResetTracking:
		{
			IOTrackingReset(queue);
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingStartCapture:
		case kIOTrackingStopCapture:
		{
			queue->captureOn = (kIOTrackingStartCapture == selector);
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingSetMinCaptureSize:
		{
			queue->minCaptureSize = size;
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingLeaks:
		{
			if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
				break;
			}

			if (!data) {
				data = OSData::withCapacity(1024 * sizeof(uintptr_t));
			}

			// Collect every live instance pointer into 'data'. Instances
			// queued at/after site->addresses are IOTrackingAddress records
			// and get kInstanceFlagAddress OR'ed into the low bits.
			IOTRecursiveLockLock(&queue->lock);
			for (idx = 0; idx < queue->numSiteQs; idx++) {
				queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
				{
					addresses = false;
					queue_iterate(&site->instances, instance, IOTracking *, link)
					{
						if (instance == site->addresses) {
							addresses = true;
						}
						instFlags = (typeof(instFlags))instance;
						if (addresses) {
							instFlags |= kInstanceFlagAddress;
						}
						data->appendBytes(&instFlags, sizeof(instFlags));
					}
				}
			}
			// queue is locked -- deliberately NOT unlocked here: the lock is
			// held across the IOTrackingLeaks() scan below and released in
			// the matching loop after this queue_iterate completes.
			ret = kIOReturnSuccess;
			break;
		}


		case kIOTrackingGetTracking:
		{
			// Call-site dumps apply to allocation queues only.
			if (kIOTrackingQueueTypeMap & queue->type) {
				break;
			}

			if (!data) {
				data = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));
			}

			IOTRecursiveLockLock(&queue->lock);
			num = queue->siteCount;
			idx = 0;
			for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
				queue_iterate(&queue->sites[qIdx], site, IOTrackingCallSite *, link)
				{
					assert(idx < num);
					idx++;

					size_t tsize[2];
					uint32_t count = site->count;
					tsize[0] = site->size[0];
					tsize[1] = site->size[1];

					if (intag || inzsize) {
						uintptr_t addr;
						// NB: this 'size' shadows the function parameter;
						// the parameter is what the later minimum-size
						// filter below reads.
						vm_size_t size, zoneSize;
						vm_tag_t tag;

						if (kIOTrackingQueueTypeAlloc & queue->type) {
							// Recompute count/size from live instances that
							// match the requested tag / zone element size.
							addresses = false;
							count = 0;
							tsize[0] = tsize[1] = 0;
							queue_iterate(&site->instances, instance, IOTracking *, link)
							{
								if (instance == site->addresses) {
									addresses = true;
								}

								// Address records store the tracked address
								// bit-inverted; plain records track the
								// allocation that immediately follows the
								// IOTracking header.
								if (addresses) {
									addr = ~((IOTrackingAddress *)instance)->address;
								} else {
									addr = (uintptr_t) (instance + 1);
								}

								kr = vm_kern_allocation_info(addr, &size, &tag, &zoneSize);
								if (KERN_SUCCESS != kr) {
									continue;
								}

								if ((VM_KERN_MEMORY_NONE != intag) && (intag != tag)) {
									continue;
								}
								if (inzsize && (inzsize != zoneSize)) {
									continue;
								}

								count++;
								tsize[0] += size;
							}
						} else {
							// Non-alloc queues carry a single tag on the
							// site; zone-size filtering is not applicable.
							if (!intag || inzsize || (intag != site->tag)) {
								continue;
							}
						}
					}

					if (!count) {
						continue;
					}
					// Minimum total-size filter (function parameter 'size').
					if (size && ((tsize[0] + tsize[1]) < size)) {
						continue;
					}

					siteInfo.count = count;
					siteInfo.size[0] = tsize[0];
					siteInfo.size[1] = tsize[1];

					CopyOutKernelBacktrace(site, &siteInfo);
					data->appendBytes(&siteInfo, sizeof(siteInfo));
				}
			}
			assert(idx == num);
			IOTRecursiveLockUnlock(&queue->lock);
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingGetMappings:
		{
			// Mapping dumps apply to map-type queues only.
			if (!(kIOTrackingQueueTypeMap & queue->type)) {
				break;
			}
			if (!data) {
				data = OSData::withCapacity(page_size);
			}

			IOTRecursiveLockLock(&queue->lock);
			num = queue->siteCount;
			idx = 0;
			for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
				queue_iterate(&queue->sites[qIdx], user, IOTrackingUser *, link)
				{
					assert(idx < num);
					idx++;

					kr = IOMemoryMapTracking(user, &mapTask, &mapAddress, &mapSize);
					if (kIOReturnSuccess != kr) {
						continue;
					}
					// Filter by requested process (if any) and minimum size.
					if (proc && (mapTask != proc_task(proc))) {
						continue;
					}
					if (size && (mapSize < size)) {
						continue;
					}

					siteInfo.count = 1;
					siteInfo.size[0] = mapSize;
					siteInfo.address = mapAddress;
					siteInfo.addressPID = task_pid(mapTask);
					siteInfo.btPID = user->btPID;

					// bt[0]: kernel backtrace, unslid for userspace symbolication.
					for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
						siteInfo.bt[0][j] = VM_KERNEL_UNSLIDE(user->bt[j]);
					}
					// bt[1]: user backtrace; stored frames are 32- or 64-bit
					// depending on the capturing process (user->user32).
					uint32_t * bt32 = (typeof(bt32)) & user->btUser[0];
					uint64_t * bt64 = (typeof(bt64))((void *) &user->btUser[0]);
					for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
						if (j >= user->userCount) {
							siteInfo.bt[1][j] = 0;
						} else if (user->user32) {
							siteInfo.bt[1][j] = bt32[j];
						} else {
							siteInfo.bt[1][j] = bt64[j];
						}
					}
					data->appendBytes(&siteInfo, sizeof(siteInfo));
				}
			}
			assert(idx == num);
			IOTRecursiveLockUnlock(&queue->lock);
			ret = kIOReturnSuccess;
			break;
		}

		default:
			ret = kIOReturnUnsupported;
			break;
		}
	}

	// For the leaks selector every alloc queue's lock is still held (see
	// above); IOTrackingLeaks() scans the collected instance pointers and
	// returns a new OSData of leak candidates (presumably consuming the
	// input -- behavior lives in IOTrackingLeaks, not visible here). Only
	// after that scan are the per-queue locks dropped.
	if ((kIOTrackingLeaks == selector) && data) {
		data = IOTrackingLeaks(data);
		queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
		{
			if (SkipName(options, queue->name, namesLen, names)) {
				continue;
			}
			if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
				continue;
			}
			IOTRecursiveLockUnlock(&queue->lock);
		}
	}

	lck_mtx_unlock(gIOTrackingLock);

	// Leak checking also consults the zone allocator for each named zone in
	// the packed <len><name>...<0> list.
	if ((kIOTrackingLeaks == selector) && namesLen && names) {
		const char * scan;
		const char * next;
		size_t sLen;

		if (!data) {
			data = OSData::withCapacity(4096 * sizeof(uintptr_t));
		}

		// <len><name>...<len><name><0>
		scan = names;
		do{
			sLen = scan[0];
			scan++;
			next = scan + sLen;
			if (next >= (names + namesLen)) {
				break;
			}
			kr = zone_leaks(scan, sLen, &ZoneSiteProc, data);
			if (KERN_SUCCESS == kr) {
				ret = kIOReturnSuccess;
			} else if (KERN_INVALID_NAME != kr) {
				ret = kIOReturnVMError;
			}
			scan = next;
		}while (scan < (names + namesLen));
	}

	// Sort the collected records largest-first for the "get" selectors.
	if (data) {
		switch (selector) {
		case kIOTrackingLeaks:
		case kIOTrackingGetTracking:
		case kIOTrackingGetMappings:
		{
			IOTrackingCallSiteInfo * siteInfos;
			siteInfos = (typeof(siteInfos))data->getBytesNoCopy();
			num = (data->getLength() / sizeof(*siteInfos));
			qsort(siteInfos, num, sizeof(*siteInfos), &IOTrackingCallSiteInfoCompare);
			break;
		}
		default: assert(false); break;
		}
	}

	// NOTE(review): unlike the guarded "if (result)" at function entry, this
	// dereference is unconditional -- a NULL 'result' with IOTRACKING enabled
	// would panic here. All current callers pass a valid pointer.
	*result = data;
	if (proc) {
		proc_rele(proc);
	}

#endif /* IOTRACKING */

	return ret;
}
1408
1409 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1410
1411 #include <IOKit/IOKitDiagnosticsUserClient.h>
1412
1413 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1414
1415 #undef super
1416 #define super IOUserClient
1417
1418 OSDefineMetaClassAndStructors(IOKitDiagnosticsClient, IOUserClient)
1419
1420 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1421
1422 IOUserClient * IOKitDiagnosticsClient::withTask(task_t owningTask)
1423 {
1424 IOKitDiagnosticsClient * inst;
1425
1426 inst = new IOKitDiagnosticsClient;
1427 if (inst && !inst->init()) {
1428 inst->release();
1429 inst = 0;
1430 }
1431
1432 return inst;
1433 }
1434
1435 IOReturn
1436 IOKitDiagnosticsClient::clientClose(void)
1437 {
1438 terminate();
1439 return kIOReturnSuccess;
1440 }
1441
1442 IOReturn
1443 IOKitDiagnosticsClient::setProperties(OSObject * properties)
1444 {
1445 IOReturn kr = kIOReturnUnsupported;
1446 return kr;
1447 }
1448
1449 IOReturn
1450 IOKitDiagnosticsClient::externalMethod(uint32_t selector, IOExternalMethodArguments * args,
1451 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference)
1452 {
1453 IOReturn ret = kIOReturnBadArgument;
1454 const IOKitDiagnosticsParameters * params;
1455 const char * names;
1456 size_t namesLen;
1457 OSObject * result;
1458
1459 if (args->structureInputSize < sizeof(IOKitDiagnosticsParameters)) {
1460 return kIOReturnBadArgument;
1461 }
1462 params = (typeof(params))args->structureInput;
1463 if (!params) {
1464 return kIOReturnBadArgument;
1465 }
1466
1467 names = 0;
1468 namesLen = args->structureInputSize - sizeof(IOKitDiagnosticsParameters);
1469 if (namesLen) {
1470 names = (typeof(names))(params + 1);
1471 }
1472
1473 ret = IOTrackingDebug(selector, params->options, params->value, params->tag, params->zsize, names, namesLen, params->size, &result);
1474
1475 if ((kIOReturnSuccess == ret) && args->structureVariableOutputData) {
1476 *args->structureVariableOutputData = result;
1477 } else if (result) {
1478 result->release();
1479 }
1480
1481 return ret;
1482 }
1483
1484 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */