iokit/Kernel/IOKitDebug.cpp
1 /*
2 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <sys/sysctl.h>
31 extern "C" {
32 #include <vm/vm_kern.h>
33 #include <kern/task.h>
34 #include <kern/debug.h>
35 }
36
37 #include <libkern/c++/OSContainers.h>
38 #include <libkern/OSDebug.h>
39 #include <libkern/c++/OSCPPDebug.h>
40 #include <kern/backtrace.h>
41
42 #include <IOKit/IOKitDebug.h>
43 #include <IOKit/IOLib.h>
44 #include <IOKit/assert.h>
45 #include <IOKit/IODeviceTreeSupport.h>
46 #include <IOKit/IOService.h>
47
48 #include "IOKitKernelInternal.h"
49
50 #ifdef IOKITDEBUG
51 #define DEBUG_INIT_VALUE IOKITDEBUG
52 #else
53 #define DEBUG_INIT_VALUE 0
54 #endif
55
56 SInt64 gIOKitDebug = DEBUG_INIT_VALUE;
57 SInt64 gIOKitTrace = 0;
58
59 #if DEVELOPMENT || DEBUG
60 #define IODEBUG_CTLFLAGS CTLFLAG_RW
61 #else
62 #define IODEBUG_CTLFLAGS CTLFLAG_RD
63 #endif
64
65 SYSCTL_QUAD(_debug, OID_AUTO, iotrace, CTLFLAG_RW | CTLFLAG_LOCKED, &gIOKitTrace, "trace io");
66
67 static int
68 sysctl_debug_iokit
69 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
70 {
71 SInt64 newValue;
72 int changed, error = sysctl_io_number(req, gIOKitDebug, sizeof(gIOKitDebug), &newValue, &changed);
73 if (changed) {
74 gIOKitDebug = ((gIOKitDebug & ~kIOKitDebugUserOptions) | (newValue & kIOKitDebugUserOptions));
75 }
76 return error;
77 }
78
79 SYSCTL_PROC(_debug, OID_AUTO, iokit,
80 CTLTYPE_QUAD | IODEBUG_CTLFLAGS | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED,
81 &gIOKitDebug, 0, sysctl_debug_iokit, "Q", "boot_arg io");
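/*
 * Illustrative only (not part of this file's build): from user space the
 * gIOKitDebug / gIOKitTrace values surface as the "debug.iokit" and
 * "debug.iotrace" sysctls and can be read with sysctl(3), e.g.
 *
 *     uint64_t flags = 0;
 *     size_t len = sizeof(flags);
 *     if (0 == sysctlbyname("debug.iokit", &flags, &len, NULL, 0)) {
 *         printf("IOKit debug flags: 0x%llx\n", flags);
 *     }
 *
 * Writes to "debug.iokit" are accepted only for the bits in
 * kIOKitDebugUserOptions (see sysctl_debug_iokit above) and only on
 * DEVELOPMENT/DEBUG kernels, where the OID is read-write.
 */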
82
83 int debug_malloc_size;
84 int debug_iomalloc_size;
85
86 vm_size_t debug_iomallocpageable_size;
87 int debug_container_malloc_size;
88 // int debug_ivars_size; // in OSObject.cpp
89
90 extern "C" {
91 #if 0
92 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
93 #else
94 #define DEBG(fmt, args...) { IOLog(fmt, ## args); }
95 #endif
96
97 void
98 IOPrintPlane( const IORegistryPlane * plane )
99 {
100 IORegistryEntry * next;
101 IORegistryIterator * iter;
102 OSOrderedSet * all;
103 char format[] = "%xxxs";
104 IOService * service;
105
106 iter = IORegistryIterator::iterateOver( plane );
107 assert( iter );
108 all = iter->iterateAll();
109 if (all) {
110 DEBG("Count %d\n", all->getCount());
111 all->release();
112 } else {
113 DEBG("Empty\n");
114 }
115
116 iter->reset();
117 while ((next = iter->getNextObjectRecursive())) {
118 snprintf(format + 1, sizeof(format) - 1, "%ds", 2 * next->getDepth( plane ));
119 DEBG( format, "");
120 DEBG( "\033[33m%s", next->getName( plane ));
121 if ((next->getLocation( plane ))) {
122 DEBG("@%s", next->getLocation( plane ));
123 }
124 DEBG("\033[0m <class %s", next->getMetaClass()->getClassName());
125 if ((service = OSDynamicCast(IOService, next))) {
126 DEBG(", busy %ld", (long) service->getBusyState());
127 }
128 DEBG( ">\n");
129 // IOSleep(250);
130 }
131 iter->release();
132 }
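/*
 * IOPrintPlane() logs an indented dump of the given registry plane, one entry
 * per line, with the entry name highlighted via ANSI escapes; illustrative
 * output shape:
 *
 *     <2*depth spaces>name@location <class ClassName, busy N>
 */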
133
134 void
135 db_piokjunk(void)
136 {
137 }
138
139 void
140 db_dumpiojunk( const IORegistryPlane * plane __unused )
141 {
142 }
143
144 void
145 IOPrintMemory( void )
146 {
147 // OSMetaClass::printInstanceCounts();
148
149 IOLog("\n"
150 "ivar kalloc() 0x%08x\n"
151 "malloc() 0x%08x\n"
152 "containers kalloc() 0x%08x\n"
153 "IOMalloc() 0x%08x\n"
154 "----------------------------------------\n",
155 debug_ivars_size,
156 debug_malloc_size,
157 debug_container_malloc_size,
158 debug_iomalloc_size
159 );
160 }
161 } /* extern "C" */
162
163 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
164
165 #define super OSObject
166 OSDefineMetaClassAndStructors(IOKitDiagnostics, OSObject)
167
168 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
169
170 OSObject * IOKitDiagnostics::diagnostics( void )
171 {
172 IOKitDiagnostics * diags;
173
174 diags = new IOKitDiagnostics;
175 if (diags && !diags->init()) {
176 diags->release();
177 diags = NULL;
178 }
179
180 return diags;
181 }
182
183 void
184 IOKitDiagnostics::updateOffset( OSDictionary * dict,
185 UInt64 value, const char * name )
186 {
187 OSNumber * off;
188
189 off = OSNumber::withNumber( value, 64 );
190 if (!off) {
191 return;
192 }
193
194 dict->setObject( name, off );
195 off->release();
196 }
197
198 bool
199 IOKitDiagnostics::serialize(OSSerialize *s) const
200 {
201 OSDictionary * dict;
202 bool ok;
203
204 dict = OSDictionary::withCapacity( 5 );
205 if (!dict) {
206 return false;
207 }
208
209 updateOffset( dict, debug_ivars_size, "Instance allocation" );
210 updateOffset( dict, debug_container_malloc_size, "Container allocation" );
211 updateOffset( dict, debug_iomalloc_size, "IOMalloc allocation" );
212 updateOffset( dict, debug_iomallocpageable_size, "Pageable allocation" );
213
214 OSMetaClass::serializeClassDictionary(dict);
215
216 ok = dict->serialize( s );
217
218 dict->release();
219
220 return ok;
221 }
222
223 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
224
225 #if IOTRACKING
226
227 #include <libkern/c++/OSCPPDebug.h>
228 #include <libkern/c++/OSKext.h>
229 #include <kern/zalloc.h>
230
231 __private_extern__ "C" void qsort(
232 void * array,
233 size_t nmembers,
234 size_t member_size,
235 int (*)(const void *, const void *));
236
237 extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
238 extern "C" ppnum_t pmap_valid_page(ppnum_t pn);
239
240 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
241
242 struct IOTRecursiveLock {
243 lck_mtx_t * mutex;
244 thread_t thread;
245 UInt32 count;
246 };
247
248 struct IOTrackingQueue {
249 queue_chain_t link;
250 IOTRecursiveLock lock;
251 const char * name;
252 uintptr_t btEntry;
253 size_t allocSize;
254 size_t minCaptureSize;
255 uint32_t siteCount;
256 uint32_t type;
257 uint32_t numSiteQs;
258 uint8_t captureOn;
259 queue_head_t sites[];
260 };
261
262 struct IOTrackingCallSite {
263 queue_chain_t link;
264 IOTrackingQueue * queue;
265 uint32_t crc;
266
267 vm_tag_t tag;
268 uint32_t count;
269 size_t size[2];
270 uintptr_t bt[kIOTrackingCallSiteBTs];
271
272 queue_head_t instances;
273 IOTracking * addresses;
274 };
275
276 struct IOTrackingLeaksRef {
277 uintptr_t * instances;
278 uint32_t zoneSize;
279 uint32_t count;
280 uint32_t found;
281 uint32_t foundzlen;
282 size_t bytes;
283 };
284
285 lck_mtx_t * gIOTrackingLock;
286 queue_head_t gIOTrackingQ;
287
288 enum{
289 kTrackingAddressFlagAllocated = 0x00000001
290 };
291
292 #if defined(__LP64__)
293 #define IOTrackingAddressFlags(ptr) (ptr->flags)
294 #else
295 #define IOTrackingAddressFlags(ptr) (ptr->tracking.flags)
296 #endif
297
298 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
299
300 static void
301 IOTRecursiveLockLock(IOTRecursiveLock * lock)
302 {
303 if (lock->thread == current_thread()) {
304 lock->count++;
305 } else {
306 lck_mtx_lock(lock->mutex);
307 assert(lock->thread == NULL);
308 assert(lock->count == 0);
309 lock->thread = current_thread();
310 lock->count = 1;
311 }
312 }
313
314 static void
315 IOTRecursiveLockUnlock(IOTRecursiveLock * lock)
316 {
317 assert(lock->thread == current_thread());
318 if (0 == (--lock->count)) {
319 lock->thread = NULL;
320 lck_mtx_unlock(lock->mutex);
321 }
322 }
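/*
 * Note: IOTRecursiveLock layers a recursion count on a plain lck_mtx_t so the
 * owning thread can safely re-enter (for example, IOTrackingRemove() is called
 * while the queue lock is already held by IOTrackingFree() below).  Typical
 * use is simply:
 *
 *     IOTRecursiveLockLock(&queue->lock);
 *     // ... walk or mutate queue->sites[] ...
 *     IOTRecursiveLockUnlock(&queue->lock);
 *
 * Unbalanced calls, or unlocking from a non-owning thread, trip the asserts
 * above.
 */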
323
324 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
325
326 void
327 IOTrackingInit(void)
328 {
329 queue_init(&gIOTrackingQ);
330 gIOTrackingLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
331 }
332
333 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
334
335 IOTrackingQueue *
336 IOTrackingQueueAlloc(const char * name, uintptr_t btEntry,
337 size_t allocSize, size_t minCaptureSize,
338 uint32_t type, uint32_t numSiteQs)
339 {
340 IOTrackingQueue * queue;
341 uint32_t idx;
342
343 if (!numSiteQs) {
344 numSiteQs = 1;
345 }
346 queue = (typeof(queue))kalloc(sizeof(IOTrackingQueue) + numSiteQs * sizeof(queue->sites[0]));
347 bzero(queue, sizeof(IOTrackingQueue));
348
349 queue->name = name;
350 queue->btEntry = btEntry;
351 queue->allocSize = allocSize;
352 queue->minCaptureSize = minCaptureSize;
353 queue->lock.mutex = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
354 queue->numSiteQs = numSiteQs;
355 queue->type = type;
356 enum { kFlags = (kIOTracking | kIOTrackingBoot) };
357 queue->captureOn = (kFlags == (kFlags & gIOKitDebug))
358 || (kIOTrackingQueueTypeDefaultOn & type);
359
360 for (idx = 0; idx < numSiteQs; idx++) {
361 queue_init(&queue->sites[idx]);
362 }
363
364 lck_mtx_lock(gIOTrackingLock);
365 queue_enter(&gIOTrackingQ, queue, IOTrackingQueue *, link);
366 lck_mtx_unlock(gIOTrackingLock);
367
368 return queue;
369 };
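/*
 * Usage sketch (hypothetical caller, not from this file): a subsystem that
 * wants its allocations tracked would create a queue once at init time and
 * wrap its allocator, along the lines of
 *
 *     static IOTrackingQueue * gExampleTrackingQ;   // assumed name
 *
 *     gExampleTrackingQ = IOTrackingQueueAlloc("example",
 *         0,                            // btEntry
 *         0,                            // allocSize (0: variable sized)
 *         0,                            // minCaptureSize
 *         kIOTrackingQueueTypeAlloc,
 *         16);                          // numSiteQs hash buckets
 *
 *     IOTrackingAlloc(gExampleTrackingQ, (uintptr_t) addr, size);  // after allocation
 *     IOTrackingFree(gExampleTrackingQ, (uintptr_t) addr, size);   // before free
 *
 * Capture is initially on only if both the kIOTracking and kIOTrackingBoot
 * bits are set in gIOKitDebug, or the queue type includes
 * kIOTrackingQueueTypeDefaultOn.
 */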
370
371 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
372
373 void
374 IOTrackingQueueFree(IOTrackingQueue * queue)
375 {
376 lck_mtx_lock(gIOTrackingLock);
377 IOTrackingReset(queue);
378 remque(&queue->link);
379 lck_mtx_unlock(gIOTrackingLock);
380
381 lck_mtx_free(queue->lock.mutex, IOLockGroup);
382
383 kfree(queue, sizeof(IOTrackingQueue) + queue->numSiteQs * sizeof(queue->sites[0]));
384 };
385
386 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
387
388 /* fasthash
389 * The MIT License
390 *
391 * Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com)
392 *
393 * Permission is hereby granted, free of charge, to any person
394 * obtaining a copy of this software and associated documentation
395 * files (the "Software"), to deal in the Software without
396 * restriction, including without limitation the rights to use, copy,
397 * modify, merge, publish, distribute, sublicense, and/or sell copies
398 * of the Software, and to permit persons to whom the Software is
399 * furnished to do so, subject to the following conditions:
400 *
401 * The above copyright notice and this permission notice shall be
402 * included in all copies or substantial portions of the Software.
403 *
404 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
405 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
406 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
407 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
408 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
409 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
410 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
411 * SOFTWARE.
412 */
413
414
415 // Compression function for Merkle-Damgard construction.
416 // This function is generated using the framework provided.
417 #define mix(h) ({ \
418 (h) ^= (h) >> 23; \
419 (h) *= 0x2127599bf4325c37ULL; \
420 (h) ^= (h) >> 47; })
421
422 static uint64_t
423 fasthash64(const void *buf, size_t len, uint64_t seed)
424 {
425 const uint64_t m = 0x880355f21e6d1965ULL;
426 const uint64_t *pos = (const uint64_t *)buf;
427 const uint64_t *end = pos + (len / 8);
428 const unsigned char *pos2;
429 uint64_t h = seed ^ (len * m);
430 uint64_t v;
431
432 while (pos != end) {
433 v = *pos++;
434 h ^= mix(v);
435 h *= m;
436 }
437
438 pos2 = (const unsigned char*)pos;
439 v = 0;
440
441 switch (len & 7) {
442 case 7: v ^= (uint64_t)pos2[6] << 48;
443 [[clang::fallthrough]];
444 case 6: v ^= (uint64_t)pos2[5] << 40;
445 [[clang::fallthrough]];
446 case 5: v ^= (uint64_t)pos2[4] << 32;
447 [[clang::fallthrough]];
448 case 4: v ^= (uint64_t)pos2[3] << 24;
449 [[clang::fallthrough]];
450 case 3: v ^= (uint64_t)pos2[2] << 16;
451 [[clang::fallthrough]];
452 case 2: v ^= (uint64_t)pos2[1] << 8;
453 [[clang::fallthrough]];
454 case 1: v ^= (uint64_t)pos2[0];
455 h ^= mix(v);
456 h *= m;
457 }
458
459 return mix(h);
460 }
461
462 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
463
464 static uint32_t
465 fasthash32(const void *buf, size_t len, uint32_t seed)
466 {
467 // the following trick converts the 64-bit hashcode to Fermat
468 // residue, which shall retain information from both the higher
469 // and lower parts of hashcode.
470 uint64_t h = fasthash64(buf, len, seed);
471 return h - (h >> 32);
472 }
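/*
 * In this file fasthash32() is used only to bucket and compare kernel
 * backtraces: IOTrackingAdd() below hashes the captured frames with
 * fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7) and uses the result
 * both as the site hash-queue index (crc % numSiteQs) and as the call-site
 * identity check, so distinct call sites whose hashes collide are coalesced
 * into one IOTrackingCallSite.
 */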
473
474 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
475
476 void
477 IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size)
478 {
479 uint32_t num;
480 proc_t self;
481
482 if (!queue->captureOn) {
483 return;
484 }
485 if (size < queue->minCaptureSize) {
486 return;
487 }
488
489 assert(!mem->link.next);
490
491 num = backtrace(&mem->bt[0], kIOTrackingCallSiteBTs, NULL);
492         num = 0;        // discard the kernel frame count; num is reused below for the user backtrace length
493 if ((kernel_task != current_task()) && (self = proc_self())) {
494 bool user_64 = false;
495 mem->btPID = proc_pid(self);
496 (void)backtrace_user(&mem->btUser[0], kIOTrackingCallSiteBTs - 1, &num,
497 &user_64, NULL);
498 mem->user32 = !user_64;
499 proc_rele(self);
500 }
501 assert(num <= kIOTrackingCallSiteBTs);
502 mem->userCount = num;
503
504 IOTRecursiveLockLock(&queue->lock);
505 queue_enter/*last*/ (&queue->sites[0], mem, IOTrackingUser *, link);
506 queue->siteCount++;
507 IOTRecursiveLockUnlock(&queue->lock);
508 }
509
510 void
511 IOTrackingRemoveUser(IOTrackingQueue * queue, IOTrackingUser * mem)
512 {
513 if (!mem->link.next) {
514 return;
515 }
516
517 IOTRecursiveLockLock(&queue->lock);
518 if (mem->link.next) {
519 remque(&mem->link);
520 assert(queue->siteCount);
521 queue->siteCount--;
522 }
523 IOTRecursiveLockUnlock(&queue->lock);
524 }
525
526 uint64_t gIOTrackingAddTime;
527
528 void
529 IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool address, vm_tag_t tag)
530 {
531 IOTrackingCallSite * site;
532 uint32_t crc, num;
533 uintptr_t bt[kIOTrackingCallSiteBTs + 1];
534 queue_head_t * que;
535
536 if (mem->site) {
537 return;
538 }
539 if (!queue->captureOn) {
540 return;
541 }
542 if (size < queue->minCaptureSize) {
543 return;
544 }
545
546 assert(!mem->link.next);
547
548 num = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1, NULL);
549 if (!num) {
550 return;
551 }
552 num--;
553 crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7);
554
555 IOTRecursiveLockLock(&queue->lock);
556 que = &queue->sites[crc % queue->numSiteQs];
557 queue_iterate(que, site, IOTrackingCallSite *, link)
558 {
559 if (tag != site->tag) {
560 continue;
561 }
562 if (crc == site->crc) {
563 break;
564 }
565 }
566
567 if (queue_end(que, (queue_entry_t) site)) {
568 site = (typeof(site))kalloc(sizeof(IOTrackingCallSite));
569
570 queue_init(&site->instances);
571 site->addresses = (IOTracking *) &site->instances;
572 site->queue = queue;
573 site->crc = crc;
574 site->count = 0;
575 site->tag = tag;
576 memset(&site->size[0], 0, sizeof(site->size));
577 bcopy(&bt[1], &site->bt[0], num * sizeof(site->bt[0]));
578 assert(num <= kIOTrackingCallSiteBTs);
579 bzero(&site->bt[num], (kIOTrackingCallSiteBTs - num) * sizeof(site->bt[0]));
580
581 queue_enter_first(que, site, IOTrackingCallSite *, link);
582 queue->siteCount++;
583 }
584
585 if (address) {
586 queue_enter/*last*/ (&site->instances, mem, IOTracking *, link);
587 if (queue_end(&site->instances, (queue_entry_t)site->addresses)) {
588 site->addresses = mem;
589 }
590 } else {
591 queue_enter_first(&site->instances, mem, IOTracking *, link);
592 }
593
594 mem->site = site;
595 site->size[0] += size;
596 site->count++;
597
598 IOTRecursiveLockUnlock(&queue->lock);
599 }
600
601 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
602
603 void
604 IOTrackingRemove(IOTrackingQueue * queue, IOTracking * mem, size_t size)
605 {
606 if (!mem->link.next) {
607 return;
608 }
609
610 IOTRecursiveLockLock(&queue->lock);
611 if (mem->link.next) {
612 assert(mem->site);
613
614 if (mem == mem->site->addresses) {
615 mem->site->addresses = (IOTracking *) queue_next(&mem->link);
616 }
617 remque(&mem->link);
618
619 assert(mem->site->count);
620 mem->site->count--;
621 assert(mem->site->size[0] >= size);
622 mem->site->size[0] -= size;
623 if (!mem->site->count) {
624 assert(queue_empty(&mem->site->instances));
625 assert(!mem->site->size[0]);
626 assert(!mem->site->size[1]);
627
628 remque(&mem->site->link);
629 assert(queue->siteCount);
630 queue->siteCount--;
631 kfree(mem->site, sizeof(IOTrackingCallSite));
632 }
633 mem->site = NULL;
634 }
635 IOTRecursiveLockUnlock(&queue->lock);
636 }
637
638 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
639
640 void
641 IOTrackingAlloc(IOTrackingQueue * queue, uintptr_t address, size_t size)
642 {
643 IOTrackingAddress * tracking;
644
645 if (!queue->captureOn) {
646 return;
647 }
648 if (size < queue->minCaptureSize) {
649 return;
650 }
651
652 address = ~address;
653 tracking = (typeof(tracking))kalloc(sizeof(IOTrackingAddress));
654 bzero(tracking, sizeof(IOTrackingAddress));
655 IOTrackingAddressFlags(tracking) |= kTrackingAddressFlagAllocated;
656 tracking->address = address;
657 tracking->size = size;
658
659 IOTrackingAdd(queue, &tracking->tracking, size, true, VM_KERN_MEMORY_NONE);
660 }
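/*
 * The address is stored complemented (address = ~address above), so the
 * tracking record itself never holds the raw pointer value and the
 * conservative scan in IOTrackingLeakScan() does not count the record's own
 * copy as a reference to the allocation.  IOTrackingFree() and the leak scan
 * undo the complement before comparing.
 */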
661
662 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
663
664 void
665 IOTrackingFree(IOTrackingQueue * queue, uintptr_t address, size_t size)
666 {
667 IOTrackingCallSite * site;
668 IOTrackingAddress * tracking;
669 uint32_t idx;
670 bool done;
671
672 address = ~address;
673 IOTRecursiveLockLock(&queue->lock);
674 done = false;
675 for (idx = 0; idx < queue->numSiteQs; idx++) {
676 queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
677 {
678 tracking = (IOTrackingAddress *) site->addresses;
679 while (!queue_end(&site->instances, &tracking->tracking.link)) {
680 if ((done = (address == tracking->address))) {
681 IOTrackingRemove(queue, &tracking->tracking, size);
682 kfree(tracking, sizeof(IOTrackingAddress));
683 break;
684 } else {
685 tracking = (IOTrackingAddress *) queue_next(&tracking->tracking.link);
686 }
687 }
688 if (done) {
689 break;
690 }
691 }
692 if (done) {
693 break;
694 }
695 }
696 IOTRecursiveLockUnlock(&queue->lock);
697 }
698
699 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
700
701 void
702 IOTrackingAccumSize(IOTrackingQueue * queue, IOTracking * mem, size_t size)
703 {
704 IOTRecursiveLockLock(&queue->lock);
705 if (mem->link.next) {
706 assert(mem->site);
707 assert((size > 0) || (mem->site->size[1] >= -size));
708 mem->site->size[1] += size;
709 }
710
711 IOTRecursiveLockUnlock(&queue->lock);
712 }
713
714 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
715
716 void
717 IOTrackingReset(IOTrackingQueue * queue)
718 {
719 IOTrackingCallSite * site;
720 IOTrackingUser * user;
721 IOTracking * tracking;
722 IOTrackingAddress * trackingAddress;
723 uint32_t idx;
724 bool addresses;
725
726 IOTRecursiveLockLock(&queue->lock);
727 for (idx = 0; idx < queue->numSiteQs; idx++) {
728 while (!queue_empty(&queue->sites[idx])) {
729 if (kIOTrackingQueueTypeMap & queue->type) {
730 queue_remove_first(&queue->sites[idx], user, IOTrackingUser *, link);
731 user->link.next = user->link.prev = NULL;
732 } else {
733 queue_remove_first(&queue->sites[idx], site, IOTrackingCallSite *, link);
734 addresses = false;
735 while (!queue_empty(&site->instances)) {
736 queue_remove_first(&site->instances, tracking, IOTracking *, link);
737 if (tracking == site->addresses) {
738 addresses = true;
739 }
740 if (addresses) {
741 trackingAddress = (typeof(trackingAddress))tracking;
742 if (kTrackingAddressFlagAllocated & IOTrackingAddressFlags(trackingAddress)) {
743 kfree(tracking, sizeof(IOTrackingAddress));
744 }
745 }
746 }
747 kfree(site, sizeof(IOTrackingCallSite));
748 }
749 }
750 }
751 queue->siteCount = 0;
752 IOTRecursiveLockUnlock(&queue->lock);
753 }
754
755 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
756
757 static int
758 IOTrackingCallSiteInfoCompare(const void * left, const void * right)
759 {
760 IOTrackingCallSiteInfo * l = (typeof(l))left;
761 IOTrackingCallSiteInfo * r = (typeof(r))right;
762 size_t lsize, rsize;
763
764 rsize = r->size[0] + r->size[1];
765 lsize = l->size[0] + l->size[1];
766
767 return (rsize > lsize) ? 1 : ((rsize == lsize) ? 0 : -1);
768 }
769
770 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
771
772 static int
773 IOTrackingAddressCompare(const void * left, const void * right)
774 {
775 IOTracking * instance;
776 uintptr_t inst, laddr, raddr;
777
778 inst = ((typeof(inst) *)left)[0];
779 instance = (typeof(instance))INSTANCE_GET(inst);
780 if (kInstanceFlagAddress & inst) {
781 laddr = ~((IOTrackingAddress *)instance)->address;
782 } else {
783 laddr = (uintptr_t) (instance + 1);
784 }
785
786 inst = ((typeof(inst) *)right)[0];
787 instance = (typeof(instance))(inst & ~kInstanceFlags);
788 if (kInstanceFlagAddress & inst) {
789 raddr = ~((IOTrackingAddress *)instance)->address;
790 } else {
791 raddr = (uintptr_t) (instance + 1);
792 }
793
794 return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
795 }
796
797
798 static int
799 IOTrackingZoneElementCompare(const void * left, const void * right)
800 {
801 uintptr_t inst, laddr, raddr;
802
803 inst = ((typeof(inst) *)left)[0];
804 laddr = INSTANCE_PUT(inst);
805 inst = ((typeof(inst) *)right)[0];
806 raddr = INSTANCE_PUT(inst);
807
808 return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
809 }
810
811 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
812
813 static void
814 CopyOutKernelBacktrace(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteInfo)
815 {
816 uint32_t j;
817 mach_vm_address_t bt, btEntry;
818
819 btEntry = site->queue->btEntry;
820 for (j = 0; j < kIOTrackingCallSiteBTs; j++) {
821 bt = site->bt[j];
822 if (btEntry
823 && (!bt || (j == (kIOTrackingCallSiteBTs - 1)))) {
824 bt = btEntry;
825 btEntry = 0;
826 }
827 siteInfo->bt[0][j] = VM_KERNEL_UNSLIDE(bt);
828 }
829 }
830
831 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
832
833 static void
834 IOTrackingLeakScan(void * refcon)
835 {
836 IOTrackingLeaksRef * ref = (typeof(ref))refcon;
837 uintptr_t * instances;
838 IOTracking * instance;
839 uint64_t vaddr, vincr;
840 ppnum_t ppn;
841 uintptr_t ptr, addr, vphysaddr, inst;
842 size_t size, origsize;
843 uint32_t baseIdx, lim, ptrIdx, count;
844 boolean_t is;
845 AbsoluteTime deadline;
846
847 instances = ref->instances;
848 count = ref->count;
849 size = origsize = ref->zoneSize;
850
851 for (deadline = 0, vaddr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
852 ;
853 vaddr += vincr) {
854 if ((mach_absolute_time() > deadline) || (vaddr >= VM_MAX_KERNEL_ADDRESS)) {
855 if (deadline) {
856 ml_set_interrupts_enabled(is);
857 IODelay(10);
858 }
859 if (vaddr >= VM_MAX_KERNEL_ADDRESS) {
860 break;
861 }
862 is = ml_set_interrupts_enabled(false);
863 clock_interval_to_deadline(10, kMillisecondScale, &deadline);
864 }
865
866 ppn = kernel_pmap_present_mapping(vaddr, &vincr, &vphysaddr);
867 // check noencrypt to avoid VM structs (map entries) with pointers
868 if (ppn && (!pmap_valid_page(ppn) || (!ref->zoneSize && pmap_is_noencrypt(ppn)))) {
869 ppn = 0;
870 }
871 if (!ppn) {
872 continue;
873 }
874
875 for (ptrIdx = 0; ptrIdx < (page_size / sizeof(uintptr_t)); ptrIdx++) {
876 ptr = ((uintptr_t *)vphysaddr)[ptrIdx];
877 #if defined(HAS_APPLE_PAC)
878 // strip possible ptrauth signature from candidate data pointer
879 ptr = (uintptr_t)ptrauth_strip((void*)ptr, ptrauth_key_process_independent_data);
880 #endif /* defined(HAS_APPLE_PAC) */
881
882 for (lim = count, baseIdx = 0; lim; lim >>= 1) {
883 inst = instances[baseIdx + (lim >> 1)];
884 instance = (typeof(instance))INSTANCE_GET(inst);
885
886 if (ref->zoneSize) {
887 addr = INSTANCE_PUT(inst) & ~kInstanceFlags;
888 } else if (kInstanceFlagAddress & inst) {
889 addr = ~((IOTrackingAddress *)instance)->address;
890 origsize = size = ((IOTrackingAddress *)instance)->size;
891 if (!size) {
892 size = 1;
893 }
894 } else {
895 addr = (uintptr_t) (instance + 1);
896 origsize = size = instance->site->queue->allocSize;
897 }
898 if ((ptr >= addr) && (ptr < (addr + size))
899
900 && (((vaddr + ptrIdx * sizeof(uintptr_t)) < addr)
901 || ((vaddr + ptrIdx * sizeof(uintptr_t)) >= (addr + size)))) {
902 if (!(kInstanceFlagReferenced & inst)) {
903 inst |= kInstanceFlagReferenced;
904 instances[baseIdx + (lim >> 1)] = inst;
905 ref->found++;
906 if (!origsize) {
907 ref->foundzlen++;
908 }
909 }
910 break;
911 }
912 if (ptr > addr) {
913 // move right
914 baseIdx += (lim >> 1) + 1;
915 lim--;
916 }
917 // else move left
918 }
919 }
920 ref->bytes += page_size;
921 }
922 }
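/*
 * Summary of the conservative scan above: every resident kernel page between
 * VM_MIN_KERNEL_AND_KEXT_ADDRESS and VM_MAX_KERNEL_ADDRESS is read through the
 * alias returned by kernel_pmap_present_mapping(), each pointer-sized word
 * (with any PAC signature stripped) is treated as a candidate pointer, and the
 * sorted instances[] array is binary-searched for an allocation containing
 * that value.  Words lying inside the allocation itself are ignored, so an
 * object does not count as a reference to itself.  Matches are marked
 * kInstanceFlagReferenced; whatever remains unmarked is reported as a possible
 * leak by the callers below.  Interrupts are disabled in 10 ms slices, with a
 * short IODelay() between slices, while page contents are examined.
 */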
923
924 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
925
926 extern "C" void
927 zone_leaks_scan(uintptr_t * instances, uint32_t count, uint32_t zoneSize, uint32_t * found)
928 {
929 IOTrackingLeaksRef ref;
930 IOTrackingCallSiteInfo siteInfo;
931 uint32_t idx;
932
933 qsort(instances, count, sizeof(*instances), &IOTrackingZoneElementCompare);
934
935 bzero(&siteInfo, sizeof(siteInfo));
936 bzero(&ref, sizeof(ref));
937 ref.instances = instances;
938 ref.count = count;
939 ref.zoneSize = zoneSize;
940
941 for (idx = 0; idx < 2; idx++) {
942 ref.bytes = 0;
943 IOTrackingLeakScan(&ref);
944 IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d\n", idx, ref.bytes / 1024 / 1024, count, ref.found);
945 if (count <= ref.found) {
946 break;
947 }
948 }
949
950 *found = ref.found;
951 }
952
953 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
954
955 static void
956 ZoneSiteProc(void * refCon, uint32_t siteCount, uint32_t zoneSize,
957 uintptr_t * backtrace, uint32_t btCount)
958 {
959 IOTrackingCallSiteInfo siteInfo;
960 OSData * leakData;
961 uint32_t idx;
962
963 leakData = (typeof(leakData))refCon;
964
965 bzero(&siteInfo, sizeof(siteInfo));
966 siteInfo.count = siteCount;
967 siteInfo.size[0] = zoneSize * siteCount;
968
969 for (idx = 0; (idx < btCount) && (idx < kIOTrackingCallSiteBTs); idx++) {
970 siteInfo.bt[0][idx] = VM_KERNEL_UNSLIDE(backtrace[idx]);
971 }
972
973 leakData->appendBytes(&siteInfo, sizeof(siteInfo));
974 }
975
976
977 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
978
979 static OSData *
980 IOTrackingLeaks(LIBKERN_CONSUMED OSData * data)
981 {
982 IOTrackingLeaksRef ref;
983 IOTrackingCallSiteInfo siteInfo;
984 IOTrackingCallSite * site;
985 OSData * leakData;
986 uintptr_t * instances;
987 IOTracking * instance;
988 uintptr_t inst;
989 uint32_t count, idx, numSites, dups, siteCount;
990
991 instances = (typeof(instances))data->getBytesNoCopy();
992 count = (data->getLength() / sizeof(*instances));
993 qsort(instances, count, sizeof(*instances), &IOTrackingAddressCompare);
994
995 bzero(&siteInfo, sizeof(siteInfo));
996 bzero(&ref, sizeof(ref));
997 ref.instances = instances;
998 ref.count = count;
999 for (idx = 0; idx < 2; idx++) {
1000 ref.bytes = 0;
1001 IOTrackingLeakScan(&ref);
1002 IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d (zlen %d)\n", idx, ref.bytes / 1024 / 1024, count, ref.found, ref.foundzlen);
1003 if (count <= ref.found) {
1004 break;
1005 }
1006 }
1007
1008 leakData = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));
1009
1010 for (numSites = 0, idx = 0; idx < count; idx++) {
1011 inst = instances[idx];
1012 if (kInstanceFlagReferenced & inst) {
1013 continue;
1014 }
1015 instance = (typeof(instance))INSTANCE_GET(inst);
1016 site = instance->site;
1017 instances[numSites] = (uintptr_t) site;
1018 numSites++;
1019 }
1020
1021 for (idx = 0; idx < numSites; idx++) {
1022 inst = instances[idx];
1023 if (!inst) {
1024 continue;
1025 }
1026 site = (typeof(site))inst;
1027 for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
1028 if (instances[dups] == (uintptr_t) site) {
1029 siteCount++;
1030 instances[dups] = 0;
1031 }
1032 }
1033 siteInfo.count = siteCount;
1034 siteInfo.size[0] = (site->size[0] * site->count) / siteCount;
1035 siteInfo.size[1] = (site->size[1] * site->count) / siteCount;
1036 CopyOutKernelBacktrace(site, &siteInfo);
1037 leakData->appendBytes(&siteInfo, sizeof(siteInfo));
1038 }
1039 data->release();
1040
1041 return leakData;
1042 }
1043
1044 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1045
1046 static bool
1047 SkipName(uint32_t options, const char * name, size_t namesLen, const char * names)
1048 {
1049 const char * scan;
1050 const char * next;
1051 bool exclude, found;
1052 size_t qLen, sLen;
1053
1054 if (!namesLen || !names) {
1055 return false;
1056 }
1057 // <len><name>...<len><name><0>
1058 exclude = (0 != (kIOTrackingExcludeNames & options));
1059 qLen = strlen(name);
1060 scan = names;
1061 found = false;
1062 do{
1063 sLen = scan[0];
1064 scan++;
1065 next = scan + sLen;
1066 if (next >= (names + namesLen)) {
1067 break;
1068 }
1069 found = ((sLen == qLen) && !strncmp(scan, name, sLen));
1070 scan = next;
1071 }while (!found && (scan < (names + namesLen)));
1072
1073 return !(exclude ^ found);
1074 }
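/*
 * Example of the packed name-list format parsed above (length-prefixed
 * entries, NUL-terminated overall); the queue names here are hypothetical:
 *
 *     const char names[] = "\x03" "foo" "\x03" "bar";  // two entries
 *     size_t namesLen = sizeof(names);                 // 9, including trailing NUL
 *
 * Without kIOTrackingExcludeNames the list selects the named queues; with it
 * set in options, a match excludes the queue instead.
 */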
1075
1076 #endif /* IOTRACKING */
1077
1078 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1079
1080 static kern_return_t
1081 IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value,
1082 uint32_t intag, uint32_t inzsize,
1083 const char * names, size_t namesLen,
1084 size_t size, OSObject ** result)
1085 {
1086 kern_return_t ret;
1087 OSData * data;
1088
1089 if (result) {
1090 *result = NULL;
1091 }
1092 data = NULL;
1093 ret = kIOReturnNotReady;
1094
1095 #if IOTRACKING
1096
1097 kern_return_t kr;
1098 IOTrackingQueue * queue;
1099 IOTracking * instance;
1100 IOTrackingCallSite * site;
1101 IOTrackingCallSiteInfo siteInfo;
1102 IOTrackingUser * user;
1103 task_t mapTask;
1104 mach_vm_address_t mapAddress;
1105 mach_vm_size_t mapSize;
1106 uint32_t num, idx, qIdx;
1107 uintptr_t instFlags;
1108 proc_t proc;
1109 bool addresses;
1110
1111 ret = kIOReturnNotFound;
1112 proc = NULL;
1113 if (kIOTrackingGetMappings == selector) {
1114 if (value != -1ULL) {
1115 proc = proc_find(value);
1116 if (!proc) {
1117 return kIOReturnNotFound;
1118 }
1119 }
1120 }
1121
1122 bzero(&siteInfo, sizeof(siteInfo));
1123 lck_mtx_lock(gIOTrackingLock);
1124 queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
1125 {
1126 if (SkipName(options, queue->name, namesLen, names)) {
1127 continue;
1128 }
1129
1130 if (!(kIOTracking & gIOKitDebug) && (kIOTrackingQueueTypeAlloc & queue->type)) {
1131 continue;
1132 }
1133
1134 switch (selector) {
1135 case kIOTrackingResetTracking:
1136 {
1137 IOTrackingReset(queue);
1138 ret = kIOReturnSuccess;
1139 break;
1140 }
1141
1142 case kIOTrackingStartCapture:
1143 case kIOTrackingStopCapture:
1144 {
1145 queue->captureOn = (kIOTrackingStartCapture == selector);
1146 ret = kIOReturnSuccess;
1147 break;
1148 }
1149
1150 case kIOTrackingSetMinCaptureSize:
1151 {
1152 queue->minCaptureSize = size;
1153 ret = kIOReturnSuccess;
1154 break;
1155 }
1156
1157 case kIOTrackingLeaks:
1158 {
1159 if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
1160 break;
1161 }
1162
1163 if (!data) {
1164 data = OSData::withCapacity(1024 * sizeof(uintptr_t));
1165 }
1166
1167 IOTRecursiveLockLock(&queue->lock);
1168 for (idx = 0; idx < queue->numSiteQs; idx++) {
1169 queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
1170 {
1171 addresses = false;
1172 queue_iterate(&site->instances, instance, IOTracking *, link)
1173 {
1174 if (instance == site->addresses) {
1175 addresses = true;
1176 }
1177 instFlags = (typeof(instFlags))instance;
1178 if (addresses) {
1179 instFlags |= kInstanceFlagAddress;
1180 }
1181 data->appendBytes(&instFlags, sizeof(instFlags));
1182 }
1183 }
1184 }
1185 // queue is locked
1186 ret = kIOReturnSuccess;
1187 break;
1188 }
1189
1190
1191 case kIOTrackingGetTracking:
1192 {
1193 if (kIOTrackingQueueTypeMap & queue->type) {
1194 break;
1195 }
1196
1197 if (!data) {
1198 data = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));
1199 }
1200
1201 IOTRecursiveLockLock(&queue->lock);
1202 num = queue->siteCount;
1203 idx = 0;
1204 for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
1205 queue_iterate(&queue->sites[qIdx], site, IOTrackingCallSite *, link)
1206 {
1207 assert(idx < num);
1208 idx++;
1209
1210 size_t tsize[2];
1211 uint32_t count = site->count;
1212 tsize[0] = site->size[0];
1213 tsize[1] = site->size[1];
1214
1215 if (intag || inzsize) {
1216 uintptr_t addr;
1217 vm_size_t size, zoneSize;
1218 vm_tag_t tag;
1219
1220 if (kIOTrackingQueueTypeAlloc & queue->type) {
1221 addresses = false;
1222 count = 0;
1223 tsize[0] = tsize[1] = 0;
1224 queue_iterate(&site->instances, instance, IOTracking *, link)
1225 {
1226 if (instance == site->addresses) {
1227 addresses = true;
1228 }
1229
1230 if (addresses) {
1231 addr = ~((IOTrackingAddress *)instance)->address;
1232 } else {
1233 addr = (uintptr_t) (instance + 1);
1234 }
1235
1236 kr = vm_kern_allocation_info(addr, &size, &tag, &zoneSize);
1237 if (KERN_SUCCESS != kr) {
1238 continue;
1239 }
1240
1241 if ((VM_KERN_MEMORY_NONE != intag) && (intag != tag)) {
1242 continue;
1243 }
1244 if (inzsize && (inzsize != zoneSize)) {
1245 continue;
1246 }
1247
1248 count++;
1249 tsize[0] += size;
1250 }
1251 } else {
1252 if (!intag || inzsize || (intag != site->tag)) {
1253 continue;
1254 }
1255 }
1256 }
1257
1258 if (!count) {
1259 continue;
1260 }
1261 if (size && ((tsize[0] + tsize[1]) < size)) {
1262 continue;
1263 }
1264
1265 siteInfo.count = count;
1266 siteInfo.size[0] = tsize[0];
1267 siteInfo.size[1] = tsize[1];
1268
1269 CopyOutKernelBacktrace(site, &siteInfo);
1270 data->appendBytes(&siteInfo, sizeof(siteInfo));
1271 }
1272 }
1273 assert(idx == num);
1274 IOTRecursiveLockUnlock(&queue->lock);
1275 ret = kIOReturnSuccess;
1276 break;
1277 }
1278
1279 case kIOTrackingGetMappings:
1280 {
1281 if (!(kIOTrackingQueueTypeMap & queue->type)) {
1282 break;
1283 }
1284 if (!data) {
1285 data = OSData::withCapacity(page_size);
1286 }
1287
1288 IOTRecursiveLockLock(&queue->lock);
1289 num = queue->siteCount;
1290 idx = 0;
1291 for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
1292 queue_iterate(&queue->sites[qIdx], user, IOTrackingUser *, link)
1293 {
1294 assert(idx < num);
1295 idx++;
1296
1297 kr = IOMemoryMapTracking(user, &mapTask, &mapAddress, &mapSize);
1298 if (kIOReturnSuccess != kr) {
1299 continue;
1300 }
1301 if (proc && (mapTask != proc_task(proc))) {
1302 continue;
1303 }
1304 if (size && (mapSize < size)) {
1305 continue;
1306 }
1307
1308 siteInfo.count = 1;
1309 siteInfo.size[0] = mapSize;
1310 siteInfo.address = mapAddress;
1311 siteInfo.addressPID = task_pid(mapTask);
1312 siteInfo.btPID = user->btPID;
1313
1314 for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
1315 siteInfo.bt[0][j] = VM_KERNEL_UNSLIDE(user->bt[j]);
1316 }
1317 uint32_t * bt32 = (typeof(bt32)) & user->btUser[0];
1318 uint64_t * bt64 = (typeof(bt64))((void *) &user->btUser[0]);
1319 for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
1320 if (j >= user->userCount) {
1321 siteInfo.bt[1][j] = 0;
1322 } else if (user->user32) {
1323 siteInfo.bt[1][j] = bt32[j];
1324 } else {
1325 siteInfo.bt[1][j] = bt64[j];
1326 }
1327 }
1328 data->appendBytes(&siteInfo, sizeof(siteInfo));
1329 }
1330 }
1331 assert(idx == num);
1332 IOTRecursiveLockUnlock(&queue->lock);
1333 ret = kIOReturnSuccess;
1334 break;
1335 }
1336
1337 default:
1338 ret = kIOReturnUnsupported;
1339 break;
1340 }
1341 }
1342
1343 if ((kIOTrackingLeaks == selector) && data) {
1344 data = IOTrackingLeaks(data);
1345 queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
1346 {
1347 if (SkipName(options, queue->name, namesLen, names)) {
1348 continue;
1349 }
1350 if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
1351 continue;
1352 }
1353 IOTRecursiveLockUnlock(&queue->lock);
1354 }
1355 }
1356
1357 lck_mtx_unlock(gIOTrackingLock);
1358
1359 if ((kIOTrackingLeaks == selector) && namesLen && names) {
1360 const char * scan;
1361 const char * next;
1362 size_t sLen;
1363
1364 if (!data) {
1365 data = OSData::withCapacity(4096 * sizeof(uintptr_t));
1366 }
1367
1368 // <len><name>...<len><name><0>
1369 scan = names;
1370 do{
1371 sLen = scan[0];
1372 scan++;
1373 next = scan + sLen;
1374 if (next >= (names + namesLen)) {
1375 break;
1376 }
1377 kr = zone_leaks(scan, sLen, &ZoneSiteProc, data);
1378 if (KERN_SUCCESS == kr) {
1379 ret = kIOReturnSuccess;
1380 } else if (KERN_INVALID_NAME != kr) {
1381 ret = kIOReturnVMError;
1382 }
1383 scan = next;
1384 }while (scan < (names + namesLen));
1385 }
1386
1387 if (data) {
1388 switch (selector) {
1389 case kIOTrackingLeaks:
1390 case kIOTrackingGetTracking:
1391 case kIOTrackingGetMappings:
1392 {
1393 IOTrackingCallSiteInfo * siteInfos;
1394 siteInfos = (typeof(siteInfos))data->getBytesNoCopy();
1395 num = (data->getLength() / sizeof(*siteInfos));
1396 qsort(siteInfos, num, sizeof(*siteInfos), &IOTrackingCallSiteInfoCompare);
1397 break;
1398 }
1399 default: assert(false); break;
1400 }
1401 }
1402
1403 *result = data;
1404 if (proc) {
1405 proc_rele(proc);
1406 }
1407
1408 #endif /* IOTRACKING */
1409
1410 return ret;
1411 }
1412
1413 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1414
1415 #include <IOKit/IOKitDiagnosticsUserClient.h>
1416
1417 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1418
1419 #undef super
1420 #define super IOUserClient
1421
1422 OSDefineMetaClassAndStructors(IOKitDiagnosticsClient, IOUserClient)
1423
1424 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1425
1426 IOUserClient * IOKitDiagnosticsClient::withTask(task_t owningTask)
1427 {
1428 IOKitDiagnosticsClient * inst;
1429
1430 inst = new IOKitDiagnosticsClient;
1431 if (inst && !inst->init()) {
1432 inst->release();
1433 inst = NULL;
1434 }
1435
1436 return inst;
1437 }
1438
1439 IOReturn
1440 IOKitDiagnosticsClient::clientClose(void)
1441 {
1442 terminate();
1443 return kIOReturnSuccess;
1444 }
1445
1446 IOReturn
1447 IOKitDiagnosticsClient::setProperties(OSObject * properties)
1448 {
1449 IOReturn kr = kIOReturnUnsupported;
1450 return kr;
1451 }
1452
1453 IOReturn
1454 IOKitDiagnosticsClient::externalMethod(uint32_t selector, IOExternalMethodArguments * args,
1455 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference)
1456 {
1457 IOReturn ret = kIOReturnBadArgument;
1458 const IOKitDiagnosticsParameters * params;
1459 const char * names;
1460 size_t namesLen;
1461 OSObject * result;
1462
1463 if (args->structureInputSize < sizeof(IOKitDiagnosticsParameters)) {
1464 return kIOReturnBadArgument;
1465 }
1466 params = (typeof(params))args->structureInput;
1467 if (!params) {
1468 return kIOReturnBadArgument;
1469 }
1470
1471 names = NULL;
1472 namesLen = args->structureInputSize - sizeof(IOKitDiagnosticsParameters);
1473 if (namesLen) {
1474 names = (typeof(names))(params + 1);
1475 }
1476
1477 ret = IOTrackingDebug(selector, params->options, params->value, params->tag, params->zsize, names, namesLen, params->size, &result);
1478
1479 if ((kIOReturnSuccess == ret) && args->structureVariableOutputData) {
1480 *args->structureVariableOutputData = result;
1481 } else if (result) {
1482 result->release();
1483 }
1484
1485 return ret;
1486 }
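/*
 * Rough user-space view (hypothetical sketch, not from this file): callers
 * build an IOKitDiagnosticsParameters structure, optionally followed by the
 * packed name list described near SkipName(), pass it as the structure input
 * of an external-method call with one of the kIOTracking* selectors, and read
 * the result back as a variable-length array of IOTrackingCallSiteInfo
 * records (already sorted by IOTrackingCallSiteInfoCompare above).  The exact
 * IOKit.framework call details are omitted here.
 */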
1487
1488 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */