/*
 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/sysctl.h>

#include <vm/vm_kern.h>
#include <kern/task.h>
#include <kern/debug.h>

#include <libkern/c++/OSContainers.h>
#include <libkern/OSDebug.h>
#include <libkern/c++/OSCPPDebug.h>
#include <kern/backtrace.h>

#include <IOKit/IOKitDebug.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>
#include <IOKit/IODeviceTreeSupport.h>
#include <IOKit/IOService.h>

#include "IOKitKernelInternal.h"
#ifdef IOKITDEBUG
#define DEBUG_INIT_VALUE IOKITDEBUG
#else
#define DEBUG_INIT_VALUE 0
#endif

SInt64 gIOKitDebug = DEBUG_INIT_VALUE;
SInt64 gIOKitTrace = 0;

#if DEVELOPMENT || DEBUG
#define IODEBUG_CTLFLAGS        CTLFLAG_RW
#else
#define IODEBUG_CTLFLAGS        CTLFLAG_RD
#endif
SYSCTL_QUAD(_debug, OID_AUTO, iotrace, CTLFLAG_RW | CTLFLAG_LOCKED, &gIOKitTrace, "trace io");
static int
sysctl_debug_iokit(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    SInt64 newValue;
    int changed, error = sysctl_io_number(req, gIOKitDebug, sizeof(gIOKitDebug), &newValue, &changed);
    if (changed) {
        // only the user-settable option bits may be modified through sysctl
        gIOKitDebug = ((gIOKitDebug & ~kIOKitDebugUserOptions) | (newValue & kIOKitDebugUserOptions));
    }
    return error;
}
SYSCTL_PROC(_debug, OID_AUTO, iokit,
    CTLTYPE_QUAD | IODEBUG_CTLFLAGS | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &gIOKitDebug, 0, sysctl_debug_iokit, "Q", "boot_arg io");
int          debug_malloc_size;
int          debug_iomalloc_size;

vm_size_t    debug_iomallocpageable_size;
int          debug_container_malloc_size;
// int       debug_ivars_size; // in OSObject.cpp
#if 0   /* kprintf variant; the guard condition is an assumption, elided in this copy */
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      { IOLog(fmt, ## args); }
#endif
void
IOPrintPlane( const IORegistryPlane * plane )
{
    IORegistryEntry *    next;
    IORegistryIterator * iter;
    OSOrderedSet *       all;
    IOService *          service;
    char                 format[] = "%xxxs";

    iter = IORegistryIterator::iterateOver( plane );
    all  = iter->iterateAll();
    if (all) {
        DEBG("Count %d\n", all->getCount());
        all->release();
    }

    while ((next = iter->getNextObjectRecursive())) {
        snprintf(format + 1, sizeof(format) - 1, "%ds", 2 * next->getDepth( plane ));
        DEBG( "\033[33m%s", next->getName( plane ));
        if ((next->getLocation( plane ))) {
            DEBG("@%s", next->getLocation( plane ));
        }
        DEBG("\033[0m <class %s", next->getMetaClass()->getClassName());
        if ((service = OSDynamicCast(IOService, next))) {
            DEBG(", busy %ld", (long) service->getBusyState());
        }
    }
    iter->release();
}
void
db_dumpiojunk( const IORegistryPlane * plane __unused )
{
}
void
IOPrintMemory( void )
{
//  OSMetaClass::printInstanceCounts();

    IOLog("\n"
        "ivar kalloc()       0x%08x\n"
        "containers kalloc() 0x%08x\n"
        "IOMalloc()          0x%08x\n"
        "----------------------------------------\n",
        debug_ivars_size,
        debug_container_malloc_size,
        debug_iomalloc_size);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super OSObject
OSDefineMetaClassAndStructors(IOKitDiagnostics, OSObject)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSObject * IOKitDiagnostics::diagnostics( void )
{
    IOKitDiagnostics * diags;

    diags = new IOKitDiagnostics;
    if (diags && !diags->init()) {
        diags->release();
        diags = NULL;
    }

    return diags;
}
void
IOKitDiagnostics::updateOffset( OSDictionary * dict,
    UInt64 value, const char * name )
{
    OSNumber * off;

    off = OSNumber::withNumber( value, 64 );
    if (!off) {
        return;
    }

    dict->setObject( name, off );
    off->release();
}
bool
IOKitDiagnostics::serialize(OSSerialize *s) const
{
    OSDictionary * dict;
    bool           ok;

    dict = OSDictionary::withCapacity( 5 );
    if (!dict) {
        return false;
    }

    updateOffset( dict, debug_ivars_size, "Instance allocation" );
    updateOffset( dict, debug_container_malloc_size, "Container allocation" );
    updateOffset( dict, debug_iomalloc_size, "IOMalloc allocation" );
    updateOffset( dict, debug_iomallocpageable_size, "Pageable allocation" );

    OSMetaClass::serializeClassDictionary(dict);

    ok = dict->serialize( s );
    dict->release();

    return ok;
}
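#if 0
// Illustrative sketch, not part of the original file: one way an in-kernel
// caller could dump the diagnostics counters, by serializing the object
// returned from IOKitDiagnostics::diagnostics() into XML. DumpIOKitDiagnostics
// is a hypothetical helper name used only for this example.
static void
DumpIOKitDiagnostics(void)
{
    OSObject    * diags = IOKitDiagnostics::diagnostics();
    OSSerialize * s     = OSSerialize::withCapacity(4096);

    if (diags && s && diags->serialize(s)) {
        IOLog("%s\n", s->text());   // XML dictionary of allocation totals
    }
    if (s) {
        s->release();
    }
    if (diags) {
        diags->release();
    }
}
#endif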
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if IOTRACKING

#include <libkern/c++/OSCPPDebug.h>
#include <libkern/c++/OSKext.h>
#include <kern/zalloc.h>

__private_extern__ "C" void qsort(
    void * array,
    size_t nmembers,
    size_t member_size,
    int (*)(const void *, const void *));
extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern "C" ppnum_t pmap_valid_page(ppnum_t pn);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOTRecursiveLock {
    lck_mtx_t * mutex;
    thread_t    thread;
    UInt32      count;
};

struct IOTrackingQueue {
    queue_chain_t    link;
    IOTRecursiveLock lock;
    const char *     name;
    uintptr_t        btEntry;
    size_t           allocSize;
    size_t           minCaptureSize;
    uint32_t         siteCount;
    uint32_t         type;
    uint32_t         numSiteQs;
    uint8_t          captureOn;
    queue_head_t     sites[];
};

struct IOTrackingCallSite {
    queue_chain_t     link;
    IOTrackingQueue * queue;
    uint32_t          crc;
    vm_tag_t          tag;
    uint32_t          count;
    size_t            size[2];
    uintptr_t         bt[kIOTrackingCallSiteBTs];
    queue_head_t      instances;
    IOTracking *      addresses;
};

struct IOTrackingLeaksRef {
    uintptr_t * instances;
    uint32_t    zoneSize;
    uint32_t    count;
    uint32_t    found;
    uint32_t    foundzlen;
    size_t      bytes;
};

lck_mtx_t *  gIOTrackingLock;
queue_head_t gIOTrackingQ;

enum{
    kTrackingAddressFlagAllocated = 0x00000001
};

#if defined(__LP64__)
#define IOTrackingAddressFlags(ptr)     (ptr->flags)
#else
#define IOTrackingAddressFlags(ptr)     (ptr->tracking.flags)
#endif
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
IOTRecursiveLockLock(IOTRecursiveLock * lock)
{
    if (lock->thread == current_thread()) {
        lock->count++;
    } else {
        lck_mtx_lock(lock->mutex);
        assert(lock->thread == NULL);
        assert(lock->count == 0);
        lock->thread = current_thread();
        lock->count = 1;
    }
}

static void
IOTRecursiveLockUnlock(IOTRecursiveLock * lock)
{
    assert(lock->thread == current_thread());
    if (0 == (--lock->count)) {
        lock->thread = NULL;
        lck_mtx_unlock(lock->mutex);
    }
}
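#if 0
// Illustrative sketch, not part of the original file: the recursive-lock
// pattern used throughout this file. The same thread may nest Lock/Unlock
// pairs; the lck_mtx is taken only on the outermost Lock and released on the
// matching outermost Unlock. ExampleNestedLocking is a hypothetical caller.
static void
ExampleNestedLocking(IOTrackingQueue * queue)
{
    IOTRecursiveLockLock(&queue->lock);     // takes queue->lock.mutex
    IOTRecursiveLockLock(&queue->lock);     // recursion: only bumps the count
    /* ... touch queue state ... */
    IOTRecursiveLockUnlock(&queue->lock);   // count 2 -> 1, mutex still held
    IOTRecursiveLockUnlock(&queue->lock);   // count 1 -> 0, mutex released
}
#endif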
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingInit(void)
{
    queue_init(&gIOTrackingQ);
    gIOTrackingLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOTrackingQueue *
IOTrackingQueueAlloc(const char * name, uintptr_t btEntry,
    size_t allocSize, size_t minCaptureSize,
    uint32_t type, uint32_t numSiteQs)
{
    IOTrackingQueue * queue;
    uint32_t          idx;

    if (!numSiteQs) {
        numSiteQs = 1;
    }
    queue = (typeof(queue))kalloc(sizeof(IOTrackingQueue) + numSiteQs * sizeof(queue->sites[0]));
    bzero(queue, sizeof(IOTrackingQueue));

    queue->name           = name;
    queue->btEntry        = btEntry;
    queue->allocSize      = allocSize;
    queue->minCaptureSize = minCaptureSize;
    queue->lock.mutex     = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue->numSiteQs      = numSiteQs;
    queue->type           = type;

    enum { kFlags = (kIOTracking | kIOTrackingBoot) };
    queue->captureOn = (kFlags == (kFlags & gIOKitDebug))
        || (kIOTrackingQueueTypeDefaultOn & type);

    for (idx = 0; idx < numSiteQs; idx++) {
        queue_init(&queue->sites[idx]);
    }

    lck_mtx_lock(gIOTrackingLock);
    queue_enter(&gIOTrackingQ, queue, IOTrackingQueue *, link);
    lck_mtx_unlock(gIOTrackingLock);

    return queue;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingQueueFree(IOTrackingQueue * queue)
{
    lck_mtx_lock(gIOTrackingLock);
    IOTrackingReset(queue);
    remque(&queue->link);
    lck_mtx_unlock(gIOTrackingLock);

    lck_mtx_free(queue->lock.mutex, IOLockGroup);

    kfree(queue, sizeof(IOTrackingQueue) + queue->numSiteQs * sizeof(queue->sites[0]));
}
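#if 0
// Illustrative sketch, not part of the original file: creating a tracking
// queue for a hypothetical allocator and tearing it down again. The queue
// name, sizes, and bucket count here are made-up example values.
static IOTrackingQueue * gExampleTrackingQ;

static void
ExampleQueueSetup(void)
{
    gExampleTrackingQ = IOTrackingQueueAlloc("example", 0 /* btEntry */,
        0 /* allocSize: sizes passed per call */, 0 /* minCaptureSize */,
        kIOTrackingQueueTypeAlloc, 7 /* numSiteQs: call-site hash buckets */);
}

static void
ExampleQueueTeardown(void)
{
    IOTrackingQueueFree(gExampleTrackingQ);
    gExampleTrackingQ = NULL;
}
#endif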
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com)
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

// Compression function for Merkle-Damgard construction.
// This function is generated using the framework provided.
#define mix(h) ({                               \
	        (h) ^= (h) >> 23;               \
	        (h) *= 0x2127599bf4325c37ULL;   \
	        (h) ^= (h) >> 47; })
uint64_t
fasthash64(const void *buf, size_t len, uint64_t seed)
{
    const uint64_t m = 0x880355f21e6d1965ULL;
    const uint64_t *pos = (const uint64_t *)buf;
    const uint64_t *end = pos + (len / 8);
    const unsigned char *pos2;
    uint64_t h = seed ^ (len * m);
    uint64_t v;

    while (pos != end) {
        v  = *pos++;
        h ^= mix(v);
        h *= m;
    }

    pos2 = (const unsigned char*)pos;
    v = 0;

    switch (len & 7) {
    case 7: v ^= (uint64_t)pos2[6] << 48;
        [[clang::fallthrough]];
    case 6: v ^= (uint64_t)pos2[5] << 40;
        [[clang::fallthrough]];
    case 5: v ^= (uint64_t)pos2[4] << 32;
        [[clang::fallthrough]];
    case 4: v ^= (uint64_t)pos2[3] << 24;
        [[clang::fallthrough]];
    case 3: v ^= (uint64_t)pos2[2] << 16;
        [[clang::fallthrough]];
    case 2: v ^= (uint64_t)pos2[1] << 8;
        [[clang::fallthrough]];
    case 1: v ^= (uint64_t)pos2[0];
        h ^= mix(v);
        h *= m;
    }

    return mix(h);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

uint32_t
fasthash32(const void *buf, size_t len, uint32_t seed)
{
    // the following trick converts the 64-bit hashcode to Fermat
    // residue, which shall retain information from both the higher
    // and lower parts of hashcode.
    uint64_t h = fasthash64(buf, len, seed);
    return h - (h >> 32);
}
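#if 0
// Illustrative sketch, not part of the original file: hashing a small buffer
// with fasthash32, in the same spirit as the call-site CRC computed in
// IOTrackingAdd() below (which hashes the captured backtrace words with the
// same 0x04C11DB7 seed). ExampleHashBacktrace is a hypothetical helper.
static uint32_t
ExampleHashBacktrace(const uintptr_t * bt, uint32_t frames)
{
    return fasthash32(bt, frames * sizeof(bt[0]), 0x04C11DB7);
}
#endif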
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size)
{
    uint32_t num;
    proc_t   self;

    if (!queue->captureOn) {
        return;
    }
    if (size < queue->minCaptureSize) {
        return;
    }

    assert(!mem->link.next);

    num = backtrace(&mem->bt[0], kIOTrackingCallSiteBTs, NULL);
    if ((kernel_task != current_task()) && (self = proc_self())) {
        bool user_64 = false;
        mem->btPID = proc_pid(self);
        (void)backtrace_user(&mem->btUser[0], kIOTrackingCallSiteBTs - 1, &num,
            &user_64);
        mem->user32 = !user_64;
        proc_rele(self);
    }
    assert(num <= kIOTrackingCallSiteBTs);
    mem->userCount = num;

    IOTRecursiveLockLock(&queue->lock);
    queue_enter/*last*/ (&queue->sites[0], mem, IOTrackingUser *, link);
    queue->siteCount++;
    IOTRecursiveLockUnlock(&queue->lock);
}
void
IOTrackingRemoveUser(IOTrackingQueue * queue, IOTrackingUser * mem)
{
    if (!mem->link.next) {
        return;
    }

    IOTRecursiveLockLock(&queue->lock);
    if (mem->link.next) {
        assert(queue->siteCount);
        queue->siteCount--;
        remque(&mem->link);
        mem->link.next = NULL;
    }
    IOTRecursiveLockUnlock(&queue->lock);
}

uint64_t gIOTrackingAddTime;
void
IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool address, vm_tag_t tag)
{
    IOTrackingCallSite * site;
    uint32_t             crc, num;
    uintptr_t            bt[kIOTrackingCallSiteBTs + 1];
    queue_head_t       * que;

    if (!queue->captureOn) {
        return;
    }
    if (size < queue->minCaptureSize) {
        return;
    }

    assert(!mem->link.next);

    num = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1, NULL);
    if (!num) {
        return;
    }
    num--;
    crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7);

    IOTRecursiveLockLock(&queue->lock);
    que = &queue->sites[crc % queue->numSiteQs];
    queue_iterate(que, site, IOTrackingCallSite *, link)
    {
        if (tag != site->tag) {
            continue;
        }
        if (crc == site->crc) {
            break;
        }
    }

    if (queue_end(que, (queue_entry_t) site)) {
        site = (typeof(site))kalloc(sizeof(IOTrackingCallSite));

        queue_init(&site->instances);
        site->addresses = (IOTracking *) &site->instances;
        site->queue     = queue;
        site->crc       = crc;
        site->count     = 0;
        site->tag       = tag;
        memset(&site->size[0], 0, sizeof(site->size));
        bcopy(&bt[1], &site->bt[0], num * sizeof(site->bt[0]));
        assert(num <= kIOTrackingCallSiteBTs);
        bzero(&site->bt[num], (kIOTrackingCallSiteBTs - num) * sizeof(site->bt[0]));

        queue_enter_first(que, site, IOTrackingCallSite *, link);
        queue->siteCount++;
    }

    if (address) {
        queue_enter/*last*/ (&site->instances, mem, IOTracking *, link);
        if (queue_end(&site->instances, (queue_entry_t)site->addresses)) {
            site->addresses = mem;
        }
    } else {
        queue_enter_first(&site->instances, mem, IOTracking *, link);
    }

    mem->site = site;
    site->size[0] += size;
    site->count++;

    IOTRecursiveLockUnlock(&queue->lock);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingRemove(IOTrackingQueue * queue, IOTracking * mem, size_t size)
{
    if (!mem->link.next) {
        return;
    }

    IOTRecursiveLockLock(&queue->lock);
    if (mem->link.next) {
        assert(mem->site);

        if (mem == mem->site->addresses) {
            mem->site->addresses = (IOTracking *) queue_next(&mem->link);
        }
        remque(&mem->link);

        assert(mem->site->count);
        mem->site->count--;
        assert(mem->site->size[0] >= size);
        mem->site->size[0] -= size;

        if (!mem->site->count) {
            assert(queue_empty(&mem->site->instances));
            assert(!mem->site->size[0]);
            assert(!mem->site->size[1]);

            remque(&mem->site->link);
            assert(queue->siteCount);
            queue->siteCount--;
            kfree(mem->site, sizeof(IOTrackingCallSite));
        }
        mem->site = NULL;
    }
    IOTRecursiveLockUnlock(&queue->lock);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingAlloc(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
    IOTrackingAddress * tracking;

    if (!queue->captureOn) {
        return;
    }
    if (size < queue->minCaptureSize) {
        return;
    }

    address  = ~address;
    tracking = (typeof(tracking))kalloc(sizeof(IOTrackingAddress));
    bzero(tracking, sizeof(IOTrackingAddress));
    IOTrackingAddressFlags(tracking) |= kTrackingAddressFlagAllocated;
    tracking->address = address;
    tracking->size    = size;

    IOTrackingAdd(queue, &tracking->tracking, size, true, VM_KERN_MEMORY_NONE);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingFree(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
    IOTrackingCallSite * site;
    IOTrackingAddress  * tracking;
    uint32_t             idx;
    bool                 done;

    address = ~address;
    IOTRecursiveLockLock(&queue->lock);
    done = false;
    for (idx = 0; idx < queue->numSiteQs; idx++) {
        queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
        {
            tracking = (IOTrackingAddress *) site->addresses;
            while (!queue_end(&site->instances, &tracking->tracking.link)) {
                if ((done = (address == tracking->address))) {
                    IOTrackingRemove(queue, &tracking->tracking, size);
                    kfree(tracking, sizeof(IOTrackingAddress));
                    break;
                }
                tracking = (IOTrackingAddress *) queue_next(&tracking->tracking.link);
            }
            if (done) {
                break;
            }
        }
        if (done) {
            break;
        }
    }
    IOTRecursiveLockUnlock(&queue->lock);
}
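#if 0
// Illustrative sketch, not part of the original file: how an allocator
// wrapper might pair IOTrackingAlloc()/IOTrackingFree() around its own
// allocations, assuming the hypothetical gExampleTrackingQ queue from the
// earlier sketch. ExampleMalloc/ExampleFree are made-up names.
static void *
ExampleMalloc(size_t size)
{
    void * p = IOMalloc(size);
    if (p) {
        IOTrackingAlloc(gExampleTrackingQ, (uintptr_t) p, size);
    }
    return p;
}

static void
ExampleFree(void * p, size_t size)
{
    IOTrackingFree(gExampleTrackingQ, (uintptr_t) p, size);
    IOFree(p, size);
}
#endif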
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingAccumSize(IOTrackingQueue * queue, IOTracking * mem, size_t size)
{
    IOTRecursiveLockLock(&queue->lock);
    if (mem->link.next) {
        assert(mem->site);
        assert((size > 0) || (mem->site->size[1] >= -size));
        mem->site->size[1] += size;
    }
    IOTRecursiveLockUnlock(&queue->lock);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingReset(IOTrackingQueue * queue)
{
    IOTrackingCallSite * site;
    IOTrackingUser     * user;
    IOTracking         * tracking;
    IOTrackingAddress  * trackingAddress;
    uint32_t             idx;
    bool                 addresses;

    IOTRecursiveLockLock(&queue->lock);
    for (idx = 0; idx < queue->numSiteQs; idx++) {
        while (!queue_empty(&queue->sites[idx])) {
            if (kIOTrackingQueueTypeMap & queue->type) {
                queue_remove_first(&queue->sites[idx], user, IOTrackingUser *, link);
                user->link.next = user->link.prev = NULL;
            } else {
                queue_remove_first(&queue->sites[idx], site, IOTrackingCallSite *, link);
                addresses = false;
                while (!queue_empty(&site->instances)) {
                    queue_remove_first(&site->instances, tracking, IOTracking *, link);
                    if (tracking == site->addresses) {
                        addresses = true;
                    }
                    if (addresses) {
                        trackingAddress = (typeof(trackingAddress))tracking;
                        if (kTrackingAddressFlagAllocated & IOTrackingAddressFlags(trackingAddress)) {
                            kfree(tracking, sizeof(IOTrackingAddress));
                        }
                    }
                }
                kfree(site, sizeof(IOTrackingCallSite));
            }
        }
    }
    queue->siteCount = 0;
    IOTRecursiveLockUnlock(&queue->lock);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static int
IOTrackingCallSiteInfoCompare(const void * left, const void * right)
{
    IOTrackingCallSiteInfo * l = (typeof(l))left;
    IOTrackingCallSiteInfo * r = (typeof(r))right;
    size_t                   lsize, rsize;

    rsize = r->size[0] + r->size[1];
    lsize = l->size[0] + l->size[1];

    return (rsize > lsize) ? 1 : ((rsize == lsize) ? 0 : -1);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static int
IOTrackingAddressCompare(const void * left, const void * right)
{
    IOTracking * instance;
    uintptr_t    inst, laddr, raddr;

    inst = ((typeof(inst) *)left)[0];
    instance = (typeof(instance))INSTANCE_GET(inst);
    if (kInstanceFlagAddress & inst) {
        laddr = ~((IOTrackingAddress *)instance)->address;
    } else {
        laddr = (uintptr_t) (instance + 1);
    }

    inst = ((typeof(inst) *)right)[0];
    instance = (typeof(instance))(inst & ~kInstanceFlags);
    if (kInstanceFlagAddress & inst) {
        raddr = ~((IOTrackingAddress *)instance)->address;
    } else {
        raddr = (uintptr_t) (instance + 1);
    }

    return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
}
static int
IOTrackingZoneElementCompare(const void * left, const void * right)
{
    uintptr_t inst, laddr, raddr;

    inst = ((typeof(inst) *)left)[0];
    laddr = INSTANCE_PUT(inst);
    inst = ((typeof(inst) *)right)[0];
    raddr = INSTANCE_PUT(inst);

    return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
CopyOutKernelBacktrace(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteInfo)
{
    uint32_t          j;
    mach_vm_address_t bt, btEntry;

    btEntry = site->queue->btEntry;
    for (j = 0; j < kIOTrackingCallSiteBTs; j++) {
        bt = site->bt[j];
        if (btEntry
            && (!bt || (j == (kIOTrackingCallSiteBTs - 1)))) {
            bt = btEntry;
            btEntry = 0;
        }
        siteInfo->bt[0][j] = VM_KERNEL_UNSLIDE(bt);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static void
IOTrackingLeakScan(void * refcon)
{
    IOTrackingLeaksRef * ref = (typeof(ref))refcon;
    uintptr_t          * instances;
    IOTracking         * instance;
    uint64_t             vaddr, vincr;
    ppnum_t              ppn;
    uintptr_t            ptr, addr, vphysaddr, inst;
    size_t               size, origsize;
    uint32_t             baseIdx, lim, ptrIdx, count;
    boolean_t            is;
    AbsoluteTime         deadline;

    instances = ref->instances;
    count     = ref->count;
    size = origsize = ref->zoneSize;

    for (deadline = 0, vaddr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
        ;
        vaddr += vincr) {
        if ((mach_absolute_time() > deadline) || (vaddr >= VM_MAX_KERNEL_ADDRESS)) {
            if (deadline) {
                ml_set_interrupts_enabled(is);
            }
            if (vaddr >= VM_MAX_KERNEL_ADDRESS) {
                break;
            }
            is = ml_set_interrupts_enabled(false);
            clock_interval_to_deadline(10, kMillisecondScale, &deadline);
        }

        ppn = kernel_pmap_present_mapping(vaddr, &vincr, &vphysaddr);
        // check noencrypt to avoid VM structs (map entries) with pointers
        if (ppn && (!pmap_valid_page(ppn) || (!ref->zoneSize && pmap_is_noencrypt(ppn)))) {
            ppn = 0;
        }
        if (!ppn) {
            continue;
        }

        for (ptrIdx = 0; ptrIdx < (page_size / sizeof(uintptr_t)); ptrIdx++) {
            ptr = ((uintptr_t *)vphysaddr)[ptrIdx];
#if defined(HAS_APPLE_PAC)
            // strip possible ptrauth signature from candidate data pointer
            ptr = (uintptr_t)ptrauth_strip((void*)ptr, ptrauth_key_process_independent_data);
#endif /* defined(HAS_APPLE_PAC) */

            for (lim = count, baseIdx = 0; lim; lim >>= 1) {
                inst = instances[baseIdx + (lim >> 1)];
                instance = (typeof(instance))INSTANCE_GET(inst);

                if (ref->zoneSize) {
                    addr = INSTANCE_PUT(inst) & ~kInstanceFlags;
                } else if (kInstanceFlagAddress & inst) {
                    addr = ~((IOTrackingAddress *)instance)->address;
                    origsize = size = ((IOTrackingAddress *)instance)->size;
                } else {
                    addr = (uintptr_t) (instance + 1);
                    origsize = size = instance->site->queue->allocSize;
                }

                if ((ptr >= addr) && (ptr < (addr + size))
                    && (((vaddr + ptrIdx * sizeof(uintptr_t)) < addr)
                    || ((vaddr + ptrIdx * sizeof(uintptr_t)) >= (addr + size)))) {
                    if (!(kInstanceFlagReferenced & inst)) {
                        inst |= kInstanceFlagReferenced;
                        instances[baseIdx + (lim >> 1)] = inst;
                        ref->found++;
                        if (!origsize) {
                            ref->foundzlen++;
                        }
                    }
                    break;
                }
                if (ptr > addr) {
                    // move right in the sorted instance array
                    baseIdx += (lim >> 1) + 1;
                    lim--;
                }
                // else move left
            }
        }
        ref->bytes += page_size;
    }
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" void
zone_leaks_scan(uintptr_t * instances, uint32_t count, uint32_t zoneSize, uint32_t * found)
{
    IOTrackingLeaksRef     ref;
    IOTrackingCallSiteInfo siteInfo;
    uint32_t               idx;

    qsort(instances, count, sizeof(*instances), &IOTrackingZoneElementCompare);

    bzero(&siteInfo, sizeof(siteInfo));
    bzero(&ref, sizeof(ref));
    ref.instances = instances;
    ref.count     = count;
    ref.zoneSize  = zoneSize;

    for (idx = 0; idx < 2; idx++) {
        ref.bytes = 0;
        IOTrackingLeakScan(&ref);
        IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d\n", idx, ref.bytes / 1024 / 1024, count, ref.found);
        if (count <= ref.found) {
            break;
        }
    }

    *found = ref.found;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
ZoneSiteProc(void * refCon, uint32_t siteCount, uint32_t zoneSize,
    uintptr_t * backtrace, uint32_t btCount)
{
    IOTrackingCallSiteInfo siteInfo;
    OSData               * leakData;
    uint32_t               idx;

    leakData = (typeof(leakData))refCon;

    bzero(&siteInfo, sizeof(siteInfo));
    siteInfo.count   = siteCount;
    siteInfo.size[0] = zoneSize * siteCount;

    for (idx = 0; (idx < btCount) && (idx < kIOTrackingCallSiteBTs); idx++) {
        siteInfo.bt[0][idx] = VM_KERNEL_UNSLIDE(backtrace[idx]);
    }

    leakData->appendBytes(&siteInfo, sizeof(siteInfo));
}
980 IOTrackingLeaks(LIBKERN_CONSUMED OSData
* data
)
982 IOTrackingLeaksRef ref
;
983 IOTrackingCallSiteInfo siteInfo
;
984 IOTrackingCallSite
* site
;
986 uintptr_t * instances
;
987 IOTracking
* instance
;
989 uint32_t count
, idx
, numSites
, dups
, siteCount
;
991 instances
= (typeof(instances
))data
->getBytesNoCopy();
992 count
= (data
->getLength() / sizeof(*instances
));
993 qsort(instances
, count
, sizeof(*instances
), &IOTrackingAddressCompare
);
995 bzero(&siteInfo
, sizeof(siteInfo
));
996 bzero(&ref
, sizeof(ref
));
997 ref
.instances
= instances
;
999 for (idx
= 0; idx
< 2; idx
++) {
1001 IOTrackingLeakScan(&ref
);
1002 IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d (zlen %d)\n", idx
, ref
.bytes
/ 1024 / 1024, count
, ref
.found
, ref
.foundzlen
);
1003 if (count
<= ref
.found
) {
1008 leakData
= OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo
));
1010 for (numSites
= 0, idx
= 0; idx
< count
; idx
++) {
1011 inst
= instances
[idx
];
1012 if (kInstanceFlagReferenced
& inst
) {
1015 instance
= (typeof(instance
))INSTANCE_GET(inst
);
1016 site
= instance
->site
;
1017 instances
[numSites
] = (uintptr_t) site
;
1021 for (idx
= 0; idx
< numSites
; idx
++) {
1022 inst
= instances
[idx
];
1026 site
= (typeof(site
))inst
;
1027 for (siteCount
= 1, dups
= (idx
+ 1); dups
< numSites
; dups
++) {
1028 if (instances
[dups
] == (uintptr_t) site
) {
1030 instances
[dups
] = 0;
1033 siteInfo
.count
= siteCount
;
1034 siteInfo
.size
[0] = (site
->size
[0] * site
->count
) / siteCount
;
1035 siteInfo
.size
[1] = (site
->size
[1] * site
->count
) / siteCount
;;
1036 CopyOutKernelBacktrace(site
, &siteInfo
);
1037 leakData
->appendBytes(&siteInfo
, sizeof(siteInfo
));
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static bool
SkipName(uint32_t options, const char * name, size_t namesLen, const char * names)
{
    const char * scan;
    const char * next;
    bool         exclude, found;
    size_t       qLen, sLen;

    if (!namesLen || !names) {
        return false;
    }

    // <len><name>...<len><name><0>
    exclude = (0 != (kIOTrackingExcludeNames & options));
    qLen    = strlen(name);
    scan    = names;
    found   = false;
    do{
        sLen = scan[0];
        scan++;
        next = scan + sLen;
        if (next >= (names + namesLen)) {
            break;
        }
        found = ((sLen == qLen) && !strncmp(scan, name, sLen));
        scan = next;
    }while (!found && (scan < (names + namesLen)));

    return !(exclude ^ found);
}
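#if 0
// Illustrative sketch, not part of the original file: building the packed
// <len><name>...<len><name><0> buffer that SkipName() walks. Each entry is a
// one-byte length followed by that many name characters, terminated by a zero
// length byte. ExamplePackNames and the queue names used are hypothetical.
static size_t
ExamplePackNames(char * buf, size_t bufLen)
{
    static const char * const queueNames[] = { "iomalloc", "iopagealloc" };
    size_t pos = 0;

    for (unsigned i = 0; i < (sizeof(queueNames) / sizeof(queueNames[0])); i++) {
        size_t len = strlen(queueNames[i]);
        if ((pos + 1 + len + 1) > bufLen) {
            break;
        }
        buf[pos++] = (char) len;
        memcpy(&buf[pos], queueNames[i], len);
        pos += len;
    }
    buf[pos++] = 0;     // terminating zero length
    return pos;
}
#endif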
#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static kern_return_t
IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value,
    uint32_t intag, uint32_t inzsize,
    const char * names, size_t namesLen,
    size_t size, OSObject ** result)
{
    kern_return_t ret;
    OSData      * data;

    data = NULL;
    ret  = kIOReturnNotReady;
#if IOTRACKING
    kern_return_t          kr;
    IOTrackingQueue      * queue;
    IOTracking           * instance;
    IOTrackingCallSite   * site;
    IOTrackingCallSiteInfo siteInfo;
    IOTrackingUser       * user;
    task_t                 mapTask;
    mach_vm_address_t      mapAddress;
    mach_vm_size_t         mapSize;
    uint32_t               num, idx, qIdx;
    uintptr_t              instFlags;
    proc_t                 proc;
    bool                   addresses;

    ret  = kIOReturnNotFound;
    proc = NULL;
    if (kIOTrackingGetMappings == selector) {
        if (value != -1ULL) {
            proc = proc_find(value);
            if (!proc) {
                return kIOReturnNotFound;
            }
        }
    }
    bzero(&siteInfo, sizeof(siteInfo));
    lck_mtx_lock(gIOTrackingLock);
    queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
    {
        if (SkipName(options, queue->name, namesLen, names)) {
            continue;
        }
        if (!(kIOTracking & gIOKitDebug) && (kIOTrackingQueueTypeAlloc & queue->type)) {
            continue;
        }

        switch (selector) {
        case kIOTrackingResetTracking:
        {
            IOTrackingReset(queue);
            ret = kIOReturnSuccess;
            break;
        }
        case kIOTrackingStartCapture:
        case kIOTrackingStopCapture:
        {
            queue->captureOn = (kIOTrackingStartCapture == selector);
            ret = kIOReturnSuccess;
            break;
        }

        case kIOTrackingSetMinCaptureSize:
        {
            queue->minCaptureSize = size;
            ret = kIOReturnSuccess;
            break;
        }
        case kIOTrackingLeaks:
        {
            if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
                break;
            }
            if (!data) {
                data = OSData::withCapacity(1024 * sizeof(uintptr_t));
            }

            IOTRecursiveLockLock(&queue->lock);
            for (idx = 0; idx < queue->numSiteQs; idx++) {
                queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
                {
                    addresses = false;
                    queue_iterate(&site->instances, instance, IOTracking *, link)
                    {
                        if (instance == site->addresses) {
                            addresses = true;
                        }
                        instFlags = (typeof(instFlags))instance;
                        if (addresses) {
                            instFlags |= kInstanceFlagAddress;
                        }
                        data->appendBytes(&instFlags, sizeof(instFlags));
                    }
                }
            }
            // queue stays locked until the leak scan below has run
            ret = kIOReturnSuccess;
            break;
        }
        case kIOTrackingGetTracking:
        {
            if (kIOTrackingQueueTypeMap & queue->type) {
                break;
            }
            if (!data) {
                data = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));
            }

            IOTRecursiveLockLock(&queue->lock);
            num = queue->siteCount;
            for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
                queue_iterate(&queue->sites[qIdx], site, IOTrackingCallSite *, link)
                {
                    size_t   tsize[2];
                    uint32_t count = site->count;
                    tsize[0] = site->size[0];
                    tsize[1] = site->size[1];

                    if (intag || inzsize) {
                        uintptr_t addr;
                        vm_size_t size, zoneSize;
                        vm_tag_t  tag;

                        if (kIOTrackingQueueTypeAlloc & queue->type) {
                            addresses = false;
                            count = 0;
                            tsize[0] = tsize[1] = 0;
                            queue_iterate(&site->instances, instance, IOTracking *, link)
                            {
                                if (instance == site->addresses) {
                                    addresses = true;
                                }
                                if (addresses) {
                                    addr = ~((IOTrackingAddress *)instance)->address;
                                } else {
                                    addr = (uintptr_t) (instance + 1);
                                }

                                kr = vm_kern_allocation_info(addr, &size, &tag, &zoneSize);
                                if (KERN_SUCCESS != kr) {
                                    continue;
                                }
                                if ((VM_KERN_MEMORY_NONE != intag) && (intag != tag)) {
                                    continue;
                                }
                                if (inzsize && (inzsize != zoneSize)) {
                                    continue;
                                }
                                count++;
                                tsize[0] += size;
                            }
                        } else {
                            if (!intag || inzsize || (intag != site->tag)) {
                                continue;
                            }
                        }
                    }

                    if (!count) {
                        continue;
                    }
                    if (size && ((tsize[0] + tsize[1]) < size)) {
                        continue;
                    }

                    siteInfo.count   = count;
                    siteInfo.size[0] = tsize[0];
                    siteInfo.size[1] = tsize[1];
                    CopyOutKernelBacktrace(site, &siteInfo);
                    data->appendBytes(&siteInfo, sizeof(siteInfo));
                }
            }
            IOTRecursiveLockUnlock(&queue->lock);
            ret = kIOReturnSuccess;
            break;
        }
        case kIOTrackingGetMappings:
        {
            if (!(kIOTrackingQueueTypeMap & queue->type)) {
                break;
            }
            if (!data) {
                data = OSData::withCapacity(page_size);
            }

            IOTRecursiveLockLock(&queue->lock);
            num = queue->siteCount;
            for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
                queue_iterate(&queue->sites[qIdx], user, IOTrackingUser *, link)
                {
                    kr = IOMemoryMapTracking(user, &mapTask, &mapAddress, &mapSize);
                    if (kIOReturnSuccess != kr) {
                        continue;
                    }
                    if (proc && (mapTask != proc_task(proc))) {
                        continue;
                    }
                    if (size && (mapSize < size)) {
                        continue;
                    }

                    siteInfo.size[0]    = mapSize;
                    siteInfo.address    = mapAddress;
                    siteInfo.addressPID = task_pid(mapTask);
                    siteInfo.btPID      = user->btPID;

                    for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
                        siteInfo.bt[0][j] = VM_KERNEL_UNSLIDE(user->bt[j]);
                    }
                    uint32_t * bt32 = (typeof(bt32)) &user->btUser[0];
                    uint64_t * bt64 = (typeof(bt64))((void *) &user->btUser[0]);
                    for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
                        if (j >= user->userCount) {
                            siteInfo.bt[1][j] = 0;
                        } else if (user->user32) {
                            siteInfo.bt[1][j] = bt32[j];
                        } else {
                            siteInfo.bt[1][j] = bt64[j];
                        }
                    }
                    data->appendBytes(&siteInfo, sizeof(siteInfo));
                }
            }
            IOTRecursiveLockUnlock(&queue->lock);
            ret = kIOReturnSuccess;
            break;
        }
        default:
            ret = kIOReturnUnsupported;
            break;
        }
    }
    if ((kIOTrackingLeaks == selector) && data) {
        data = IOTrackingLeaks(data);
        queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
        {
            if (SkipName(options, queue->name, namesLen, names)) {
                continue;
            }
            if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
                continue;
            }
            IOTRecursiveLockUnlock(&queue->lock);
        }
    }

    lck_mtx_unlock(gIOTrackingLock);
    if ((kIOTrackingLeaks == selector) && namesLen && names) {
        const char * scan;
        const char * next;
        size_t       sLen;

        if (!data) {
            data = OSData::withCapacity(4096 * sizeof(uintptr_t));
        }

        // <len><name>...<len><name><0>
        scan = names;
        do{
            sLen = scan[0];
            scan++;
            next = scan + sLen;
            if (next >= (names + namesLen)) {
                break;
            }
            kr = zone_leaks(scan, sLen, &ZoneSiteProc, data);
            if (KERN_SUCCESS == kr) {
                ret = kIOReturnSuccess;
            } else if (KERN_INVALID_NAME != kr) {
                ret = kIOReturnVMError;
            }
            scan = next;
        }while (scan < (names + namesLen));
    }

    if (proc) {
        proc_rele(proc);
    }
    if ((kIOReturnSuccess == ret) && data) {
        switch (selector) {
        case kIOTrackingLeaks:
        case kIOTrackingGetTracking:
        case kIOTrackingGetMappings:
        {
            IOTrackingCallSiteInfo * siteInfos;
            siteInfos = (typeof(siteInfos))data->getBytesNoCopy();
            num = (data->getLength() / sizeof(*siteInfos));
            qsort(siteInfos, num, sizeof(*siteInfos), &IOTrackingCallSiteInfoCompare);
            break;
        }
        default: assert(false); break;
        }
    }

    *result = data;

#endif /* IOTRACKING */

    return ret;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <IOKit/IOKitDiagnosticsUserClient.h>

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOUserClient

OSDefineMetaClassAndStructors(IOKitDiagnosticsClient, IOUserClient)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOUserClient * IOKitDiagnosticsClient::withTask(task_t owningTask)
{
    IOKitDiagnosticsClient * inst;

    inst = new IOKitDiagnosticsClient;
    if (inst && !inst->init()) {
        inst->release();
        inst = NULL;
    }

    return inst;
}
IOReturn
IOKitDiagnosticsClient::clientClose(void)
{
    terminate();
    return kIOReturnSuccess;
}
IOReturn
IOKitDiagnosticsClient::setProperties(OSObject * properties)
{
    IOReturn kr = kIOReturnUnsupported;
    return kr;
}
IOReturn
IOKitDiagnosticsClient::externalMethod(uint32_t selector, IOExternalMethodArguments * args,
    IOExternalMethodDispatch * dispatch, OSObject * target, void * reference)
{
    IOReturn                           ret = kIOReturnBadArgument;
    const IOKitDiagnosticsParameters * params;
    const char                       * names;
    size_t                             namesLen;
    OSObject                         * result;

    if (args->structureInputSize < sizeof(IOKitDiagnosticsParameters)) {
        return kIOReturnBadArgument;
    }
    params = (typeof(params))args->structureInput;
    if (!params) {
        return kIOReturnBadArgument;
    }

    names    = NULL;
    namesLen = args->structureInputSize - sizeof(IOKitDiagnosticsParameters);
    if (namesLen) {
        names = (typeof(names))(params + 1);
    }

    result = NULL;
    ret = IOTrackingDebug(selector, params->options, params->value, params->tag, params->zsize, names, namesLen, params->size, &result);

    if ((kIOReturnSuccess == ret) && args->structureVariableOutputData) {
        *args->structureVariableOutputData = result;
    } else if (result) {
        result->release();
    }

    return ret;
}
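#if 0
// Illustrative user-space sketch, not part of the original file: invoking the
// diagnostics external method through IOKit.framework. The struct layout is
// IOKitDiagnosticsParameters from IOKitDebug.h; the include path, the opened
// io_connect_t, and the helper name are assumptions for this example.
#include <IOKit/IOKitLib.h>
#include <IOKit/IOKitDebug.h>
#include <string.h>

static kern_return_t
ExampleStartCapture(io_connect_t connect)
{
    IOKitDiagnosticsParameters params;

    memset(&params, 0, sizeof(params));
    // selector kIOTrackingStartCapture; no packed queue names follow params
    return IOConnectCallStructMethod(connect, kIOTrackingStartCapture,
        &params, sizeof(params), NULL, NULL);
}
#endif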
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */