X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3e170ce000f1506b7b5d2c5c7faec85ceabb573d..b226f5e54a60dc81db17b1260381d7dbfea3cdf1:/iokit/Kernel/IOLib.cpp

diff --git a/iokit/Kernel/IOLib.cpp b/iokit/Kernel/IOLib.cpp
index 44a436346..385ce056f 100644
--- a/iokit/Kernel/IOLib.cpp
+++ b/iokit/Kernel/IOLib.cpp
@@ -58,7 +58,9 @@
 #include "libkern/OSAtomic.h"
 #include
 #include
+#include
 #include
+#include

 #if IOKITSTATS
@@ -97,7 +99,6 @@ __doprnt(
 extern void cons_putc_locked(char);
 extern void bsd_log_lock(void);
 extern void bsd_log_unlock(void);
-extern void logwakeup();

 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -172,9 +173,17 @@ void IOLibInit(void)
 #if IOTRACKING
     IOTrackingInit();
-    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, true);
-    gIOWireTracking = IOTrackingQueueAlloc(kIOWireTrackingName, 0, page_size, false);
-    gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, page_size, false);
+    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
+                                             kIOTrackingQueueTypeAlloc,
+                                             37);
+    gIOWireTracking = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);
+
+    size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024*1024);
+    gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
+                                          kIOTrackingQueueTypeDefaultOn
+                                           | kIOTrackingQueueTypeMap
+                                           | kIOTrackingQueueTypeUser,
+                                          0);
 #endif

     gIOKitPageableSpace.maps[0].address = 0;
@@ -182,7 +191,9 @@ void IOLibInit(void)
                 &gIOKitPageableSpace.maps[0].address,
                 kIOPageableMapSize,
                 TRUE,
-                VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
+                VM_FLAGS_ANYWHERE,
+                VM_MAP_KERNEL_FLAGS_NONE,
+                VM_KERN_MEMORY_IOKIT,
                 &gIOKitPageableSpace.maps[0].map);
     if (ret != KERN_SUCCESS)
         panic("failed to allocate iokit pageable map\n");
@@ -272,7 +283,7 @@ void * IOMalloc(vm_size_t size)
            bzero(&hdr->tracking, sizeof(hdr->tracking));
            hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
            hdr->tracking.size = size;
-           IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
+           IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
        }
 #endif
        address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader);
@@ -286,15 +297,25 @@ void * IOMalloc(vm_size_t size)
     return address;
 }

-void IOFree(void * address, vm_size_t size)
+void IOFree(void * inAddress, vm_size_t size)
 {
-    if (address) {
+    void * address;
+    if ((address = inAddress))
+    {
        address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader);

 #if IOTRACKING
-       if (TRACK_ALLOC) {
+       if (TRACK_ALLOC)
+       {
            IOLibMallocHeader * hdr;
+           struct ptr_reference{ void * ptr; };
+           volatile struct ptr_reference ptr;
+
+           // we're about to block in IOTrackingRemove(), make sure the original pointer
+           // exists in memory or a register for leak scanning to find
+           ptr.ptr = inAddress;
+
            hdr = (typeof(hdr)) address;
            if (size != hdr->tracking.size)
            {
@@ -302,6 +323,7 @@ void IOFree(void * address, vm_size_t size)
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
+           ptr.ptr = NULL;
        }
 #endif
@@ -400,7 +422,7 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
            bzero(&hdr->tracking, sizeof(hdr->tracking));
            hdr->tracking.address = ~address;
            hdr->tracking.size = size;
-           IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
+           IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
        }
 #endif
     } else
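
Aside (not part of the diff): the IOFree() hunk above parks the caller's pointer in a volatile local before calling IOTrackingRemove(), which can block; during that window a conservative leak scanner would otherwise find no live reference to the block being freed and could misreport it as leaked. Below is a minimal user-space sketch of the same keep-alive idea; free_tracked(), tracking_remove() and real_free() are hypothetical stand-ins, not XNU APIs.

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical stand-ins for IOTrackingRemove() and the real free path;
   they only mirror the shape of the calls. */
static void tracking_remove(void *p, size_t size) { (void)p; (void)size; /* may block */ }
static void real_free(void *p, size_t size)       { (void)size; free(p); }

void free_tracked(void *inAddress, size_t size)
{
    struct ptr_reference { void *ptr; };
    volatile struct ptr_reference keep;  /* volatile: the stores below cannot be optimized away */

    keep.ptr = inAddress;                /* keep a scannable reference to the original pointer ... */
    tracking_remove(inAddress, size);    /* ... alive across a call that may block                 */
    keep.ptr = NULL;                     /* drop the extra reference once bookkeeping is done      */

    real_free(inAddress, size);
}
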
@@ -505,6 +527,9 @@ IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
 #endif
 }

+#if __arm__ || __arm64__
+extern unsigned long gPhysBase, gPhysSize;
+#endif
 mach_vm_address_t
 IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
@@ -523,8 +548,8 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxP
        alignment = 1;
     alignMask = alignment - 1;
-    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
-    if (adjustedSize < size) return (0);
+
+    if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) return (0);

     contiguous = (contiguous && (adjustedSize > page_size))
                || (alignment > page_size);
@@ -540,6 +565,13 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxP

     if (!contiguous)
     {
+#if __arm__ || __arm64__
+	if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize))
+	{
+	    maxPhys = 0;
+	}
+	else
+#endif
	if (maxPhys <= 0xFFFFFFFF)
	{
	    maxPhys = 0;
@@ -593,7 +625,7 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxP
	    bzero(&hdr->tracking, sizeof(hdr->tracking));
	    hdr->tracking.address = ~address;
	    hdr->tracking.size = size;
-	    IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
+	    IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
	}
 #endif
     } else
@@ -737,7 +769,7 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
             else
                 index = gIOKitPageableSpace.count - 1;
         }
-        if( KERN_SUCCESS == kr)
+        if (KERN_NO_SPACE != kr)
             break;

         lck_mtx_lock( gIOKitPageableSpace.lock );
@@ -758,7 +790,9 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
                     &min,
                     segSize,
                     TRUE,
-                    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
+                    VM_FLAGS_ANYWHERE,
+                    VM_MAP_KERNEL_FLAGS_NONE,
+                    VM_KERN_MEMORY_IOKIT,
                     &map);
         if( KERN_SUCCESS != kr) {
             lck_mtx_unlock( gIOKitPageableSpace.lock );
@@ -1112,38 +1146,41 @@ void IOPause(unsigned nanoseconds)

 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

-static void _iolog_consputc(int ch, void *arg __unused)
-{
-    cons_putc_locked(ch);
-}
-
-static void _iolog_logputc(int ch, void *arg __unused)
-{
-    log_putc_locked(ch);
-}
+static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1,0);
+__attribute__((noinline,not_tail_called))
 void IOLog(const char *format, ...)
 {
+    void *caller = __builtin_return_address(0);
     va_list ap;

     va_start(ap, format);
-    IOLogv(format, ap);
+    _IOLogv(format, ap, caller);
     va_end(ap);
 }

+__attribute__((noinline,not_tail_called))
 void IOLogv(const char *format, va_list ap)
+{
+    void *caller = __builtin_return_address(0);
+    _IOLogv(format, ap, caller);
+}
+
+void _IOLogv(const char *format, va_list ap, void *caller)
 {
     va_list ap2;
+    struct console_printbuf_state info_data;
+    console_printbuf_state_init(&info_data, TRUE, TRUE);

     va_copy(ap2, ap);

-    bsd_log_lock();
-    __doprnt(format, ap, _iolog_logputc, NULL, 16, TRUE);
-    bsd_log_unlock();
-    logwakeup();
+    os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);

-    __doprnt(format, ap2, _iolog_consputc, NULL, 16, TRUE);
+    __doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
+    console_printbuf_clear(&info_data);
     va_end(ap2);
+
+    assertf(ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !gCPUsRunning, "IOLog called with interrupts disabled");
 }

 #if !__LP64__
@@ -1155,6 +1192,40 @@ void IOPanic(const char *reason)

 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

+void IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
+                          void (*output)(const char *format, ...))
+{
+    uint8_t c, chars[17];
+    size_t idx;
+
+    output("%s(0x%x):\n", title, size);
+    if (size > 4096) size = 4096;
+    chars[16] = idx = 0;
+    while (true) {
+        if (!(idx & 15)) {
+            if (idx) output(" |%s|\n", chars);
+            if (idx >= size) break;
+            output("%04x: ", idx);
+        }
+        else if (!(idx & 7)) output(" ");
+
+        c = ((char *)buffer)[idx];
+        output("%02x ", c);
+        chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';
+
+        idx++;
+        if ((idx == size) && (idx & 15)) {
+            chars[idx & 15] = 0;
+            while (idx & 15) {
+                idx++;
+                output(" ");
+            }
+        }
+    }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
 /*
  * Convert a integer constant (typically a #define or enum) to a string.
  */
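
Aside (not part of the diff): IOKitKernelLogBuffer(), added in the final hunk, hex-dumps up to 4 KB of a buffer through whatever printf-style callback it is handed, sixteen bytes per line with an offset and an ASCII column. A hedged usage sketch follows; the DumpDescriptor() helper and its arguments are invented, the choice of <IOKit/IOLib.h> as the header that makes the declaration visible is an assumption (the diff does not show which header exports it), and kprintf() is used only because its type matches the output callback and it reaches the serial console.

#include <IOKit/IOLib.h>        /* assumed to make IOKitKernelLogBuffer() visible in this build */
#include <pexpert/pexpert.h>    /* kprintf() */

/* Hypothetical debug helper: hex-dump a just-received descriptor to the serial console.
   The helper itself caps the dump at 4096 bytes. */
static void DumpDescriptor(const void * desc, size_t length)
{
    IOKitKernelLogBuffer("descriptor", desc, length, &kprintf);
}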