-/*
+/*
* Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
#endif /* IOKITSTATS */
+
+#define TRACK_ALLOC (IOTRACKING && (kIOTracking & gIOKitDebug))
+
+
extern "C"
{
va_list argp,
void (*putc)(int, void *),
void *arg,
- int radix);
+ int radix,
+ int is_log);
extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
uint32_t gIOPageAllocChunkBytes;
+#if IOTRACKING
+IOTrackingQueue * gIOMallocTracking;
+IOTrackingQueue * gIOWireTracking;
+IOTrackingQueue * gIOMapTracking;
+#endif /* IOTRACKING */
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOLibInit(void)
if(libInitialized)
return;
+ IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);
+
+#if IOTRACKING
+ IOTrackingInit();
+ gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, true);
+ gIOWireTracking = IOTrackingQueueAlloc(kIOWireTrackingName, 0, page_size, false);
+ gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, page_size, false);
+#endif
+
gIOKitPageableSpace.maps[0].address = 0;
ret = kmem_suballoc(kernel_map,
&gIOKitPageableSpace.maps[0].address,
kIOPageableMapSize,
TRUE,
- VM_FLAGS_ANYWHERE,
+ VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
&gIOKitPageableSpace.maps[0].map);
if (ret != KERN_SUCCESS)
panic("failed to allocate iokit pageable map\n");
- IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);
-
gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
gIOKitPageableSpace.hint = 0;
iopa_init(&gIOBMDPageAllocator);
iopa_init(&gIOPageablePageAllocator);
+
libInitialized = true;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+static uint32_t
+log2up(uint32_t size)
+{
+ if (size <= 1) size = 0;
+ else size = 32 - __builtin_clz(size - 1);
+ return (size);
+}
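(Editor's note: an illustrative user-space sketch of what this helper computes; log2up_demo and the sample values are assumptions for demonstration, not part of the patch. The result is the exponent of the smallest power of two >= size, which is why IOMallocAligned below can use (1UL << log2up(alignment)) to normalize any alignment request.)

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as log2up() above: ceil(log2(size)), with 0 and 1 both mapping to 0. */
static uint32_t log2up_demo(uint32_t size)
{
    return (size <= 1) ? 0 : 32 - __builtin_clz(size - 1);
}

int main(void)
{
    /* Prints "0 1 2 2 12": (1UL << log2up_demo(x)) rounds x up to a power of two. */
    printf("%u %u %u %u %u\n", log2up_demo(1), log2up_demo(2),
           log2up_demo(3), log2up_demo(4), log2up_demo(4096));
    return 0;
}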
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
kern_return_t result;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#if IOTRACKING
+struct IOLibMallocHeader
+{
+ IOTrackingAddress tracking;
+};
+#endif
+
+#if IOTRACKING
+#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
+#else
+#define sizeofIOLibMallocHeader (0)
+#endif
+
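(Editor's note: with tracking compiled in, sizeofIOLibMallocHeader is evaluated at runtime -- it collapses to zero bytes whenever kIOTracking is clear in gIOKitDebug, because the header contains nothing but the IOTrackingAddress. A minimal sketch of that pattern, using assumed demo_* names rather than the kernel types:)

#include <stdio.h>
#include <stddef.h>

struct demo_tracking { unsigned long address, size; };
struct demo_header   { struct demo_tracking tracking; };

static int demo_tracking_enabled;   /* plays the role of (kIOTracking & gIOKitDebug) */

/* Effective header size: the full struct when tracking is on, nothing when it is off. */
#define DEMO_HEADER_SIZE \
    (sizeof(struct demo_header) - (demo_tracking_enabled ? 0 : sizeof(struct demo_tracking)))

int main(void)
{
    demo_tracking_enabled = 0;
    printf("header bytes with tracking off: %zu\n", DEMO_HEADER_SIZE);  /* 0 */
    demo_tracking_enabled = 1;
    printf("header bytes with tracking on:  %zu\n", DEMO_HEADER_SIZE);  /* sizeof(struct demo_header) */
    return 0;
}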
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMalloc(vm_size_t size)
{
void * address;
+ vm_size_t allocSize;
+
+ allocSize = size + sizeofIOLibMallocHeader;
+#if IOTRACKING
+ if (sizeofIOLibMallocHeader && (allocSize <= size)) return (NULL); // overflow
+#endif
+ address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT);
- address = (void *)kalloc(size);
if ( address ) {
+#if IOTRACKING
+ if (TRACK_ALLOC) {
+ IOLibMallocHeader * hdr;
+ hdr = (typeof(hdr)) address;
+ bzero(&hdr->tracking, sizeof(hdr->tracking));
+ hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
+ hdr->tracking.size = size;
+ IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
+ }
+#endif
+ address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader);
+
#if IOALLOCDEBUG
- debug_iomalloc_size += size;
+ OSAddAtomic(size, &debug_iomalloc_size);
#endif
- IOStatisticsAlloc(kIOStatisticsMalloc, size);
+ IOStatisticsAlloc(kIOStatisticsMalloc, size);
}
return address;
void IOFree(void * address, vm_size_t size)
{
if (address) {
- kfree(address, size);
+
+ address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader);
+
+#if IOTRACKING
+ if (TRACK_ALLOC) {
+ IOLibMallocHeader * hdr;
+ hdr = (typeof(hdr)) address;
+ if (size != hdr->tracking.size)
+ {
+ OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
+ size = hdr->tracking.size;
+ }
+ IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
+ }
+#endif
+
+ kfree(address, size + sizeofIOLibMallocHeader);
#if IOALLOCDEBUG
- debug_iomalloc_size -= size;
+ OSAddAtomic(-size, &debug_iomalloc_size);
#endif
- IOStatisticsAlloc(kIOStatisticsFree, size);
+ IOStatisticsAlloc(kIOStatisticsFree, size);
}
}
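(Editor's note: a hedged user-space sketch of the hidden-header scheme IOMalloc/IOFree use when tracking is active -- the block is grown by the header size, bookkeeping lives in that prefix, and the caller only ever sees the address just past it. demo_malloc, demo_free and demo_header are illustrative names, not kernel API; the kernel additionally stores the complemented address and links the record into gIOMallocTracking.)

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* Stand-in for IOLibMallocHeader: just remembers the caller-visible size. */
struct demo_header {
    size_t size;
};

static void * demo_malloc(size_t size)
{
    size_t allocSize = size + sizeof(struct demo_header);
    if (allocSize <= size) return NULL;                      /* overflow check, as in IOMalloc */
    struct demo_header *hdr = (struct demo_header *) malloc(allocSize);
    if (!hdr) return NULL;
    hdr->size = size;                                        /* where IOTrackingAdd would hook in */
    return (void *)(hdr + 1);                                /* caller sees the address past the header */
}

static void demo_free(void *address, size_t size)
{
    struct demo_header *hdr = (struct demo_header *) address - 1;  /* step back over the header */
    if (size != hdr->size)
        fprintf(stderr, "bad free size %zu, should be %zu\n", size, hdr->size);
    free(hdr);                                               /* release the header-inclusive block */
}

int main(void)
{
    void *p = demo_malloc(64);
    demo_free(p, 64);
    return 0;
}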
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+vm_tag_t
+IOMemoryTag(vm_map_t map)
+{
+ vm_tag_t tag;
+
+ if (!vm_kernel_map_is_kernel(map)) return (VM_MEMORY_IOKIT);
+
+ tag = vm_tag_bt();
+ if (tag == VM_KERN_MEMORY_NONE) tag = VM_KERN_MEMORY_IOKIT;
+
+ return (tag);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+struct IOLibPageMallocHeader
+{
+ mach_vm_size_t allocationSize;
+ mach_vm_address_t allocationAddress;
+#if IOTRACKING
+ IOTrackingAddress tracking;
+#endif
+};
+
+#if IOTRACKING
+#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
+#else
+#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader))
+#endif
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
- kern_return_t kr;
- vm_offset_t address;
- vm_offset_t allocationAddress;
- vm_size_t adjustedSize;
- uintptr_t alignMask;
+ kern_return_t kr;
+ vm_offset_t address;
+ vm_offset_t allocationAddress;
+ vm_size_t adjustedSize;
+ uintptr_t alignMask;
+ IOLibPageMallocHeader * hdr;
if (size == 0)
return 0;
- if (alignment == 0)
- alignment = 1;
+ alignment = (1UL << log2up(alignment));
alignMask = alignment - 1;
- adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
+ adjustedSize = size + sizeofIOLibPageMallocHeader;
if (size > adjustedSize) {
address = 0; /* overflow detected */
else if (adjustedSize >= page_size) {
kr = kernel_memory_allocate(kernel_map, &address,
- size, alignMask, 0);
- if (KERN_SUCCESS != kr)
- address = 0;
+ size, alignMask, 0, IOMemoryTag(kernel_map));
+ if (KERN_SUCCESS != kr) address = 0;
+#if IOTRACKING
+ else if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
+#endif
} else {
if (adjustedSize >= page_size) {
kr = kernel_memory_allocate(kernel_map, &allocationAddress,
- adjustedSize, 0, 0);
- if (KERN_SUCCESS != kr)
- allocationAddress = 0;
+ adjustedSize, 0, 0, IOMemoryTag(kernel_map));
+ if (KERN_SUCCESS != kr) allocationAddress = 0;
} else
- allocationAddress = (vm_address_t) kalloc(adjustedSize);
+ allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);
if (allocationAddress) {
- address = (allocationAddress + alignMask
- + (sizeof(vm_size_t) + sizeof(vm_address_t)))
+ address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
& (~alignMask);
- *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t)))
- = adjustedSize;
- *((vm_address_t *)(address - sizeof(vm_address_t)))
- = allocationAddress;
+ hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
+ hdr->allocationSize = adjustedSize;
+ hdr->allocationAddress = allocationAddress;
+#if IOTRACKING
+ if (TRACK_ALLOC) {
+ bzero(&hdr->tracking, sizeof(hdr->tracking));
+ hdr->tracking.address = ~address;
+ hdr->tracking.size = size;
+ IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
+ }
+#endif
} else
address = 0;
}
if( address) {
#if IOALLOCDEBUG
- debug_iomalloc_size += size;
+ OSAddAtomic(size, &debug_iomalloc_size);
#endif
IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
}
void IOFreeAligned(void * address, vm_size_t size)
{
- vm_address_t allocationAddress;
- vm_size_t adjustedSize;
+ vm_address_t allocationAddress;
+ vm_size_t adjustedSize;
+ IOLibPageMallocHeader * hdr;
if( !address)
return;
assert(size);
- adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
+ adjustedSize = size + sizeofIOLibPageMallocHeader;
if (adjustedSize >= page_size) {
-
+#if IOTRACKING
+ if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
+#endif
kmem_free( kernel_map, (vm_offset_t) address, size);
} else {
- adjustedSize = *((vm_size_t *)( (vm_address_t) address
- - sizeof(vm_address_t) - sizeof(vm_size_t)));
- allocationAddress = *((vm_address_t *)( (vm_address_t) address
- - sizeof(vm_address_t) ));
+ hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
+ adjustedSize = hdr->allocationSize;
+ allocationAddress = hdr->allocationAddress;
- if (adjustedSize >= page_size)
+#if IOTRACKING
+ if (TRACK_ALLOC)
+ {
+ if (size != hdr->tracking.size)
+ {
+ OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
+ size = hdr->tracking.size;
+ }
+ IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
+ }
+#endif
+ if (adjustedSize >= page_size) {
kmem_free( kernel_map, allocationAddress, adjustedSize);
- else
- kfree((void *)allocationAddress, adjustedSize);
+ } else {
+ kfree((void *)allocationAddress, adjustedSize);
+ }
}
#if IOALLOCDEBUG
- debug_iomalloc_size -= size;
+ OSAddAtomic(-size, &debug_iomalloc_size);
#endif
IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
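(Editor's note: a hedged sketch of the sub-page path in IOMallocAligned/IOFreeAligned -- over-allocate, place the aligned pointer inside the block, and keep the real base and size in a small header just below the returned address, as IOLibPageMallocHeader now does. demo_* names are illustrative; libc free() does not actually need the stored size, it is kept only to mirror the kernel header.)

#include <stdint.h>
#include <stdlib.h>
#include <assert.h>

/* Stand-in for IOLibPageMallocHeader (without the tracking field). */
struct demo_page_header {
    size_t    allocationSize;       /* mirrors hdr->allocationSize; unused here */
    uintptr_t allocationAddress;    /* start of the real, unaligned allocation */
};

static void * demo_malloc_aligned(size_t size, size_t alignment)
{
    uintptr_t alignMask    = alignment - 1;          /* alignment assumed to be a power of two */
    size_t    adjustedSize = size + sizeof(struct demo_page_header) + alignMask;

    uintptr_t allocationAddress = (uintptr_t) malloc(adjustedSize);
    if (!allocationAddress) return NULL;

    /* First aligned address that still leaves room for the header just below it. */
    uintptr_t address = (allocationAddress + alignMask + sizeof(struct demo_page_header))
                        & ~alignMask;

    struct demo_page_header *hdr =
        (struct demo_page_header *)(address - sizeof(struct demo_page_header));
    hdr->allocationSize    = adjustedSize;
    hdr->allocationAddress = allocationAddress;
    return (void *) address;
}

static void demo_free_aligned(void *address)
{
    struct demo_page_header *hdr =
        (struct demo_page_header *)((uintptr_t) address - sizeof(struct demo_page_header));
    free((void *) hdr->allocationAddress);           /* release the original block */
}

int main(void)
{
    void *p = demo_malloc_aligned(100, 64);
    assert(p && ((uintptr_t) p & 63) == 0);
    demo_free_aligned(p);
    return 0;
}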
void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
- mach_vm_address_t allocationAddress;
- mach_vm_size_t adjustedSize;
+ mach_vm_address_t allocationAddress;
+ mach_vm_size_t adjustedSize;
+ IOLibPageMallocHeader * hdr;
if (!address)
return;
assert(size);
- adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
+ adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
if (adjustedSize >= page_size) {
-
+#if IOTRACKING
+ if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, address, size);
+#endif
kmem_free( kernel_map, (vm_offset_t) address, size);
} else {
- adjustedSize = *((mach_vm_size_t *)
- (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
- allocationAddress = *((mach_vm_address_t *)
- (address - sizeof(mach_vm_address_t) ));
+ hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
+ adjustedSize = hdr->allocationSize;
+ allocationAddress = hdr->allocationAddress;
+#if IOTRACKING
+ if (TRACK_ALLOC) IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
+#endif
kfree((void *)allocationAddress, adjustedSize);
}
IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
- debug_iomalloc_size -= size;
+ OSAddAtomic(-size, &debug_iomalloc_size);
#endif
}
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
mach_vm_size_t alignment, bool contiguous)
{
- kern_return_t kr;
- mach_vm_address_t address;
- mach_vm_address_t allocationAddress;
- mach_vm_size_t adjustedSize;
- mach_vm_address_t alignMask;
+ kern_return_t kr;
+ mach_vm_address_t address;
+ mach_vm_address_t allocationAddress;
+ mach_vm_size_t adjustedSize;
+ mach_vm_address_t alignMask;
+ IOLibPageMallocHeader * hdr;
if (size == 0)
return (0);
alignment = 1;
alignMask = alignment - 1;
- adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
+ adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
+ if (adjustedSize < size) return (0);
contiguous = (contiguous && (adjustedSize > page_size))
|| (alignment > page_size);
if (contiguous || maxPhys)
{
kr = kmem_alloc_contig(kernel_map, &virt, size,
- alignMask, atop(maxPhys), atop(alignMask), 0);
+ alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map));
}
else
{
kr = kernel_memory_allocate(kernel_map, &virt,
- size, alignMask, options);
+ size, alignMask, options, IOMemoryTag(kernel_map));
}
if (KERN_SUCCESS == kr)
+ {
address = virt;
+#if IOTRACKING
+ if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
+#endif
+ }
else
address = 0;
}
else
{
adjustedSize += alignMask;
- allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);
+ if (adjustedSize < size) return (0);
+ allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);
if (allocationAddress) {
- address = (allocationAddress + alignMask
- + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
+
+ address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
& (~alignMask);
if (atop_32(address) != atop_32(address + size - 1))
address = round_page(address);
- *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
- - sizeof(mach_vm_address_t))) = adjustedSize;
- *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
- = allocationAddress;
+ hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
+ hdr->allocationSize = adjustedSize;
+ hdr->allocationAddress = allocationAddress;
+#if IOTRACKING
+ if (TRACK_ALLOC) {
+ bzero(&hdr->tracking, sizeof(hdr->tracking));
+ hdr->tracking.address = ~address;
+ hdr->tracking.size = size;
+ IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
+ }
+#endif
} else
address = 0;
}
if (address) {
IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
- debug_iomalloc_size += size;
+ OSAddAtomic(size, &debug_iomalloc_size);
#endif
}
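(Editor's note: in the small-allocation path above, the kernel asks for 2 * size plus the header so that a buffer which would straddle a page boundary can be bumped to round_page(address) and still fit inside the allocation. A hedged user-space version of that straddle test, assuming 4 KiB pages and illustrative names:)

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12                          /* assume 4 KiB pages for the illustration */

/* True if [address, address + size) touches more than one page -- the same condition
 * the kernel tests with atop_32(address) != atop_32(address + size - 1). */
static bool crosses_page(uintptr_t address, size_t size)
{
    return (address >> DEMO_PAGE_SHIFT) != ((address + size - 1) >> DEMO_PAGE_SHIFT);
}

int main(void)
{
    printf("%d\n", crosses_page(0x1F80, 0x100));    /* 1: 0x1F80..0x207F spans two pages */
    printf("%d\n", crosses_page(0x1000, 0x100));    /* 0: fits within one page */
    return 0;
}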
&min,
segSize,
TRUE,
- VM_FLAGS_ANYWHERE,
+ VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
&map);
if( KERN_SUCCESS != kr) {
lck_mtx_unlock( gIOKitPageableSpace.lock );
struct IOMallocPageableRef
{
vm_offset_t address;
- vm_size_t size;
+ vm_size_t size;
+ vm_tag_t tag;
};
static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
kern_return_t kr;
- kr = kmem_alloc_pageable( map, &ref->address, ref->size );
+ kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );
return( kr );
}
-static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment)
+static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
{
kern_return_t kr = kIOReturnNotReady;
struct IOMallocPageableRef ref;
return( 0 );
ref.size = size;
+ ref.tag = tag;
kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
if( kIOReturnSuccess != kr)
ref.address = 0;
static uintptr_t IOMallocOnePageablePage(iopa_t * a)
{
- return ((uintptr_t) IOMallocPageablePages(page_size, page_size));
+ return ((uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT));
}
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
void * addr;
- if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment);
+ if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
else addr = ((void * ) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));
if (addr) {
#if IOALLOCDEBUG
- debug_iomallocpageable_size += size;
+ OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
}
void IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
- debug_iomallocpageable_size -= size;
+ OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
IOStatisticsAlloc(kIOStatisticsFreePageable, size);
return (0);
}
-static uint32_t
-log2up(uint32_t size)
-{
- if (size <= 1) size = 0;
- else size = 32 - __builtin_clz(size - 1);
- return (size);
-}
-
uintptr_t
iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
{
align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];
IOLockLock(a->lock);
- pa = (typeof(pa)) queue_first(&a->list);
+ __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_first(&a->list));
while (!queue_end(&a->list, &pa->link))
{
addr = iopa_allocinpage(pa, count, align);
a->bytecount += bytes;
break;
}
- pa = (typeof(pa)) queue_next(&pa->link);
+ __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_next(&pa->link));
}
IOLockUnlock(a->lock);
delay_for_interval(milliseconds, kMillisecondScale);
}
+/*
+ * Sleep for the indicated number of milliseconds, plus potentially an
+ * additional number of milliseconds up to the specified leeway.
+ */
+void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
+{
+ delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
+}
+
/*
* Spin for indicated number of microseconds.
*/
va_copy(ap2, ap);
bsd_log_lock();
- __doprnt(format, ap, _iolog_logputc, NULL, 16);
+ __doprnt(format, ap, _iolog_logputc, NULL, 16, TRUE);
bsd_log_unlock();
logwakeup();
- __doprnt(format, ap2, _iolog_consputc, NULL, 16);
+ __doprnt(format, ap2, _iolog_consputc, NULL, 16, TRUE);
+ va_end(ap2);
}
#if !__LP64__
IOAlignment IOSizeToAlignment(unsigned int size)
{
- register int shift;
+ int shift;
const int intsize = sizeof(unsigned int) * 8;
for (shift = 1; shift < intsize; shift++) {