/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/OSDebug.h>
#include <sys/sysctl.h>

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <os/log_private.h>
#include <sys/msgbuf.h>
#include <console/serial_protos.h>
#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */

#define TRACK_ALLOC     (IOTRACKING && (kIOTracking & gIOKitDebug))
mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern int __doprnt(
    const char      *fmt,
    va_list         argp,
    void            (*putc)(int, void *),
    void            *arg,
    int             radix,
    int             is_log);
extern void cons_putc_locked(char);
extern bool bsd_log_lock(bool);
extern void bsd_log_unlock(void);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;

iopa_t gIOBMDPageAllocator;
/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

#if __x86_64__
enum { kIOMaxPageableMaps    = 8 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[kIOMaxPageableMaps];
    lck_mtx_t * lock;
} gIOKitPageableSpace;
static iopa_t gIOPageablePageAllocator;

uint32_t gIOPageAllocChunkBytes;
#if IOTRACKING
IOTrackingQueue * gIOMallocTracking;
IOTrackingQueue * gIOWireTracking;
IOTrackingQueue * gIOMapTracking;
#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void
IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized) {
        return;
    }

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

#if IOTRACKING
    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
        kIOTrackingQueueTypeAlloc,
        0);
    gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

    size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024 * 1024);
    gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
        kIOTrackingQueueTypeDefaultOn
        | kIOTrackingQueueTypeMap
        | kIOTrackingQueueTypeUser,
        0);
#endif

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
        &gIOKitPageableSpace.maps[0].address,
        kIOPageableMapSize,
        TRUE,
        VM_FLAGS_ANYWHERE,
        VM_MAP_KERNEL_FLAGS_NONE,
        VM_KERN_MEMORY_IOKIT,
        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS) {
        panic("failed to allocate iokit pageable map\n");
    }

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    gIOPageAllocChunkBytes = PAGE_SIZE / 64;
    assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);

    libInitialized = true;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uint32_t
log2up(vm_size_t size)
{
    if (size <= 1) {
        size = 0;
    } else {
#if __LP64__
        size = 64 - __builtin_clzl(size - 1);
#else
        size = 32 - __builtin_clzl(size - 1);
#endif
    }
    return (uint32_t) size;
}
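/*
 * Illustrative note (not from the original source): log2up() returns the
 * exponent of the next power of two at or above `size`, e.g.
 *
 *     log2up(1)    == 0
 *     log2up(2)    == 1
 *     log2up(5)    == 3      // rounded up to 8 == 2^3
 *     log2up(4096) == 12
 *
 * Callers below use (1UL << log2up(alignment)) to force a requested
 * alignment up to a power of two before building an alignment mask.
 */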
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOThread
IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS) {
        return NULL;
    }

    thread_deallocate(thread);

    return thread;
}

void
IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}
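/*
 * Usage sketch (illustrative, not part of the original source): a driver that
 * wants a helper kernel thread might do the following; `MyThreadEntry` and
 * `myArg` are hypothetical names.
 *
 *     static void MyThreadEntry(void * myArg)
 *     {
 *         // ... do work ...
 *         IOExitThread();      // never returns
 *     }
 *
 *     IOThread t = IOCreateThread(&MyThreadEntry, myArg);
 *     if (!t) {
 *         // thread could not be created
 *     }
 *
 * Note that IOCreateThread() drops its reference on the new thread, so the
 * returned IOThread is only a weak handle.
 */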
void *
IOMalloc_external(
    vm_size_t size);
void *
IOMalloc_external(
    vm_size_t size)
{
    return IOMalloc_internal(KHEAP_KEXT, size);
}

void *
IOMallocZero_external(
    vm_size_t size);
void *
IOMallocZero_external(
    vm_size_t size)
{
    return IOMallocZero_internal(KHEAP_KEXT, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void *
IOMallocZero_internal(struct kalloc_heap *kalloc_heap_cfg, vm_size_t size)
{
    void * result;

    result = IOMalloc_internal(kalloc_heap_cfg, size);
    if (result) {
        bzero(result, size);
    }
    return result;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOLibMallocHeader {
    IOTrackingAddress tracking;
};

#if IOTRACKING
#define sizeofIOLibMallocHeader    (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader    (0)
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void *
IOMalloc_internal(struct kalloc_heap *kheap, vm_size_t size)
{
    void *    address;
    vm_size_t allocSize;

    allocSize = size + sizeofIOLibMallocHeader;
    if (sizeofIOLibMallocHeader && (allocSize <= size)) {
        return NULL;    // overflow
    }

    address = kheap_alloc_tag_bt(kheap, allocSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT);

    if (address) {
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOLibMallocHeader * hdr;
            hdr = (typeof(hdr))address;
            bzero(&hdr->tracking, sizeof(hdr->tracking));
            hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
            hdr->tracking.size    = size;
            IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
        }
#endif
        address = (typeof(address))(((uintptr_t) address) + sizeofIOLibMallocHeader);

        OSAddAtomicLong(size, &debug_iomalloc_size);
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}
void
IOFree(void * inAddress, vm_size_t size)
{
    void * address;

    if ((address = inAddress)) {
        address = (typeof(address))(((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
        if (TRACK_ALLOC) {
            IOLibMallocHeader * hdr;
            struct ptr_reference { void * ptr; };
            volatile struct ptr_reference ptr;

            // we're about to block in IOTrackingRemove(), make sure the original pointer
            // exists in memory or a register for leak scanning to find
            ptr.ptr = inAddress;

            hdr = (typeof(hdr))address;
            if (size != hdr->tracking.size) {
                OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
            ptr.ptr = NULL;
        }
#endif

        kfree(address, size + sizeofIOLibMallocHeader);

        OSAddAtomicLong(-size, &debug_iomalloc_size);
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}
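/*
 * Usage sketch (illustrative, not part of the original source).  IOMalloc()
 * and IOFree() must be passed the same size; the tracking code above reports
 * a backtrace when the sizes disagree.
 *
 *     struct MyState * state;                       // hypothetical type
 *
 *     state = (struct MyState *) IOMalloc(sizeof(*state));
 *     if (state) {
 *         bzero(state, sizeof(*state));             // IOMalloc does not zero
 *         // ... use state ...
 *         IOFree(state, sizeof(*state));
 *     }
 */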
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_tag_t
IOMemoryTag(vm_map_t map)
{
    vm_tag_t tag;

    if (!vm_kernel_map_is_kernel(map)) {
        return VM_MEMORY_IOKIT;
    }

    tag = vm_tag_bt();
    if (tag == VM_KERN_MEMORY_NONE) {
        tag = VM_KERN_MEMORY_IOKIT;
    }

    return tag;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
struct IOLibPageMallocHeader {
    mach_vm_size_t    allocationSize;
    mach_vm_address_t allocationAddress;
#if IOTRACKING
    IOTrackingAddress tracking;
#endif
};

#if IOTRACKING
#define sizeofIOLibPageMallocHeader    (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader    (sizeof(IOLibPageMallocHeader))
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void *
IOMallocAligned_external(
    vm_size_t size, vm_size_t alignment);
void *
IOMallocAligned_external(
    vm_size_t size, vm_size_t alignment)
{
    return IOMallocAligned_internal(KHEAP_KEXT, size, alignment);
}
void *
IOMallocAligned_internal(struct kalloc_heap *kheap, vm_size_t size,
    vm_size_t alignment)
{
    kern_return_t           kr;
    vm_offset_t             address;
    vm_offset_t             allocationAddress;
    vm_size_t               adjustedSize;
    uintptr_t               alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0) {
        return NULL;
    }
    if (((uint32_t) alignment) != alignment) {
        return NULL;
    }

    alignment    = (1UL << log2up((uint32_t) alignment));
    alignMask    = alignment - 1;
    adjustedSize = size + sizeofIOLibPageMallocHeader;

    if (size > adjustedSize) {
        address = 0;    /* overflow detected */
    } else if (adjustedSize >= page_size) {
        kr = kernel_memory_allocate(kernel_map, &address,
            size, alignMask, KMA_NONE, IOMemoryTag(kernel_map));
        if (KERN_SUCCESS != kr) {
            address = 0;
        }
#if IOTRACKING
        else if (TRACK_ALLOC) {
            IOTrackingAlloc(gIOMallocTracking, address, size);
        }
#endif
    } else {
        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {
            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                adjustedSize, 0, KMA_NONE, IOMemoryTag(kernel_map));
            if (KERN_SUCCESS != kr) {
                allocationAddress = 0;
            }
        } else {
            allocationAddress = (vm_address_t) kheap_alloc_tag_bt(kheap,
                adjustedSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT);
        }

        if (allocationAddress) {
            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                & (~alignMask);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
            }
#endif
        } else {
            address = 0;
        }
    }

    assert(0 == (address & alignMask));

    if (address) {
        OSAddAtomicLong(size, &debug_iomalloc_size);
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}
void
IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t            allocationAddress;
    vm_size_t               adjustedSize;
    IOLibPageMallocHeader * hdr;

    if (!address) {
        return;
    }

    assert(size);

    adjustedSize = size + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
        }
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);
    } else {
        hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize      = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;

#if IOTRACKING
        if (TRACK_ALLOC) {
            if (size != hdr->tracking.size) {
                OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
        }
#endif
        if (adjustedSize >= page_size) {
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        } else {
            kfree(allocationAddress, adjustedSize);
        }
    }

    OSAddAtomicLong(-size, &debug_iomalloc_size);

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
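/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * needing, say, a 64-byte aligned buffer pairs the calls as follows.  As with
 * IOFree(), the size passed to IOFreeAligned() must match the allocation.
 *
 *     void * buf = IOMallocAligned(1024, 64);
 *     if (buf) {
 *         assert(0 == (((uintptr_t) buf) & 63));
 *         // ... use buf ...
 *         IOFreeAligned(buf, 1024);
 *     }
 */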
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    vm_address_t            allocationAddress;
    vm_size_t               adjustedSize;
    IOLibPageMallocHeader * hdr;

    if (!address) {
        return;
    }

    assert(size);

    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOTrackingFree(gIOMallocTracking, address, size);
        }
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);
    } else {
        hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize      = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
        }
#endif
        kfree(allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);

    OSAddAtomicLong(-size, &debug_iomalloc_size);
}
#if __arm__ || __arm64__
extern unsigned long gPhysBase, gPhysSize;
#endif
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
    mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t           kr;
    mach_vm_address_t       address;
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    mach_vm_address_t       alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0) {
        return 0;
    }
    if (alignment == 0) {
        alignment = 1;
    }

    alignMask = alignment - 1;

    if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) {
        return 0;
    }

    contiguous = (contiguous && (adjustedSize > page_size))
        || (alignment > page_size);

    if (contiguous || maxPhys) {
        kma_flags_t options = KMA_NONE;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
            || (alignment > page_size);

        if (!contiguous) {
#if __arm__ || __arm64__
            if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize)) {
                maxPhys = 0;
            } else
#endif
            if (maxPhys <= 0xFFFFFFFF) {
                maxPhys = 0;
                options = (kma_flags_t)(options | KMA_LOMEM);
            } else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys) {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                alignMask, (ppnum_t) atop(maxPhys), (ppnum_t) atop(alignMask),
                KMA_NONE, IOMemoryTag(kernel_map));
        } else {
            kr = kernel_memory_allocate(kernel_map, &virt,
                size, alignMask, options, IOMemoryTag(kernel_map));
        }
        if (KERN_SUCCESS == kr) {
            address = virt;
#if IOTRACKING
            if (TRACK_ALLOC) {
                IOTrackingAlloc(gIOMallocTracking, address, size);
            }
#endif
        } else {
            address = 0;
        }
    } else {
        adjustedSize += alignMask;
        if (adjustedSize < size) {
            return 0;
        }
        allocationAddress = (mach_vm_address_t) kheap_alloc_tag_bt(KHEAP_KEXT,
            adjustedSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {
            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1)) {
                address = round_page(address);
            }

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
            }
#endif
        } else {
            address = 0;
        }
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
        OSAddAtomicLong(size, &debug_iomalloc_size);
    }

    return address;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry {
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
void *
IOMallocContiguous(vm_size_t size, vm_size_t alignment,
    IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0) {
        return NULL;
    }
    if (alignment == 0) {
        alignment = 1;
    }

    /* Do we want a physical address? */
    if (!physicalAddress) {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    } else {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask    = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
            kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (bmd) {
            _IOMallocContiguousEntry *
                entry = IONew(_IOMallocContiguousEntry, 1);
            if (!entry) {
                bmd->release();
            } else {
                entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
                entry->md          = bmd;
                lck_mtx_lock(gIOMallocContiguousEntriesLock);
                queue_enter( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link );
                lck_mtx_unlock(gIOMallocContiguousEntriesLock);

                address          = (mach_vm_address_t) entry->virtualAddr;
                *physicalAddress = bmd->getPhysicalAddress();
            }
        }
    }

    return (void *) address;
}
void
IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor       * md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if (!address) {
        return;
    }

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
        _IOMallocContiguousEntry *, link )
    {
        if (entry->virtualAddr == address) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md) {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    } else {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}
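/*
 * Usage sketch (illustrative, not part of the original source).  When a
 * physical address is requested, the memory comes from an
 * IOBufferMemoryDescriptor kept on gIOMallocContiguousEntries, so the free
 * path above can find and release it again.  New driver code is generally
 * expected to use IOBufferMemoryDescriptor directly for DMA buffers.
 *
 *     IOPhysicalAddress phys;
 *     void * buf = IOMallocContiguous(8192, page_size, &phys);
 *     if (buf) {
 *         // ... program hardware with phys, fill buf ...
 *         IOFreeContiguous(buf, 8192);
 *     }
 */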
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t
IOIteratePageableMaps(vm_size_t size,
    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize) {
        return kIOReturnBadArgument;
    }

    do {
        index    = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while (attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if (KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if (index) {
                index--;
            } else {
                index = gIOKitPageableSpace.count - 1;
            }
        }
        if (KERN_NO_SPACE != kr) {
            break;
        }

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if (index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if (size < kIOPageableMapSize) {
            segSize = kIOPageableMapSize;
        } else {
            segSize = size;
        }

        min = 0;
        kr = kmem_suballoc(kernel_map,
            &min,
            segSize,
            TRUE,
            VM_FLAGS_ANYWHERE,
            VM_MAP_KERNEL_FLAGS_NONE,
            VM_KERN_MEMORY_IOKIT,
            &map);
        if (KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );
    } while (true);

    return kr;
}
struct IOMallocPageableRef {
    vm_offset_t address;
    vm_size_t   size;
    vm_tag_t    tag;
};

static kern_return_t
IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );

    return kr;
}
static void *
IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size) {
        return NULL;
    }
    if (size > kIOPageableMaxMapSize) {
        return NULL;
    }

    ref.size = size;
    ref.tag  = tag;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if (kIOReturnSuccess != kr) {
        ref.address = 0;
    }

    return (void *) ref.address;
}
vm_map_t
IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = NULL;
    UInt32   index;

    for (index = 0; index < gIOKitPageableSpace.count; index++) {
        if ((address >= gIOKitPageableSpace.maps[index].address)
            && (address < gIOKitPageableSpace.maps[index].end)) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if (!map) {
        panic("IOPageableMapForAddress: null");
    }

    return map;
}
static void
IOFreePageablePages(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress((vm_address_t) address);
    if (map) {
        kmem_free( map, (vm_offset_t) address, size);
    }
}
static uintptr_t
IOMallocOnePageablePage(iopa_t * a)
{
    return (uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT);
}
static void *
IOMallocPageableInternal(vm_size_t size, vm_size_t alignment, bool zeroed)
{
    void * addr;

    if (((uint32_t) alignment) != alignment) {
        return NULL;
    }

    if (size >= (page_size - 4 * gIOPageAllocChunkBytes) ||
        alignment > page_size) {
        addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
        /* Memory allocated this way will already be zeroed. */
    } else {
        addr = ((void *) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, (uint32_t) alignment));
        if (addr && zeroed) {
            bzero(addr, size);
        }
    }

    if (addr) {
        OSAddAtomicLong(size, &debug_iomallocpageable_size);
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return addr;
}
void *
IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    return IOMallocPageableInternal(size, alignment, /*zeroed*/ false);
}

void *
IOMallocPageableZero(vm_size_t size, vm_size_t alignment)
{
    return IOMallocPageableInternal(size, alignment, /*zeroed*/ true);
}
void
IOFreePageable(void * address, vm_size_t size)
{
    OSAddAtomicLong(-size, &debug_iomallocpageable_size);
    IOStatisticsAlloc(kIOStatisticsFreePageable, size);

    if (size < (page_size - 4 * gIOPageAllocChunkBytes)) {
        address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
        size = page_size;
    }
    if (address) {
        IOFreePageablePages(address, size);
    }
}
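/*
 * Usage sketch (illustrative, not part of the original source).  Pageable
 * allocations may fault, so they must only be touched in contexts that can
 * block; small requests are carved out of shared pages by the iopa allocator
 * below, larger ones go straight to the pageable maps.
 *
 *     void * table = IOMallocPageableZero(16384, sizeof(void *));
 *     if (table) {
 *         // ... use table from thread context only ...
 *         IOFreePageable(table, 16384);
 *     }
 */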
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void
iopa_init(iopa_t * a)
{
    bzero(a, sizeof(*a));
    a->lock = IOLockAlloc();
    queue_init(&a->list);
}
static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find strings of count 1 bits in avail
    for (n = count; n > 1; n -= s) {
        s = n >> 1;
        avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail) {
        n = __builtin_clzll(avail);
        pa->avail &= ~((-1ULL << (64 - count)) >> n);
        if (!pa->avail && pa->link.next) {
            remque(&pa->link);
            pa->link.next = NULL;
        }
        return n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa);
    }

    return 0;
}
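/*
 * Worked example (illustrative, not part of the original source).  Each page
 * is divided into 64 chunks of gIOPageAllocChunkBytes and `avail` is a bitmap
 * with bit (63 - i) describing chunk i.  The loop
 *
 *     for (n = count; n > 1; n -= s) {
 *         s = n >> 1;
 *         avail = avail & (avail << s);
 *     }
 *
 * leaves a bit set only where a run of `count` consecutive free chunks
 * starts.  For example, with count == 3 and chunks 5..9 free, the surviving
 * bits mark chunks 5, 6 and 7, i.e. every position from which three free
 * chunks are available; `align` then masks off positions with the wrong
 * alignment and __builtin_clzll() picks the lowest-numbered remaining chunk.
 */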
uintptr_t
iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, vm_size_t balign)
{
    static const uint64_t align_masks[] = {
        0xFFFFFFFFFFFFFFFF,
        0xAAAAAAAAAAAAAAAA,
        0x8888888888888888,
        0x8080808080808080,
        0x8000800080008000,
        0x8000000080000000,
        0x8000000000000000,
    };
    iopa_page_t * pa;
    uintptr_t     addr = 0;
    uint32_t      count;
    uint64_t      align;
    vm_size_t     align_masks_idx;

    if (((uint32_t) bytes) != bytes) {
        return 0;
    }
    if (!bytes) {
        bytes = 1;
    }
    count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;

    align_masks_idx = log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes);
    assert(align_masks_idx < sizeof(align_masks) / sizeof(*align_masks));
    align = align_masks[align_masks_idx];

    IOLockLock(a->lock);
    __IGNORE_WCASTALIGN(pa = (typeof(pa))queue_first(&a->list));
    while (!queue_end(&a->list, &pa->link)) {
        addr = iopa_allocinpage(pa, count, align);
        if (addr) {
            a->bytecount += bytes;
            break;
        }
        __IGNORE_WCASTALIGN(pa = (typeof(pa))queue_next(&pa->link));
    }
    IOLockUnlock(a->lock);

    if (!addr) {
        addr = alloc(a);
        if (addr) {
            pa = (typeof(pa))(addr + page_size - gIOPageAllocChunkBytes);
            pa->signature = kIOPageAllocSignature;
            pa->avail     = -2ULL;

            addr = iopa_allocinpage(pa, count, align);
            IOLockLock(a->lock);
            if (addr) {
                enqueue_head(&a->list, &pa->link);
            }
            a->pagecount++;
            a->bytecount += bytes;
            IOLockUnlock(a->lock);
        }
    }

    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    return addr;
}
uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
    iopa_page_t * pa;
    uint32_t      count;
    uintptr_t     chunk;

    if (((uint32_t) bytes) != bytes) {
        return 0;
    }
    if (!bytes) {
        bytes = 1;
    }

    chunk = (addr & page_mask);
    assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa))(addr | (page_size - gIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    chunk /= gIOPageAllocChunkBytes;

    IOLockLock(a->lock);
    if (!pa->avail) {
        assert(!pa->link.next);
        enqueue_tail(&a->list, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) {
        pa = NULL;
    } else {
        remque(&pa->link);
        pa->link.next = NULL;
        pa->signature = 0;
        a->pagecount--;
        // page to free
        pa = (typeof(pa))trunc_page(pa);
    }
    a->bytecount -= bytes;
    IOLockUnlock(a->lock);

    return (uintptr_t) pa;
}
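/*
 * Illustrative note (not from the original source): iopa_free() returns the
 * page base when the whole page has become free again (avail == -2ULL, i.e.
 * every chunk except the trailing iopa_page_t header chunk), and 0 otherwise.
 * IOFreePageable() above uses that return value to decide whether a whole
 * page can be handed back to IOFreePageablePages().
 */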
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn
IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
    IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if (task != kernel_task) {
        return kIOReturnUnsupported;
    }
    if ((address | length) & PAGE_MASK) {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return kIOReturnUnsupported;
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while ((kIOReturnSuccess == ret) && (length > 0)) {
        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if (pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else {
            ret = kIOReturnVMError;
        }

        address += page_size;
        length  -= page_size;
    }

    return ret;
}

IOReturn
IOFlushProcessorCache( task_t task, IOVirtualAddress address,
    IOByteCount length )
{
    if (task != kernel_task) {
        return kIOReturnUnsupported;
    }

    flush_dcache64((addr64_t) address, (unsigned) length, false );

    return kIOReturnSuccess;
}
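/*
 * Usage sketch (illustrative, not part of the original source): both calls
 * only operate on kernel_task mappings.  A driver that has filled a buffer
 * the hardware will read with non-coherent DMA might do:
 *
 *     // flush CPU cache lines covering the buffer before starting the DMA
 *     IOFlushProcessorCache(kernel_task, (IOVirtualAddress) buf, length);
 *
 * IOSetProcessorCacheMode() similarly remaps already-wired, page-aligned
 * kernel memory with a new cache mode such as kIOMapWriteThruCache.
 */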
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t
OSKernelStackRemaining( void )
{
    return ml_stack_remaining();
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Sleep for the indicated number of milliseconds.
 */
void
IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Sleep for the indicated number of milliseconds, and potentially an
 * additional number of milliseconds up to the leeway values.
 */
void
IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
{
    delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
}

/*
 * Spin for the indicated number of microseconds.
 */
void
IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for the indicated number of nanoseconds.
 */
void
IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}
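/*
 * Usage sketch (illustrative, not part of the original source): IOSleep()
 * blocks the calling thread and may only be used where blocking is legal,
 * while IODelay()/IOPause() busy-wait and are suitable for short waits in
 * more constrained contexts.
 *
 *     IOSleep(100);       // block for ~100 ms
 *     IODelay(50);        // spin for ~50 us, e.g. polling a device register
 *     IOPause(500);       // spin for ~500 ns
 */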
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1, 0);

__attribute__((noinline, not_tail_called))
void
IOLog(const char *format, ...)
{
    void *caller = __builtin_return_address(0);
    va_list ap;

    va_start(ap, format);
    _IOLogv(format, ap, caller);
    va_end(ap);
}

__attribute__((noinline, not_tail_called))
void
IOLogv(const char *format, va_list ap)
{
    void *caller = __builtin_return_address(0);
    _IOLogv(format, ap, caller);
}

static void
_IOLogv(const char *format, va_list ap, void *caller)
{
    va_list ap2;
    struct console_printbuf_state info_data;

    console_printbuf_state_init(&info_data, TRUE, TRUE);

    va_copy(ap2, ap);

    os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);

    __doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
    console_printbuf_clear(&info_data);
    va_end(ap2);

    assertf(ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !gCPUsRunning, "IOLog called with interrupts disabled");
}
void
IOPanic(const char *reason)
{
    panic("%s", reason);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
    void (*output)(const char *format, ...))
{
    size_t  idx, linestart;
    enum  { bytelen = (sizeof("0xZZ, ") - 1) };
    char    hex[(bytelen * 16) + 1];
    uint8_t c, chars[17];

    output("%s(0x%lx):\n", title, size);
    output("    0    1    2    3    4    5    6    7    8    9    A    B    C    D    E    F\n");

    chars[16] = 0;
    for (idx = 0, linestart = 0; idx < size;) {
        c = ((char *)buffer)[idx];
        snprintf(&hex[bytelen * (idx & 15)], bytelen + 1, "0x%02x, ", c);
        chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';
        idx++;
        if ((idx == size) || !(idx & 15)) {
            if (idx & 15) {
                chars[idx & 15] = 0;
            }
            output("/* %04lx: */ %-96s /* |%-16s| */\n", linestart, hex, chars);
            linestart += 16;
        }
    }
}
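/*
 * Usage sketch (illustrative, not part of the original source): dump a small
 * buffer to the system log as 16-byte rows of hex plus an ASCII column.
 *
 *     IOKitKernelLogBuffer("rxPacket", packet, packetLength, &IOLog);
 *
 * (packet and packetLength are hypothetical; any printf-like output function
 * can be passed.)
 */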
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // that's pretty

const char *
IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for (; regValueArray->name; regValueArray++) {
        if (regValueArray->value == value) {
            return regValueArray->name;
        }
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return (const char *)noValue;
}
IOReturn
IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for (; regValueArray->name; regValueArray++) {
        if (!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
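/*
 * Usage sketch (illustrative, not part of the original source): a name/value
 * table is a zero-terminated array of IONamedValue entries; the table and
 * names below are hypothetical.
 *
 *     static const IONamedValue gLinkSpeeds[] = {
 *         { 10,   "10 Mb/s"  },
 *         { 100,  "100 Mb/s" },
 *         { 1000, "1 Gb/s"   },
 *         { 0,    NULL       }      // terminator: name == NULL
 *     };
 *
 *     const char * name = IOFindNameForValue(100, gLinkSpeeds);   // "100 Mb/s"
 *     int          speed;
 *     if (kIOReturnSuccess == IOFindValueForName("1 Gb/s", gLinkSpeeds, &speed)) {
 *         // speed == 1000
 *     }
 */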
OSString *
IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;

    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, (int) (sizeof(buf) - len));
    return OSString::withCString(buf);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment
IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000) {
            return (IOAlignment)(intsize - shift);
        }
        size <<= 1;
    }
    return 0;
}

unsigned int
IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {