/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/OSDebug.h>
#include <sys/sysctl.h>

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <os/log_private.h>
#include <sys/msgbuf.h>
#include <console/serial_protos.h>

#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */

#define TRACK_ALLOC	(IOTRACKING && (kIOTracking & gIOKitDebug))

mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern int
__doprnt(
    const char  *fmt,
    va_list     argp,
    void        (*putc)(int, void *),
    void        *arg,
    int         radix,
    int         is_log);

extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);
extern void logwakeup();

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal       = NULL;
void *_giDebugLogDataInternal   = NULL;
void *_giDebugReserved1         = NULL;
void *_giDebugReserved2         = NULL;

iopa_t gIOBMDPageAllocator;

/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

#if __x86_64__  /* guard condition assumed; the extract keeps both per-platform sets of limits */
enum { kIOMaxPageableMaps    = 8 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    lck_mtx_t * lock;
    IOMapData   maps[ kIOMaxPageableMaps ];
} gIOKitPageableSpace;

static iopa_t gIOPageablePageAllocator;

uint32_t gIOPageAllocChunkBytes;

#if IOTRACKING
IOTrackingQueue * gIOMallocTracking;
IOTrackingQueue * gIOWireTracking;
IOTrackingQueue * gIOMapTracking;
#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

#if IOTRACKING
    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
                                             kIOTrackingQueueTypeAlloc,
                                             37);
    gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName,   0, 0, page_size, 0, 0);

    size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024*1024);
    gIOMapTracking    = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
                                             kIOTrackingQueueTypeDefaultOn
                                             | kIOTrackingQueueTypeMap
                                             | kIOTrackingQueueTypeUser,
                                             0);
#endif

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE,
                        VM_MAP_KERNEL_FLAGS_NONE,
                        VM_KERN_MEMORY_IOKIT,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    gIOPageAllocChunkBytes = PAGE_SIZE/64;
    assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
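    /*
     * One page is managed as 64 chunks of PAGE_SIZE/64 bytes each (64 bytes
     * with 4K pages), so a single 64-bit bitmap word in iopa_page_t can track
     * every chunk in the page; the assert above guarantees the bookkeeping
     * structure itself fits inside one chunk.
     */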
    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else size = 32 - __builtin_clz(size - 1);
    return (size);
}
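/*
 * Examples: log2up(1) == 0, log2up(2) == 1, log2up(3) == 2, log2up(4) == 2,
 * log2up(4096) == 12 -- i.e. the smallest n with (1 << n) >= size.
 */
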
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

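/*
 * IOCreateThread starts the new thread immediately and then drops the
 * reference returned by kernel_thread_start(), so the returned IOThread is
 * only a weak handle; the thread cleans itself up when fcn terminates,
 * typically by calling IOExitThread().
 */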
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t   result;
    thread_t        thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOLibMallocHeader
{
    IOTrackingAddress tracking;
};

#if IOTRACKING
#define sizeofIOLibMallocHeader	(sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader	(0)
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

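/*
 * When allocation tracking is enabled, IOMalloc prepends an IOLibMallocHeader
 * to every allocation and returns the address just past it. The header
 * records the requested size and the user address stored bit-inverted,
 * presumably so the stored value does not read as a live pointer during leak
 * scanning.
 */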
void * IOMalloc(vm_size_t size)
{
    void *    address;
    vm_size_t allocSize;

    allocSize = size + sizeofIOLibMallocHeader;
#if IOTRACKING
    if (sizeofIOLibMallocHeader && (allocSize <= size)) return (NULL);  // overflow
#endif
    address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT);

    if (address) {
#if IOTRACKING
        if (TRACK_ALLOC) {
            IOLibMallocHeader * hdr;
            hdr = (typeof(hdr)) address;
            bzero(&hdr->tracking, sizeof(hdr->tracking));
            hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
            hdr->tracking.size    = size;
            IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
        }
#endif
        address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * inAddress, vm_size_t size)
{
    void * address;

    if ((address = inAddress))
    {
        address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader);

#if IOTRACKING
        if (TRACK_ALLOC)
        {
            IOLibMallocHeader * hdr;
            struct ptr_reference { void * ptr; };
            volatile struct ptr_reference ptr;

            // we're about to block in IOTrackingRemove(), make sure the original pointer
            // exists in memory or a register for leak scanning to find
            ptr.ptr = inAddress;

            hdr = (typeof(hdr)) address;
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
            ptr.ptr = NULL;
        }
#endif

        kfree(address, size + sizeofIOLibMallocHeader);

#if IOALLOCDEBUG
        OSAddAtomic(-size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_tag_t
IOMemoryTag(vm_map_t map)
{
    vm_tag_t tag;

    if (!vm_kernel_map_is_kernel(map)) return (VM_MEMORY_IOKIT);

    tag = vm_tag_bt();
    if (tag == VM_KERN_MEMORY_NONE) tag = VM_KERN_MEMORY_IOKIT;

    return (tag);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOLibPageMallocHeader
{
    mach_vm_size_t    allocationSize;
    mach_vm_address_t allocationAddress;
#if IOTRACKING
    IOTrackingAddress tracking;
#endif
};

#if IOTRACKING
#define sizeofIOLibPageMallocHeader	(sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader	(sizeof(IOLibPageMallocHeader))
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

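/*
 * IOMallocAligned: page-sized or larger requests go straight to the VM with
 * the requested alignment; smaller requests over-allocate by alignMask bytes,
 * align within that region, and stash the real allocation address and size in
 * an IOLibPageMallocHeader just below the returned pointer so that
 * IOFreeAligned can recover them.
 *
 * Illustrative usage (the caller must pass the same size to the free call):
 *
 *	void * buf = IOMallocAligned(bufSize, 256);
 *	if (buf) {
 *		// ... use buf, which is 256-byte aligned ...
 *		IOFreeAligned(buf, bufSize);
 *	}
 */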
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t           kr;
    vm_offset_t             address;
    vm_offset_t             allocationAddress;
    vm_size_t               adjustedSize;
    uintptr_t               alignMask;
    IOLibPageMallocHeader * hdr;

    alignment = (1UL << log2up(alignment));
    alignMask = alignment - 1;
    adjustedSize = size + sizeofIOLibPageMallocHeader;

    if (size > adjustedSize) {
        address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0, IOMemoryTag(kernel_map));
        if (KERN_SUCCESS != kr) address = 0;
#if IOTRACKING
        else if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0, IOMemoryTag(kernel_map));
            if (KERN_SUCCESS != kr) allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {
            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                        & (~alignMask);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
            }
#endif
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if (address) {
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t            allocationAddress;
    vm_size_t               adjustedSize;
    IOLibPageMallocHeader * hdr;

    if (!address)
        return;

    assert(size);

    adjustedSize = size + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;

#if IOTRACKING
        if (TRACK_ALLOC)
        {
            if (size != hdr->tracking.size)
            {
                OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                size = hdr->tracking.size;
            }
            IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
        }
#endif
        if (adjustedSize >= page_size) {
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        } else {
            kfree((void *)allocationAddress, adjustedSize);
        }
    }

#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

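/*
 * IOKernelFreePhysical mirrors the sizing logic of
 * IOKernelAllocateWithPhysicalRestrict below: it recomputes
 * (2 * size) + sizeofIOLibPageMallocHeader to decide whether the original
 * allocation came straight from the VM (page-sized or larger, freed with
 * kmem_free) or from kalloc with a hidden header (freed with kfree using the
 * allocation address and size stored in that header).
 */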
void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    IOLibPageMallocHeader * hdr;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize >= page_size) {
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, address, size);
#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
        adjustedSize = hdr->allocationSize;
        allocationAddress = hdr->allocationAddress;
#if IOTRACKING
        if (TRACK_ALLOC) IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
#endif
        kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif
}

#if __arm__ || __arm64__
extern unsigned long gPhysBase, gPhysSize;
#endif

mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                                     mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t           kr;
    mach_vm_address_t       address;
    mach_vm_address_t       allocationAddress;
    mach_vm_size_t          adjustedSize;
    mach_vm_address_t       alignMask;
    IOLibPageMallocHeader * hdr;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
    if (adjustedSize < size) return (0);

    contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int         options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                       || (alignment > page_size);

        if (!contiguous)
        {
#if __arm__ || __arm64__
            if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize))
            {
                maxPhys = 0;
            }
            else
#endif
            if (maxPhys <= 0xFFFFFFFF)
            {
                maxPhys = 0;
                options |= KMA_LOMEM;
            }
            else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
            {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map));
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options, IOMemoryTag(kernel_map));
        }
        if (KERN_SUCCESS == kr)
        {
            address = virt;
#if IOTRACKING
            if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
#endif
        }
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        if (adjustedSize < size) return (0);
        allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);

        if (allocationAddress) {

            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                        & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
            hdr->allocationSize    = adjustedSize;
            hdr->allocationAddress = allocationAddress;
#if IOTRACKING
            if (TRACK_ALLOC) {
                bzero(&hdr->tracking, sizeof(hdr->tracking));
                hdr->tracking.address = ~address;
                hdr->tracking.size    = size;
                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
            }
#endif
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
        OSAddAtomic(size, &debug_iomalloc_size);
#endif
    }

    return (address);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

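/*
 * IOMallocContiguous has two paths: with no physicalAddress out-parameter it
 * simply calls IOKernelAllocateWithPhysicalRestrict; otherwise it builds an
 * IOBufferMemoryDescriptor restricted to a 32-bit physical mask and remembers
 * the descriptor on gIOMallocContiguousEntries so that IOFreeContiguous can
 * later match the virtual address back to the descriptor and release it.
 */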
struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if (!address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

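/*
 * Pageable allocations come from a small set of pageable submaps created on
 * demand (up to kIOMaxPageableMaps of kIOPageableMapSize bytes each).
 * IOIteratePageableMaps runs the callback against each existing map starting
 * at the last successful index (the hint) and, only when every map reports
 * KERN_NO_SPACE, grows the set by suballocating a new map from kernel_map.
 */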
kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts-- ) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if (KERN_NO_SPACE != kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           VM_FLAGS_ANYWHERE,
                           VM_MAP_KERNEL_FLAGS_NONE,
                           VM_KERN_MEMORY_IOKIT,
                           &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t   size;
    vm_tag_t    tag;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );

    return( kr );
}

static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    ref.tag  = tag;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}

static void IOFreePageablePages(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);
}

static uintptr_t IOMallocOnePageablePage(iopa_t * a)
{
    return ((uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT));
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    void * addr;

    if (size >= (page_size - 4*gIOPageAllocChunkBytes))
        addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
    else
        addr = ((void * ) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));

    if (addr) {
#if IOALLOCDEBUG
        OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return (addr);
}

void IOFreePageable(void * address, vm_size_t size)
{
#if IOALLOCDEBUG
    OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    IOStatisticsAlloc(kIOStatisticsFreePageable, size);

    if (size < (page_size - 4*gIOPageAllocChunkBytes))
    {
        address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
        size = page_size;
    }
    if (address) IOFreePageablePages(address, size);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

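/*
 * iopa_t is a simple sub-page allocator used by the pageable and
 * IOBufferMemoryDescriptor pools above. Each backing page is divided into 64
 * chunks of gIOPageAllocChunkBytes; a per-page iopa_page_t, stored in the
 * page's last chunk, keeps a 64-bit "avail" bitmap (most significant bit =
 * chunk 0) plus a signature and queue linkage. Pages that still have free
 * chunks are kept on a->list.
 */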
extern "C" void
iopa_init(iopa_t * a)
{
    bzero(a, sizeof(*a));
    a->lock = IOLockAlloc();
    queue_init(&a->list);
}

static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find strings of count 1 bits in avail
    for (n = count; n > 1; n -= s)
    {
        s = n >> 1;
        avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail)
    {
        n = __builtin_clzll(avail);
        pa->avail &= ~((-1ULL << (64 - count)) >> n);
        if (!pa->avail && pa->link.next)
        {
            remque(&pa->link);
            pa->link.next = 0;
        }
        return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
    }

    return (0);
}
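/*
 * Worked example of the run search above, for count == 3: the loop runs with
 * s == 1 twice (n: 3 -> 2 -> 1), each time ANDing avail with itself shifted
 * left, so a bit stays set only where it begins a run of 3 free chunks.
 * After masking with the alignment pattern, __builtin_clzll picks the lowest
 * numbered (most significant) qualifying run, the mask
 * (-1ULL << (64 - count)) >> n clears exactly those 3 bits, and the returned
 * address is the page base plus n chunks.
 */
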
uintptr_t
iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
{
    /* bit patterns marking chunk indexes aligned to 1, 2, 4, ... chunks (MSB-first) */
    static const uint64_t align_masks[] = {
        0xFFFFFFFFFFFFFFFF,
        0xAAAAAAAAAAAAAAAA,
        0x8888888888888888,
        0x8080808080808080,
        0x8000800080008000,
        0x8000000080000000,
        0x8000000000000000,
    };
    iopa_page_t * pa;
    uintptr_t     addr = 0;
    uint32_t      count;
    uint64_t      align;

    if (!bytes) bytes = 1;
    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];

    IOLockLock(a->lock);
    __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_first(&a->list));
    while (!queue_end(&a->list, &pa->link))
    {
        addr = iopa_allocinpage(pa, count, align);
        if (addr)
        {
            a->bytecount += bytes;
            break;
        }
        __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_next(&pa->link));
    }
    IOLockUnlock(a->lock);

    if (!addr)
    {
        addr = alloc(a);
        if (addr)
        {
            pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes);
            pa->signature = kIOPageAllocSignature;
            pa->avail     = -2ULL;

            addr = iopa_allocinpage(pa, count, align);
            IOLockLock(a->lock);
            if (pa->avail) enqueue_head(&a->list, &pa->link);
            a->pagecount++;
            if (addr) a->bytecount += bytes;
            IOLockUnlock(a->lock);
        }
    }

    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    return (addr);
}
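/*
 * iopa_free marks the chunks free again and returns 0 while the page still
 * holds other live chunks; once every chunk except the bookkeeping one is
 * free again (avail == -2ULL) it unlinks the page and returns the page's
 * base address so the caller (e.g. IOFreePageable) can give the page back to
 * its backing allocator.
 */
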
uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
    iopa_page_t * pa;
    uint32_t      count;
    uintptr_t     chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
    chunk /= gIOPageAllocChunkBytes;

    IOLockLock(a->lock);
    if (!pa->avail)
    {
        assert(!pa->link.next);
        enqueue_tail(&a->list, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
        a->pagecount--;
        // page to free
        pa = (typeof(pa)) trunc_page(pa);
    }
    a->bytecount -= bytes;
    IOLockUnlock(a->lock);

    return ((uintptr_t) pa);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Sleep the calling thread for the indicated number of milliseconds, plus
 * potentially an additional number of milliseconds up to the leeway value.
 */
void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
{
    delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
}

/*
 * Spin for the indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for the indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

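/*
 * IOLog/IOLogv funnel through _IOLogv, which sends the message to two sinks:
 * os_log (passing the caller's return address for attribution) and the
 * legacy console/message buffer via __doprnt. The assertf at the end flags
 * callers that log with interrupts disabled, since the logging path may take
 * locks or block.
 */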
static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1,0);

__attribute__((noinline,not_tail_called))
void IOLog(const char *format, ...)
{
    void *caller = __builtin_return_address(0);
    va_list ap;

    va_start(ap, format);
    _IOLogv(format, ap, caller);
    va_end(ap);
}

__attribute__((noinline,not_tail_called))
void IOLogv(const char *format, va_list ap)
{
    void *caller = __builtin_return_address(0);
    _IOLogv(format, ap, caller);
}

void _IOLogv(const char *format, va_list ap, void *caller)
{
    va_list ap2;
    struct console_printbuf_state info_data;
    console_printbuf_state_init(&info_data, TRUE, TRUE);

    va_copy(ap2, ap);

    os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);

    __doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
    console_printbuf_clear(&info_data);
    va_end(ap2);

    assertf(ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !gCPUsRunning, "IOLog called with interrupts disabled");
}

void IOPanic(const char *reason)
{
    panic("%s", reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;

    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

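/*
 * Examples: IOSizeToAlignment(4096) == 12 and IOAlignmentToSize(12) == 4096;
 * the alignment value is simply log2 of the size, derived from the position
 * of the highest set bit.
 */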
IOAlignment IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {