/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/OSDebug.h>
#include <sys/sysctl.h>

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <sys/msgbuf.h>
#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
	IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */
mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern int
__doprnt(
	const char	*fmt,
	va_list		argp,
	void		(*putc)(int, void *),
	void		*arg,
	int		radix);
extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);
extern void logwakeup();

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
lck_grp_t	*IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal	= NULL;
void *_giDebugLogDataInternal	= NULL;
void *_giDebugReserved1		= NULL;
void *_giDebugReserved2		= NULL;

iopa_t gIOBMDPageAllocator;
/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;
#if __x86_64__
enum { kIOMaxPageableMaps    = 8 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif
typedef struct {
    vm_map_t	map;
    vm_offset_t	address;
    vm_offset_t	end;
} IOMapData;

static struct {
    UInt32	count;
    UInt32	hint;
    IOMapData	maps[ kIOMaxPageableMaps ];
    lck_mtx_t *	lock;
} gIOKitPageableSpace;

static iopa_t gIOPageablePageAllocator;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                &gIOKitPageableSpace.maps[0].address,
                kIOPageableMapSize,
                TRUE,
                VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock           = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end    = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint           = 0;
    gIOKitPageableSpace.count          = 1;

    gIOMallocContiguousEntriesLock     = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    iopa_init(&gIOBMDPageAllocator);
    iopa_init(&gIOPageablePageAllocator);

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t	result;
    thread_t		thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}

void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}
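/*
 * Example (illustrative sketch; the worker name and context type below are
 * hypothetical, not part of this file): a typical caller passes a plain C
 * function and a context pointer.
 *
 *     static void myWorker(void * arg)
 *     {
 *         // ... do work with arg ...
 *         IOExitThread();              // terminates the calling thread
 *     }
 *
 *     IOThread t = IOCreateThread(&myWorker, context);
 *
 * IOCreateThread drops its own reference with thread_deallocate(), so the
 * returned IOThread must not be used after the thread may have exited.
 */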
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
    if (address) {
        debug_iomalloc_size += size;
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
        debug_iomalloc_size -= size;
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}
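/*
 * Example (illustrative sketch; the struct name is hypothetical): unlike
 * malloc/free, the caller must remember the allocation size and pass the
 * same value back to IOFree, because kfree() needs it.
 *
 *     MyRecord * rec = (MyRecord *) IOMalloc(sizeof(MyRecord));
 *     if (rec) {
 *         // ... use rec ...
 *         IOFree(rec, sizeof(MyRecord));   // size must match the IOMalloc size
 *     }
 */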
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t	kr;
    vm_offset_t		address;
    vm_offset_t		allocationAddress;
    vm_size_t		adjustedSize;
    uintptr_t		alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (size > adjustedSize) {
        address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t)))
                    = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                    = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if (address) {
        debug_iomalloc_size += size;
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t	allocationAddress;
    vm_size_t		adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

    debug_iomalloc_size -= size;

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
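/*
 * Layout note (added for clarity): for sub-page requests, IOMallocAligned
 * over-allocates by alignMask plus a small header, then returns an aligned
 * pointer with the allocation length and the original (unaligned) start
 * stashed in the two words immediately below it:
 *
 *     allocationAddress                        address (returned, aligned)
 *     |                                        |
 *     v                                        v
 *     +--------- ... ---------+------+--------+--------------------------+
 *     | padding for alignment | size | start  | caller data (size bytes) |
 *     +--------- ... ---------+------+--------+--------------------------+
 *
 * IOFreeAligned reads those two hidden words back to recover the real
 * allocation before calling kfree()/kmem_free().
 */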
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t	allocationAddress;
    mach_vm_size_t	adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
    debug_iomalloc_size -= size;
}
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                                     mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t	kr;
    mach_vm_address_t	address;
    mach_vm_address_t	allocationAddress;
    mach_vm_size_t	adjustedSize;
    mach_vm_address_t	alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                       || (alignment > page_size);

        if (!contiguous)
        {
            if (maxPhys <= 0xFFFFFFFF)
            {
                maxPhys = 0;
                options |= KMA_LOMEM;
            }
            else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
            {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
        debug_iomalloc_size += size;
    }

    return (address);
}
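/*
 * Note (added for clarity): the physically-restricted path chooses between
 * two kernel allocators. kmem_alloc_contig() is used when the caller needs
 * physical contiguity or a physical ceiling that kernel_memory_allocate()
 * cannot express, while kernel_memory_allocate() with KMA_LOMEM covers the
 * common "below 4GB" case. Small, unrestricted requests fall through to
 * kalloc() with the same hidden size/pointer header used by IOMallocAligned(),
 * which is what IOKernelFreePhysical() above unwinds.
 */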
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t	       virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t	       link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t	address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}
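/*
 * Example (illustrative sketch; names are hypothetical): a driver that needs
 * a DMA-reachable scratch buffer together with its physical address might do:
 *
 *     IOPhysicalAddress phys;
 *     void * buf = IOMallocContiguous(4096, 4096, &phys);
 *     if (buf) {
 *         // program hardware with phys, touch buf from the CPU ...
 *         IOFreeContiguous(buf, 4096);
 *     }
 *
 * When a physical address is requested, the allocation is backed by an
 * IOBufferMemoryDescriptor and tracked on gIOMallocContiguousEntries so
 * IOFreeContiguous() can find and release the descriptor later.
 */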
void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if (!address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t	kr = kIOReturnNotReady;
    vm_size_t		segSize;
    UInt32		attempts;
    UInt32		index;
    vm_offset_t		min;
    vm_map_t		map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
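/*
 * Note (added for clarity): IOIteratePageableMaps first retries the callback
 * against the existing pageable submaps, starting from the last map that
 * succeeded (the hint). Only when every existing map fails does it take the
 * lock and grow the array with a fresh kmem_suballoc() submap, up to
 * kIOMaxPageableMaps, so the common allocation path stays off the lock.
 */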
struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t	 size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t	         kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}
static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment)
{
    kern_return_t	       kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    return( (void *) ref.address );
}
vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t	map = 0;
    UInt32	index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}
static void IOFreePageablePages(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);
}

static uintptr_t IOMallocOnePageablePage(iopa_t * a)
{
    return ((uintptr_t) IOMallocPageablePages(page_size, page_size));
}
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    void * addr;

    if (size >= (page_size - 4*kIOPageAllocChunkBytes))
        addr = IOMallocPageablePages(size, alignment);
    else
        addr = ((void *) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));

    if (addr) {
        debug_iomallocpageable_size += size;
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return (addr);
}
void IOFreePageable(void * address, vm_size_t size)
{
    debug_iomallocpageable_size -= size;
    IOStatisticsAlloc(kIOStatisticsFreePageable, size);

    if (size < (page_size - 4*kIOPageAllocChunkBytes))
    {
        address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
        size = page_size;
    }
    if (address) IOFreePageablePages(address, size);
}
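/*
 * Sizing note (added for clarity): requests smaller than
 * page_size - 4*kIOPageAllocChunkBytes are carved out of shared pages by the
 * iopa chunk allocator below; anything larger gets whole pageable pages.
 * Assuming 4K pages and 64-byte chunks, that threshold is 4096 - 256 = 3840
 * bytes, which leaves room in every shared page for the iopa_page_t
 * bookkeeping chunk kept at the end of the page.
 */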
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if 0
#undef assert
#define assert(ex)  \
	((ex) ? (void)0 : Assert(__FILE__, __LINE__, # ex))
#endif

typedef char iopa_page_t_assert[(sizeof(iopa_page_t) <= kIOPageAllocChunkBytes) ? 1 : -1];

void
iopa_init(iopa_t * a)
{
    bzero(a, sizeof(*a));
    a->lock = IOLockAlloc();
    queue_init(&a->list);
}

static uintptr_t
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
{
    uint32_t n, s;
    uint64_t avail = pa->avail;

    assert(avail);

    // find strings of count 1 bits in avail
    for (n = count; n > 1; n -= s)
    {
        s = n >> 1;
        avail = avail & (avail << s);
    }
    // and aligned
    avail &= align;

    if (avail)
    {
        n = __builtin_clzll(avail);
        pa->avail &= ~((-1ULL << (64 - count)) >> n);
        if (!pa->avail && pa->link.next)
        {
            remque(&pa->link);
            pa->link.next = 0;
        }
        return (n * kIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
    }

    return (0);
}
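/*
 * Worked example (added for clarity): each page is divided into 64 chunks
 * and pa->avail keeps one "free" bit per chunk, chunk 0 at bit 63. To find a
 * run of count free chunks, the loop repeatedly ANDs avail with shifted
 * copies of itself; a bit that survives marks the start of a run of at least
 * count free chunks. For count = 3 and
 *
 *     avail = 0b1101110...   (chunks 0,1 free, chunk 2 busy, chunks 3-5 free)
 *
 * the surviving bit is the one for chunk 3. __builtin_clzll() converts it to
 * the chunk index, and the mask ((-1ULL << (64 - count)) >> n) clears those
 * count bits to mark the chunks allocated.
 */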
static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else size = 32 - __builtin_clz(size - 1);
    return (size);
}
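/*
 * Example (added for clarity): log2up() is a ceiling log2, so
 * log2up(1) == 0, log2up(2) == 1, log2up(3) == 2, log2up(64) == 6.
 * iopa_alloc() below uses it to turn a chunk-granular alignment request
 * into an index into align_masks[].
 */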
uintptr_t
iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
{
    static const uint64_t align_masks[] = {
        0xFFFFFFFFFFFFFFFF,
        0xAAAAAAAAAAAAAAAA,
        0x8888888888888888,
        0x8080808080808080,
        0x8000800080008000,
        0x8000000080000000,
        0x8000000000000000,
    };
    iopa_page_t * pa;
    uintptr_t     addr = 0;
    uint32_t      count;
    uint64_t      align;

    if (!bytes) bytes = 1;
    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes)];

    IOLockLock(a->lock);
    pa = (typeof(pa)) queue_first(&a->list);
    while (!queue_end(&a->list, &pa->link))
    {
        addr = iopa_allocinpage(pa, count, align);
        if (addr)
        {
            a->bytecount += bytes;
            break;
        }
        pa = (typeof(pa)) queue_next(&pa->link);
    }
    IOLockUnlock(a->lock);

    if (!addr)
    {
        addr = alloc(a);
        if (addr)
        {
            pa = (typeof(pa)) (addr + page_size - kIOPageAllocChunkBytes);
            pa->signature = kIOPageAllocSignature;
            pa->avail     = -2ULL;

            addr = iopa_allocinpage(pa, count, align);
            IOLockLock(a->lock);
            if (pa->avail) enqueue_head(&a->list, &pa->link);
            if (addr) a->bytecount += bytes;
            IOLockUnlock(a->lock);
        }
    }

    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    return (addr);
}
uintptr_t
iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
{
    iopa_page_t * pa;
    uint32_t      count;
    uintptr_t     chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (kIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - kIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    chunk /= kIOPageAllocChunkBytes;

    IOLockLock(a->lock);
    if (!pa->avail)
    {
        assert(!pa->link.next);
        enqueue_tail(&a->list, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
        // page is now entirely free; return its base so the caller can release it
        pa = (typeof(pa)) trunc_page(pa);
    }
    a->bytecount -= bytes;
    IOLockUnlock(a->lock);

    return ((uintptr_t) pa);
}
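/*
 * Note (added for clarity): iopa_free() only marks chunks free. It returns 0
 * while the underlying page still has live chunks, and returns the page base
 * address once every chunk is free again; the caller (see IOFreePageable()
 * above) is then responsible for handing that whole page back to the
 * page-level allocator.
 */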
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn	ret = kIOReturnSuccess;
    ppnum_t	pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//	OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}
IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static void _iolog_consputc(int ch, void *arg __unused)
{
    cons_putc_locked(ch);
}

static void _iolog_logputc(int ch, void *arg __unused)
{
    log_putc_locked(ch);
}

void IOLog(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    IOLogv(format, ap);
    va_end(ap);
}

void IOLogv(const char *format, va_list ap)
{
    va_list ap2;

    va_copy(ap2, ap);

    bsd_log_lock();
    __doprnt(format, ap, _iolog_logputc, NULL, 16);
    bsd_log_unlock();

    logwakeup();
    __doprnt(format, ap2, _iolog_consputc, NULL, 16);
}

void IOPanic(const char *reason)
{
    panic("%s", reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}
IOReturn IOFindValueForName(const char *string,
	const IONamedValue *regValueArray,
	int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
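/*
 * Example (illustrative sketch; the table contents are hypothetical): a
 * driver can keep an IONamedValue table for pretty-printing register values.
 * The table must end with an entry whose name is NULL.
 *
 *     static const IONamedValue gLinkSpeedNames[] = {
 *         { 10,   "10 Mb/s"  },
 *         { 100,  "100 Mb/s" },
 *         { 1000, "1 Gb/s"   },
 *         { 0,    NULL       }
 *     };
 *
 *     IOLog("link: %s\n", IOFindNameForValue(speed, gLinkSpeedNames));
 *
 * Unknown values come back as "0x... (UNDEFINED)" formatted into the static
 * noValue buffer above, which is not safe against concurrent callers.
 */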
OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;

    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOAlignment IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}
unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {