 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/OSDebug.h>
#include <sys/sysctl.h>

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <sys/msgbuf.h>
#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */
mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void __doprnt(const char *fmt,
                     va_list argp,
                     void (*putc)(int, void *),
                     void *arg,
                     int radix);
extern void cons_putc_locked(char);
extern void bsd_log_lock(void);
extern void bsd_log_unlock(void);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;
/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal      = NULL;
void *_giDebugLogDataInternal  = NULL;
void *_giDebugReserved1        = NULL;
void *_giDebugReserved2        = NULL;
/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static bool libInitialized;
void IOLibInit(void)
{
    kern_return_t ret;

    if (libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                &gIOKitPageableSpace.maps[0].address,
                kIOPageableMapSize,
                TRUE,
                VM_FLAGS_ANYWHERE,
                &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}
void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}
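/*
 * Usage sketch (illustrative only, not part of this file). A client would
 * normally pair IOCreateThread() with a call to IOExitThread() from inside
 * the thread function; the names myThreadFunc and myContext are hypothetical.
 *
 *    static void myThreadFunc(void * myContext)
 *    {
 *        // ... do background work with myContext ...
 *        IOExitThread();   // terminates the calling thread; does not return
 *    }
 *
 *    IOThread thread = IOCreateThread(&myThreadFunc, myContext);
 *    if (!thread)
 *        IOLog("IOCreateThread failed\n");
 */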
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
    if (address) {
        debug_iomalloc_size += size;
        IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
        debug_iomalloc_size -= size;
        IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}
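/*
 * Usage sketch (illustrative only): IOFree() takes the original allocation
 * size, so the caller must remember it. MyDriverState is a hypothetical type.
 *
 *    struct MyDriverState { int refs; uint64_t base; };
 *    MyDriverState * state = (MyDriverState *) IOMalloc(sizeof(MyDriverState));
 *    if (state) {
 *        bzero(state, sizeof(MyDriverState));
 *        // ... use state ...
 *        IOFree(state, sizeof(MyDriverState));
 *    }
 */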
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_offset_t   address;
    vm_offset_t   allocationAddress;
    vm_size_t     adjustedSize;
    uintptr_t     alignMask;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                            adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t)))
                    = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                    = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if (address) {
        debug_iomalloc_size += size;
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if (!address)
        return;

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

    debug_iomalloc_size -= size;

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
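/*
 * Usage sketch (illustrative only): alignment is expressed in bytes and is
 * expected to be a power of two; as with IOMalloc/IOFree, the free call must
 * pass the same size that was allocated.
 *
 *    void * dmaArea = IOMallocAligned(8192, 4096);   // page-aligned example
 *    if (dmaArea) {
 *        // ... use the buffer ...
 *        IOFreeAligned(dmaArea, 8192);
 *    }
 */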
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
        return;

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);

    debug_iomalloc_size -= size;
}
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
                        mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t     kr;
    mach_vm_address_t address;
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;
    mach_vm_address_t alignMask;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int         options = 0;
        vm_offset_t virt;

        adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                           || (alignment > page_size);

        if (!contiguous)
        {
            if (maxPhys <= 0xFFFFFFFF)
            {
                maxPhys = 0;
                options |= KMA_LOMEM;
            }
            else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
            {
                maxPhys = 0;
            }
        }
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, atop(maxPhys), atop(alignMask), 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, options);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
        debug_iomalloc_size += size;
    }

    return (address);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                           IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = (0xFFFFFFFF ^ alignMask);

        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}
void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if (!address)
        return;

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                            _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreePhysical((mach_vm_address_t) address, size);
    }
}
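/*
 * Usage sketch (illustrative only): when a physical address is requested the
 * allocation is backed by an IOBufferMemoryDescriptor and tracked on
 * gIOMallocContiguousEntries so IOFreeContiguous() can find and release it.
 *
 *    IOPhysicalAddress phys = 0;
 *    void * buf = IOMallocContiguous(4096, 4096, &phys);
 *    if (buf) {
 *        // ... program a device with phys, fill buf ...
 *        IOFreeContiguous(buf, 4096);
 *    }
 */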
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
struct IOMallocPageableRef
{
    vm_offset_t address;
    vm_size_t   size;
};
static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

    if( ref.address) {
        debug_iomallocpageable_size += round_page(size);
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
    }

    return( (void *) ref.address );
}
vm_map_t IOPageableMapForAddress( uintptr_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        panic("IOPageableMapForAddress: null");

    return( map );
}
void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

    debug_iomallocpageable_size -= round_page(size);

    IOStatisticsAlloc(kIOStatisticsFreePageable, size);
}
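/*
 * Usage sketch (illustrative only): pageable allocations come out of the
 * gIOKitPageableSpace submaps and may fault, so they are only appropriate for
 * memory that is never touched at interrupt time.
 *
 *    void * table = IOMallocPageable(16 * 4096, page_size);
 *    if (table) {
 *        // ... populate the table from a thread context ...
 *        IOFreePageable(table, 16 * 4096);
 *    }
 */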
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );
    if ((address | length) & PAGE_MASK)
    {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
        return( kIOReturnUnsupported );
    }
    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length  -= page_size;
    }

    return( ret );
}
IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                  IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}
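/*
 * Usage sketch (illustrative only): both calls operate on kernel_task
 * mappings only, and IOSetProcessorCacheMode() requires page-aligned address
 * and length. The variable vaddr below is hypothetical.
 *
 *    IOReturn rc = IOSetProcessorCacheMode(kernel_task, vaddr, page_size,
 *                                          kIOMapWriteCombineCache);
 *    if (kIOReturnSuccess == rc)
 *        IOFlushProcessorCache(kernel_task, vaddr, page_size);
 */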
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
vm_offset_t OSKernelStackRemaining( void )
{
    return (ml_stack_remaining());
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for the indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for the indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}
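/*
 * Selection note (editorial): IOSleep() blocks the calling thread and is the
 * right choice for millisecond-scale waits; IODelay() and IOPause() busy-wait
 * and should be reserved for short, bounded delays such as polling a hardware
 * status register.
 *
 *    IOSleep(10);    // give firmware 10 ms to settle
 *    IODelay(50);    // spin 50 us while polling a status bit
 */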
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static void _iolog_consputc(int ch, void *arg __unused)
{
    cons_putc_locked(ch);
}

static void _iolog_logputc(int ch, void *arg __unused)
{
    log_putc_locked(ch);
}
void IOLog(const char *format, ...)
{
    va_list ap;

    va_start(ap, format);
    IOLogv(format, ap);
    va_end(ap);
}

void IOLogv(const char *format, va_list ap)
{
    va_list ap2;

    va_copy(ap2, ap);

    bsd_log_lock();
    __doprnt(format, ap, _iolog_logputc, NULL, 16);
    bsd_log_unlock();

    __doprnt(format, ap2, _iolog_consputc, NULL, 16);
}
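/*
 * Usage sketch (illustrative only): IOLog()/IOLogv() use printf-style
 * formatting and write to both the system log and the console; they should
 * not be called from interrupt context because the log path can block. The
 * names requestID and status below are hypothetical.
 *
 *    IOLog("MyDriver: request %d completed with status 0x%08x\n",
 *          requestID, status);
 */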
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // scratch buffer for values with no name

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}
IOReturn IOFindValueForName(const char *string,
        const IONamedValue *regValueArray,
        int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
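/*
 * Usage sketch (illustrative only): both lookups walk a caller-supplied
 * IONamedValue table terminated by an entry with a NULL name. The table
 * kMyPowerStates below is hypothetical.
 *
 *    static const IONamedValue kMyPowerStates[] = {
 *        { 0, "off"  },
 *        { 1, "doze" },
 *        { 2, "on"   },
 *        { 0, NULL   }
 *    };
 *
 *    const char * name = IOFindNameForValue(2, kMyPowerStates);   // "on"
 *    int value;
 *    IOReturn rc = IOFindValueForName("doze", kMyPowerStates, &value);
 */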
OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;

    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOAlignment IOSizeToAlignment(unsigned int size)
{
    int       shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}
unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {