/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"
mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern kern_return_t kmem_suballoc(
    vm_map_t    parent,
    vm_offset_t *addr,
    vm_size_t   size,
    boolean_t   pageable,
    boolean_t   anywhere,
    vm_map_t    *new_map);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t * IOLockGroup;
/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;
/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;
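/*
 * Note: gIOKitPageableSpace is a small table of pageable kernel submaps.
 * IOLibInit() creates the first 96 MB submap; IOIteratePageableMaps()
 * (below) adds further submaps on demand, up to the kIOMaxPageableMaps
 * limit, when the existing maps cannot satisfy an IOMallocPageable() request.
 */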
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}
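/*
 * IOCreateThread() relies on kernel_thread_start() returning with an extra
 * reference on the new thread: thread_deallocate() drops that creator-side
 * reference, and the running thread keeps its own until it terminates.
 */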
void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            // Align the returned pointer, leaving room for the size and the
            // original allocation address stored just below it.
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
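/*
 * Layout of a sub-page IOMallocAligned() block (illustrative):
 *
 *   allocationAddress ... [vm_size_t adjustedSize][vm_address_t allocationAddress][returned address, aligned]
 *
 * IOFreeAligned() reads the two hidden fields back from just below the
 * returned pointer to recover the original kalloc()/kmem allocation.
 */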
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size );

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize );
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreeContiguous(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size );

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
mach_vm_address_t
IOKernelAllocateContiguous(mach_vm_size_t size, mach_vm_size_t alignment)
{
    kern_return_t     kr;
    mach_vm_address_t address;
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;
    mach_vm_address_t alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    if (adjustedSize >= page_size)
    {
        vm_offset_t virt;
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);

            // If the block would straddle a page boundary it could not be
            // physically contiguous, so move it up to the next page.
            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (address);
}
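/*
 * For sub-page requests the allocation is padded to (2 * size) plus the two
 * hidden header fields, so that if the aligned block would cross a page
 * boundary it can be pushed up to the next page and still fit; a block that
 * stays within one page is physically contiguous by construction.
 */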
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateContiguous(size, alignment);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = 0xFFFFFFFF ^ (alignMask & PAGE_MASK);
        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}
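/*
 * When the caller wants the physical address, the buffer is obtained from an
 * IOBufferMemoryDescriptor and remembered on gIOMallocContiguousEntries keyed
 * by its virtual address, so IOFreeContiguous() can find the descriptor and
 * release it instead of calling IOKernelFreeContiguous().
 */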
void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if (!address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreeContiguous((mach_vm_address_t) address, size);
    }
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        // Try the existing pageable maps first, starting at the hint.
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        // Every existing map failed; grow the table with a new submap.
        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
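/*
 * IOIteratePageableMaps() walks the existing pageable submaps (starting at
 * the last successful index) and invokes the callback on each; only when all
 * of them fail does it take the lock and suballocate another submap from
 * kernel_map, then loop and try again.
 */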
struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};
static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}
vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}
void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address );
    if( map)
        kmem_free( map, (vm_offset_t) address, size );

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // Convert the cache mode into map options.
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length  -= page_size;
    }

    return( ret );
}
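/*
 * The cache mode is changed by remapping each page in place: the existing
 * mapping is torn down with IOUnmapPages() and re-established with
 * IOMapPages() using the same physical page and the new cache bits.
 */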
IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    // Offset of this local within the KERNEL_STACK_SIZE-aligned stack; since
    // the stack grows down, this is approximately the space remaining.
    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];   // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}
IOReturn IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
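/*
 * Illustrative use of the lookup helpers above, assuming a hypothetical
 * IONamedValue table gMyStates whose last entry has a NULL name (the loops
 * above stop on a NULL name):
 *
 *     const char * name = IOFindNameForValue(2, gMyStates);
 *     int          v;
 *     if (kIOReturnSuccess == IOFindValueForName("idle", gMyStates, &v))
 *         IOLog("idle == %d\n", v);
 */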
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}
unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
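/*
 * IOSizeToAlignment() returns the bit position of the most significant set
 * bit of its argument, and IOAlignmentToSize() is the inverse (1 << align).
 * For example, IOSizeToAlignment(4096) yields 12 and IOAlignmentToSize(12)
 * yields 4096.
 */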