/*
 * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitDebug.h>
mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void * _giDebugLogInternal     = NULL;
void * _giDebugLogDataInternal = NULL;
void * _giDebugReserved1       = NULL;
void * _giDebugReserved2       = NULL;
/*
 * Static variables for this module.
 */

static IOThreadFunc threadArgFcn;
static void *       threadArgArg;
static lock_t *     threadArgLock;

static queue_head_t gIOMallocContiguousEntries;
static mutex_t *    gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 16 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 64 * 1024 * 1024 };

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    mutex_t *   lock;
} gIOKitPageableSpace;
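
/*
 * gIOKitPageableSpace tracks the pageable submaps that IOKit carves out of
 * kernel_map: "count" maps are live, "hint" remembers the map that last
 * satisfied an allocation, and "lock" serializes growth of the table (see
 * IOIteratePageableMaps below).
 */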
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static bool libInitialized;

void IOLibInit(void)
{
    kern_return_t ret;

    if (libInitialized)
        return;

    threadArgLock = lock_alloc( true, NULL, NULL );

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        TRUE,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock        = mutex_alloc( 0 );
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = mutex_alloc( 0 );
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * We pass an argument to a new thread by saving fcn and arg in some
 * locked variables and starting the thread at ioThreadStart(). This
 * function retrieves fcn and arg and makes the appropriate call.
 */

static void ioThreadStart( void )
{
    IOThreadFunc fcn;
    void *       arg;

    fcn = threadArgFcn;
    arg = threadArgArg;
    lock_done( threadArgLock );

    (*fcn)(arg);
    IOExitThread();
}

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    IOThread thread;

    lock_write( threadArgLock );
    threadArgFcn = fcn;
    threadArgArg = arg;

    thread = kernel_thread( kernel_task, ioThreadStart );

    return( thread );
}

volatile void IOExitThread()
{
    (void) thread_terminate(current_act());
}
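
/*
 * Illustrative sketch of a caller (hypothetical function and state names,
 * not part of this file): the work function receives the arg pointer that
 * was handed to IOCreateThread() and ends itself with IOExitThread().
 *
 *     static void MyWorker(void * arg)
 *     {
 *         MyDriverState * state = (MyDriverState *) arg;
 *         // ... perform the work ...
 *         IOExitThread();
 *     }
 *
 *     IOThread thread = IOCreateThread(&MyWorker, state);
 */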
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree((vm_offset_t) address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}
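
/*
 * Minimal usage sketch (hypothetical caller): IOFree() must be given the
 * same size that was passed to IOMalloc(), since kalloc()/kfree() do not
 * record allocation sizes for the caller.
 *
 *     void * buf = IOMalloc(1024);
 *     if (buf) {
 *         // ... use buf ...
 *         IOFree(buf, 1024);
 *     }
 */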
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size );

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, (vm_address_t) allocationAddress, adjustedSize );
        else
            kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
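
/*
 * For sub-page requests IOMallocAligned() stores two bookkeeping words (the
 * adjusted size and the unaligned base) just below the pointer it returns;
 * IOFreeAligned() reads them back, so it must be called with the original
 * requested size. A hypothetical caller:
 *
 *     void * desc = IOMallocAligned(512, 256);    // 256-byte aligned
 *     if (desc) {
 *         // ... use desc ...
 *         IOFreeAligned(desc, 512);
 *     }
 */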
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
struct _IOMallocContiguousEntry
{
    void *        virtual;
    ppnum_t       ioBase;
    queue_chain_t link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
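
/*
 * Each entry records the IOMapper allocation (ioBase) made on behalf of a
 * contiguous buffer, keyed by the buffer's kernel virtual address, so that
 * IOFreeContiguous() can locate and release the matching I/O space.
 */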
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;
    ppnum_t       pagenum;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size)
    {
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &address, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS != kr)
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    /* Do we want a physical address? */
    if (address && physicalAddress)
    {
        do
        {
            /* Get the physical page */
            pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
            if (pagenum)
            {
                IOByteCount offset;
                ppnum_t     base;

                base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
                if (base)
                {
                    _IOMallocContiguousEntry *
                    entry = IONew(_IOMallocContiguousEntry, 1);
                    if (!entry)
                    {
                        IOFreeContiguous((void *) address, size);
                        address = 0;
                        break;
                    }
                    entry->virtual = (void *) address;
                    entry->ioBase  = base;
                    mutex_lock(gIOMallocContiguousEntriesLock);
                    queue_enter( &gIOMallocContiguousEntries, entry,
                                 _IOMallocContiguousEntry *, link );
                    mutex_unlock(gIOMallocContiguousEntriesLock);

                    *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
                    for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
                        IOMapperInsertPage( base, offset, pagenum );
                }
                else
                    *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
            }
            else
                /* Did not find, return 0 */
                *physicalAddress = (IOPhysicalAddress) 0;
        }
        while (false);
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t               allocationAddress;
    vm_size_t                  adjustedSize;
    _IOMallocContiguousEntry * entry;
    ppnum_t                    base = 0;

    if (!address)
        return;

    assert(size);

    mutex_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if (entry->virtual == address) {
            base = entry->ioBase;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    mutex_unlock(gIOMallocContiguousEntriesLock);

    if (base)
    {
        IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size );

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
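
/*
 * Illustrative sketch (hypothetical caller): passing a physicalAddress
 * pointer asks IOMallocContiguous() for the page address of the buffer
 * (an IOMapper I/O address when a system mapper is present), typically the
 * value a DMA engine is programmed with.
 *
 *     IOPhysicalAddress physAddr;
 *     void * dmaBuf = IOMallocContiguous(8192, page_size, &physAddr);
 *     if (dmaBuf) {
 *         // ... program the device with physAddr ...
 *         IOFreeContiguous(dmaBuf, 8192);
 *     }
 */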
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while (attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        mutex_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           TRUE,
                           &map);
        if( KERN_SUCCESS != kr) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        mutex_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomalloc_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}
vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}
void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address );
    if( map)
        kmem_free( map, (vm_offset_t) address, size );

#if IOALLOCDEBUG
    debug_iomalloc_size -= round_page_32(size);
#endif
}
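
/*
 * Minimal usage sketch (hypothetical caller): pageable allocations come out
 * of the gIOKitPageableSpace submaps and may fault when touched, so they are
 * only appropriate for memory that is never accessed at interrupt time.
 *
 *     void * table = IOMallocPageable(32 * 1024, page_size);
 *     if (table) {
 *         // ... use table from thread context only ...
 *         IOFreePageable(table, 32 * 1024);
 *     }
 */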
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                vm_size_t length, unsigned int options);

extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}
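
/*
 * Illustrative sketch (hypothetical caller): remap one page of a kernel
 * mapping as cache-inhibited, e.g. for memory shared with hardware that does
 * not snoop the processor cache. kIOInhibitCache is assumed to be the
 * cache-mode constant from IOTypes.h.
 *
 *     ret = IOSetProcessorCacheMode(kernel_task,
 *                                   (IOVirtualAddress) sharedPage,
 *                                   page_size,
 *                                   kIOInhibitCache);
 */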
IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOSleep(unsigned milliseconds)
{
    wait_result_t wait_result;

    wait_result = assert_wait_timeout(milliseconds, THREAD_UNINT);
    assert(wait_result == THREAD_WAITING);

    wait_result = thread_block(THREAD_CONTINUE_NULL);
    assert(wait_result == THREAD_TIMED_OUT);
}
/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    extern void delay(int usec);

    delay(microseconds);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup();

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}
void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if (regValueArray->value == value)
            return( regValueArray->name );
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return( (const char *) noValue );
}
IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if (!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
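
/*
 * Illustrative sketch (hypothetical table): IONamedValue arrays are
 * terminated by an entry whose name is NULL, which is what both lookup
 * loops above test for.
 *
 *     static const IONamedValue gPowerStateNames[] = {
 *         { 0, "off"  },
 *         { 1, "doze" },
 *         { 2, "on"   },
 *         { 0, NULL   }
 *     };
 *
 *     const char * name = IOFindNameForValue(2, gPowerStateNames);  // "on"
 */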
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOAlignment IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}
unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
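
/*
 * The two helpers are approximately inverse log2/exp2 operations:
 * IOSizeToAlignment(4096) yields 12 (the position of the highest set bit),
 * and IOAlignmentToSize(12) yields 4096 (1 << 12).
 */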
IOReturn IONDRVLibrariesInitialize( void )
{
    return( kIOReturnUnsupported );
}