/*
 * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOKitDebug.h>
mach_timespec_t IOZeroTvalspec = { 0, 0 };
/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */
void *_giDebugLogInternal       = NULL;
void *_giDebugLogDataInternal   = NULL;
void *_giDebugReserved1         = NULL;
void *_giDebugReserved2         = NULL;
/*
 * Static variables for this module.
 */
static IOThreadFunc threadArgFcn;
static void *       threadArgArg;
static lock_t *     threadArgLock;
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 16 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 64 * 1024 * 1024 };
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    mutex_t *   lock;
} gIOKitPageableSpace;
void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    threadArgLock = lock_alloc( true, NULL, NULL );

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    TRUE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock            = mutex_alloc( 0 );
    gIOKitPageableSpace.maps[0].end     = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint            = 0;
    gIOKitPageableSpace.count           = 1;

    libInitialized = true;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * We pass an argument to a new thread by saving fcn and arg in some
 * locked variables and starting the thread at ioThreadStart(). This
 * function retrieves fcn and arg and makes the appropriate call.
 */
static void ioThreadStart( void )
{
    IOThreadFunc fcn;
    void *       arg;

    /* Capture the argument pair published by IOCreateThread(), then
       release the lock so the next IOCreateThread() may proceed. */
    fcn = threadArgFcn;
    arg = threadArgArg;
    lock_done( threadArgLock );

    (*fcn)(arg);

    IOExitThread();
}
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    IOThread thread;

    /* Hold the write lock until ioThreadStart() has consumed fcn/arg. */
    lock_write( threadArgLock );
    threadArgFcn = fcn;
    threadArgArg = arg;

    thread = kernel_thread( kernel_task, ioThreadStart );

    return(thread);
}
volatile void IOExitThread()
{
    (void) thread_terminate(current_act());
}
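/*
 * Illustrative usage (a sketch, not part of the original source; the
 * worker function and its state pointer are hypothetical):
 *
 *      static void myWorker( void * arg )
 *      {
 *          // ... do work with arg ...
 *          // returning lands in IOExitThread() via ioThreadStart()
 *      }
 *
 *      IOThread t = IOCreateThread( &myWorker, (void *) myState );
 *
 * Since fcn/arg are handed off through the threadArg globals under
 * threadArgLock, concurrent IOCreateThread() callers serialize there.
 */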
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}
void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree((vm_offset_t)address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}
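/*
 * Illustrative usage (a sketch, not from the original source): unlike
 * user-space free(), IOFree() requires the caller to pass back the same
 * size that was allocated, since kfree() needs it to find the zone.
 *
 *      struct MyRecord * rec = (struct MyRecord *) IOMalloc( sizeof(*rec) );
 *      if( rec) {
 *          // ... use rec ...
 *          IOFree( rec, sizeof(*rec) );
 *      }
 */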
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t   kr;
    vm_address_t    address;
    vm_address_t    allocationAddress;
    vm_size_t       adjustedSize;
    vm_offset_t     alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                            adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            /* Stash the block size and base just below the aligned
               address so IOFreeAligned() can recover them. */
            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size );

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, (vm_address_t) allocationAddress, adjustedSize );
        else
            kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
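/*
 * Layout sketch for the sub-page path in IOMallocAligned()/IOFreeAligned()
 * (inferred from the code above, not separately documented): the aligned
 * pointer handed to the caller is preceded by two hidden words,
 * adjustedSize then allocationAddress, letting the free path recover the
 * original kalloc()/kernel_memory_allocate() block:
 *
 *      allocationAddress                                     returned address
 *      v                                                     v
 *      [ ...padding... ][ adjustedSize ][ allocationAddress ][ user data ]
 *
 * Illustrative call (hypothetical sizes):
 *
 *      void * buf = IOMallocAligned( 256, 64 );    // 64-byte aligned
 *      if( buf)
 *          IOFreeAligned( buf, 256 );
 */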
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    kern_return_t   kr;
    vm_address_t    address;
    vm_address_t    allocationAddress;
    vm_size_t       adjustedSize;
    vm_offset_t     alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kmem_alloc_contig(kernel_map, &address, size,
                                alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            /* A sub-page allocation must not straddle a page boundary,
               or it would not be physically contiguous. */
            if (atop(address) != atop(address + size - 1))
                address = round_page(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    if( address && physicalAddress)
        *physicalAddress = (IOPhysicalAddress) pmap_extract( kernel_pmap,
                                                             address );

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size );

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
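/*
 * Illustrative usage (a sketch, not from the original source): contiguous
 * allocations are typically used for DMA, where the hardware needs the
 * physical address as well as the kernel virtual one.
 *
 *      IOPhysicalAddress phys;
 *      void * dmaBuf = IOMallocContiguous( 4096, page_size, &phys );
 *      if( dmaBuf) {
 *          // program the device with phys, fill dmaBuf via the VA ...
 *          IOFreeContiguous( dmaBuf, 4096 );
 *      }
 */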
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);
kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t   kr = kIOReturnNotReady;
    vm_size_t       segSize;
    UInt32          attempts;
    UInt32          index;
    vm_offset_t     min;
    vm_map_t        map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        /* Try the existing submaps first, starting at the last one
           that succeeded. */
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        /* All existing maps failed; grow the pageable space by one
           more submap, under the lock. */
        mutex_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    TRUE,
                    &map);
        if( KERN_SUCCESS != kr) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        mutex_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};
static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomalloc_size += round_page(size);
#endif

    return( (void *) ref.address );
}
vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}
void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address );
    if( map)
        kmem_free( map, (vm_offset_t) address, size );

#if IOALLOCDEBUG
    debug_iomalloc_size -= round_page(size);
#endif
}
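/*
 * Illustrative usage (a sketch, not from the original source): pageable
 * allocations must never be touched at interrupt time, since their pages
 * may be swapped out.
 *
 *      void * table = IOMallocPageable( 3 * page_size, page_size );
 *      if( table)
 *          IOFreePageable( table, 3 * page_size );
 *
 * IOFreePageable() relies on IOPageableMapForAddress() to find which
 * submap the address came from, so the address and size must match the
 * original allocation.
 */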
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
                        vm_size_t length, unsigned int options);
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn    ret = kIOReturnSuccess;
    vm_offset_t physAddr;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        /* Remap each page in place with the requested cache mode. */
        physAddr = pmap_extract( kernel_pmap, address );
        if( physAddr)
            ret = IOMapPages( get_task_map(task), address, physAddr, page_size, cacheMode );
        else
            ret = kIOReturnVMError;

        address += page_size;
        length  -= page_size;
    }

    return( ret );
}
IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache( (vm_offset_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}
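/*
 * Illustrative usage (a sketch, not from the original source; both
 * routines accept only kernel_task in this implementation). The cacheMode
 * argument is a raw cache-mode value such as kIOInhibitCache, which
 * IOSetProcessorCacheMode() itself shifts into map-option position:
 *
 *      IOSetProcessorCacheMode( kernel_task, (IOVirtualAddress) buf,
 *                               len, kIOInhibitCache );
 *      IOFlushProcessorCache( kernel_task, (IOVirtualAddress) buf, len );
 */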
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    /* The offset of a local variable within the power-of-two sized
       kernel stack approximates the bytes still available. */
    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOSleep(unsigned milliseconds)
{
    wait_result_t wait_result;

    wait_result = assert_wait_timeout(milliseconds, THREAD_UNINT);
    assert(wait_result == THREAD_WAITING);

    wait_result = thread_block(THREAD_CONTINUE_NULL);
    assert(wait_result == THREAD_TIMED_OUT);
}
/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    extern void delay(int usec);

    delay(microseconds);
}
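/*
 * Illustrative contrast (a sketch, not from the original source):
 * IOSleep() blocks the calling thread uninterruptibly and so is only
 * legal where blocking is allowed; IODelay() spins and is the one to use
 * for short waits where blocking is not permitted.
 *
 *      IOSleep( 10 );      // give up the CPU for ~10 ms
 *      IODelay( 50 );      // busy-wait ~50 us
 */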
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup();

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}
void IOPanic(const char *reason)
{
    panic(reason);
}
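/*
 * Illustrative usage (a sketch, not from the original source): IOLog()
 * handles the usual printf-style conversions via _doprnt(), e.g.
 *
 *      IOLog( "mydriver: probe failed, status 0x%x\n", status );
 *
 * IOPanic() simply forwards its reason string to panic().
 */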
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */

static char noValue[80];	// that's pretty
const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}
IOReturn IOFindValueForName(const char *string,
        const IONamedValue *regValueArray,
        int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
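/*
 * Illustrative usage (a sketch, not from the original source; the table
 * contents are hypothetical). IONamedValue tables are terminated by an
 * entry with a NULL name:
 *
 *      static const IONamedValue myStates[] = {
 *          { 0, "idle" },
 *          { 1, "busy" },
 *          { 0, NULL }
 *      };
 *
 *      IOLog( "state: %s\n", IOFindNameForValue( state, myStates ));
 */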
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}
unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
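/*
 * Illustrative round trip (a sketch, not from the original source):
 * IOSizeToAlignment() returns the log2 alignment implied by the highest
 * set bit of a size, and IOAlignmentToSize() inverts it for powers of two:
 *
 *      IOAlignment  a = IOSizeToAlignment( 4096 );     // 12
 *      unsigned int s = IOAlignmentToSize( a );        // 4096
 */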
IOReturn IONDRVLibrariesInitialize( void )
{
    return( kIOReturnUnsupported );
}