/*
 * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91	Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOKitDebug.h>
mach_timespec_t IOZeroTvalspec = { 0, 0 };
/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */
void *_giDebugLogInternal	= NULL;
void *_giDebugLogDataInternal	= NULL;
void *_giDebugReserved1		= NULL;
void *_giDebugReserved2		= NULL;
/*
 * Static variables for this module.
 */
static IOThreadFunc	threadArgFcn;
static void *		threadArgArg;
static lock_t *		threadArgLock;
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 16 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 64 * 1024 * 1024 };
typedef struct {
    vm_map_t	map;
    vm_offset_t	address;
    vm_offset_t	end;
} IOMapData;

static struct {
    UInt32	count;
    UInt32	hint;
    IOMapData	maps[ kIOMaxPageableMaps ];
    mutex_t *	lock;
} gIOKitPageableSpace;
void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    threadArgLock = lock_alloc( true, NULL, NULL );

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    TRUE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock		= mutex_alloc( 0 );
    gIOKitPageableSpace.maps[0].end	= gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint		= 0;
    gIOKitPageableSpace.count		= 1;

    libInitialized = true;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * We pass an argument to a new thread by saving fcn and arg in some
 * locked variables and starting the thread at ioThreadStart(). This
 * function retrieves fcn and arg and makes the appropriate call.
 */
static void ioThreadStart( void )
{
    IOThreadFunc	fcn;
    void *		arg;

    fcn = threadArgFcn;
    arg = threadArgArg;
    lock_done( threadArgLock );

    (*fcn)(arg);

    IOExitThread();
}
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    IOThread thread;

    lock_write( threadArgLock );
    threadArgFcn = fcn;
    threadArgArg = arg;

    thread = kernel_thread( kernel_task, ioThreadStart );

    return(thread);
}
volatile void IOExitThread()
{
    (void) thread_terminate(current_act());
}
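
/*
 * Illustrative example (hypothetical client code; myWorker and context are
 * made-up names): a caller might start a worker thread like this. The
 * thread terminates when the function returns, since ioThreadStart()
 * calls IOExitThread() after the function comes back.
 *
 *	static void myWorker( void * arg )
 *	{
 *	    // ... do work with arg ...
 *	}
 *
 *	IOThread thread = IOCreateThread( &myWorker, (void *) context );
 */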
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
	debug_iomalloc_size += size;
#endif
    return address;
}
void IOFree(void * address, vm_size_t size)
{
    if (address) {
	kfree((vm_offset_t)address, size);
#if IOALLOCDEBUG
	debug_iomalloc_size -= size;
#endif
    }
}
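
/*
 * Illustrative example: IOFree() must be passed the same size that was
 * given to IOMalloc(), because the underlying kfree() needs the allocation
 * size to locate the right zone.
 *
 *	void * buf = IOMalloc( 1024 );
 *	if( buf) {
 *	    bzero( buf, 1024 );
 *	    // ... use buf ...
 *	    IOFree( buf, 1024 );
 *	}
 */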
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t	kr;
    vm_address_t	address;
    vm_address_t	allocationAddress;
    vm_size_t		adjustedSize;
    vm_offset_t		alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                            adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            // stash the adjusted size and the true allocation address
            // just below the aligned pointer handed back to the caller
            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t	allocationAddress;
    vm_size_t		adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, (vm_address_t) allocationAddress, adjustedSize);
        else
            kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
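
/*
 * Illustrative example: request a buffer aligned to a 256-byte boundary.
 * For sub-page requests the kalloc() address and adjusted size are stashed
 * immediately below the returned pointer, which is why IOFreeAligned()
 * must be given the original request size.
 *
 *	void * buf = IOMallocAligned( 512, 256 );
 *	if( buf)
 *	    IOFreeAligned( buf, 512 );
 */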
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    kern_return_t	kr;
    vm_address_t	address;
    vm_address_t	allocationAddress;
    vm_size_t		adjustedSize;
    vm_offset_t		alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kmem_alloc_contig(kernel_map, &address, size,
                                alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            // a sub-page buffer must not straddle a page boundary,
            // or it would not be physically contiguous
            if (atop(address) != atop(address + size - 1))
                address = round_page(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    if( address && physicalAddress)
        *physicalAddress = (IOPhysicalAddress) pmap_extract( kernel_pmap,
                                                             address );

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t	allocationAddress;
    vm_size_t		adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
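
/*
 * Illustrative example (the hardware programming step is hypothetical):
 * allocate a physically contiguous buffer for DMA and recover its physical
 * address. physicalAddress may be NULL if the caller does not need it.
 *
 *	IOPhysicalAddress phys;
 *	void * buf = IOMallocContiguous( 4096, 16, &phys );
 *	if( buf) {
 *	    // ... program the DMA engine with phys ...
 *	    IOFreeContiguous( buf, 4096 );
 *	}
 */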
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t	kr = kIOReturnNotReady;
    vm_size_t		segSize;
    UInt32		attempts;
    UInt32		index;
    vm_offset_t		min;
    vm_map_t		map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        // try the existing maps first, starting at the last successful one
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        // all the existing maps failed; try to create a new one
        mutex_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    TRUE,
                    &map);
        if( KERN_SUCCESS != kr) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map	= map;
        gIOKitPageableSpace.maps[index].address	= min;
        gIOKitPageableSpace.maps[index].end	= min + segSize;
        gIOKitPageableSpace.hint		= index;
        gIOKitPageableSpace.count		= index + 1;

        mutex_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t	 size;
};
static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t		 kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t	       kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomalloc_size += round_page(size);
#endif

    return( (void *) ref.address );
}
vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t	map = 0;
    UInt32	index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}
void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomalloc_size -= round_page(size);
#endif
}
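
/*
 * Illustrative example: pageable allocations come from the
 * gIOKitPageableSpace submaps rather than the wired kernel map, so they
 * can fault on first touch and must not be used from interrupt context or
 * while preemption is disabled.
 *
 *	void * table = IOMallocPageable( 512 * 1024, page_size );
 *	if( table)
 *	    IOFreePageable( table, 512 * 1024 );
 */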
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
			vm_size_t length, unsigned int options);
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
				  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn	ret = kIOReturnSuccess;
    vm_offset_t	physAddr;

    if( task != kernel_task)
	return( kIOReturnUnsupported );

    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

	physAddr = pmap_extract( kernel_pmap, address );
	if( physAddr)
	    ret = IOMapPages( get_task_map(task), address, physAddr, page_size, cacheMode );
	else
	    ret = kIOReturnVMError;

	address += page_size;
	length -= page_size;
    }

    return( ret );
}
IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
				  IOByteCount length )
{
    if( task != kernel_task)
	return( kIOReturnUnsupported );

    flush_dcache( (vm_offset_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}
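
/*
 * Illustrative example: make a kernel buffer write-through before handing
 * it to hardware, then flush any lines the CPU already dirtied. Only
 * kernel_task is supported by this implementation; the cache mode constant
 * is assumed to come from IOTypes.h.
 *
 *	ret = IOSetProcessorCacheMode( kernel_task, (IOVirtualAddress) buf,
 *					4096, kIOWriteThruCache );
 *	if( kIOReturnSuccess == ret)
 *	    IOFlushProcessorCache( kernel_task, (IOVirtualAddress) buf, 4096 );
 */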
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}
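
/*
 * Illustrative example: a deeply recursive routine might bail out when the
 * remaining kernel stack runs low; the 1K threshold here is arbitrary.
 *
 *	if( OSKernelStackRemaining() < 1024)
 *	    return( kIOReturnNoResources );
 */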
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOSleep(unsigned milliseconds)
{
    wait_result_t wait_result;

    wait_result = assert_wait_timeout(milliseconds, THREAD_UNINT);
    assert(wait_result == THREAD_WAITING);

    wait_result = thread_block(THREAD_CONTINUE_NULL);
    assert(wait_result == THREAD_TIMED_OUT);
}
/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    extern void delay(int usec);

    delay(microseconds);
}
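
/*
 * Illustrative example: IOSleep() blocks the calling thread and must not
 * be used in interrupt context; IODelay() spins the processor and should
 * be kept to short waits.
 *
 *	IOSleep( 10 );		// block for roughly 10 ms
 *	IODelay( 50 );		// spin for roughly 50 us
 */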
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup();

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}
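
/*
 * Illustrative example: IOLog() takes printf-style arguments; it is meant
 * for thread context and is best avoided in primary interrupt context.
 *
 *	IOLog("MyDriver: probe failed (%d)\n", err);
 */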
void IOPanic(const char *reason)
{
    panic(reason);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */

static char noValue[80];	// that's pretty
const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
	for( ; regValueArray->name; regValueArray++) {
		if(regValueArray->value == value)
			return(regValueArray->name);
	}
	sprintf(noValue, "0x%x (UNDEFINED)", value);
	return((const char *)noValue);
}
IOReturn IOFindValueForName(const char *string,
	const IONamedValue *regValueArray,
	int *value)
{
	for( ; regValueArray->name; regValueArray++) {
		if(!strcmp(regValueArray->name, string)) {
			*value = regValueArray->value;
			return kIOReturnSuccess;
		}
	}
	return kIOReturnBadArgument;
}
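
/*
 * Illustrative example (hypothetical status table): the array must end
 * with an entry whose name is NULL, since both lookup routines stop at a
 * NULL name.
 *
 *	static const IONamedValue statusNames[] = {
 *	    { 0, "idle"    },
 *	    { 1, "running" },
 *	    { 0, NULL      }
 *	};
 *
 *	const char * name = IOFindNameForValue( 1, statusNames );  // "running"
 */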
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
	if (size & 0x80000000)
	    return (IOAlignment)(intsize - shift);
	size <<= 1;
    }
    return 0;
}
unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
	size <<= 1;
    }
    return size;
}
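
/*
 * Illustrative example: alignments are exchanged as log2 values, so the
 * two conversions round-trip for powers of two.
 *
 *	IOAlignment align = IOSizeToAlignment( 4096 );	// 12
 *	unsigned int size = IOAlignmentToSize( align );	// 4096
 */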
IOReturn IONDRVLibrariesInitialize( void )
{
    return( kIOReturnUnsupported );
}