/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 *
 */
#include <IOKit/system.h>

#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOKitDebug.h>
mach_timespec_t IOZeroTvalspec = { 0, 0 };
/*
 * Static variables for this module.
 */

static IOThreadFunc threadArgFcn;
static void *       threadArgArg;
static lock_t *     threadArgLock;
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 16 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 32 * 1024 * 1024 };
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    mutex_t *   lock;
} gIOKitPageableSpace;
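/*
 * gIOKitPageableSpace is the bookkeeping for IOMallocPageable(): an array
 * of up to kIOMaxPageableMaps submaps carved out of kernel_map, a hint
 * recording the map that last satisfied an allocation, and the count of
 * maps created so far. IOMallocPageable() appends new submaps lazily once
 * the existing ones fill up.
 */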
void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    threadArgLock = lock_alloc( true, NULL, NULL );

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,       /* pageable */
                        TRUE,       /* anywhere */
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock        = mutex_alloc( 0 );
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * We pass an argument to a new thread by saving fcn and arg in some
 * locked variables and starting the thread at ioThreadStart(). This
 * function retrieves fcn and arg and makes the appropriate call.
 */

static void ioThreadStart( void )
{
    IOThreadFunc    fcn;
    void *          arg;

    fcn = threadArgFcn;
    arg = threadArgArg;
    lock_done( threadArgLock );

    (*fcn)(arg);

    IOExitThread();
}
IOThread
IOCreateThread(IOThreadFunc fcn, void *arg)
{
    IOThread thread;

    lock_write( threadArgLock );
    threadArgFcn = fcn;
    threadArgArg = arg;

    thread = kernel_thread( kernel_task, ioThreadStart );

    return (thread);
}
volatile void IOExitThread()
{
    (void) thread_terminate(current_act());
}
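/*
 * Usage sketch (not from the original source; myWorker, MyContext and ctx
 * are hypothetical names): a caller hands IOCreateThread() a function and
 * one argument, and the spawned thread runs until it calls IOExitThread():
 *
 *	static void myWorker( void * arg )
 *	{
 *	    MyContext * ctx = (MyContext *) arg;
 *	    // ... do the work ...
 *	    IOExitThread();
 *	}
 *
 *	IOThread t = IOCreateThread( &myWorker, (void *) ctx );
 */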
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree((vm_offset_t)address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}
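/*
 * Note: IOMalloc()/IOFree() are thin wrappers over kalloc()/kfree(). No
 * size header is stored, so callers must pass IOFree() exactly the size
 * they allocated; debug_iomalloc_size just tracks the outstanding byte
 * count for IOKitDebug accounting.
 */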
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t   kr;
    vm_address_t    address;
    vm_address_t    allocationAddress;
    vm_size_t       adjustedSize;
    vm_offset_t     alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, KMA_KOBJECT);
        if (KERN_SUCCESS != kr) {
            IOLog("Failed %08x, %08x\n", size, alignment);
            address = 0;
        }

    } else {

        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
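/*
 * Layout note for the sub-page case above: the pointer handed back is
 * advanced past two hidden words inside the kalloc() block, so that
 * IOFreeAligned() can recover both values without any lookup table:
 *
 *	allocationAddress ... [pad][adjustedSize][allocationAddress][aligned data]
 *	                                                            ^ returned address
 */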
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t    allocationAddress;
    vm_size_t       adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size );

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    kern_return_t   kr;
    vm_address_t    address;
    vm_address_t    allocationAddress;
    vm_size_t       adjustedSize;
    vm_offset_t     alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kmem_alloc_contig(kernel_map, &address, size,
                               alignMask, KMA_KOBJECT);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            if (atop(address) != atop(address + size - 1))
                address = round_page(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    if (address && physicalAddress)
        *physicalAddress = (IOPhysicalAddress) pmap_extract( kernel_pmap,
                                                             address );

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
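/*
 * Note on the sub-page path above: over-allocating (2 * size) from
 * kalloc() leaves enough slack that, when the aligned block would straddle
 * a page boundary, the address can be rounded up to the next page start
 * and the block still fits. Keeping a small block within a single page is
 * what guarantees physical contiguity; larger requests use
 * kmem_alloc_contig() instead.
 */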
void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t    allocationAddress;
    vm_size_t       adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size );

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t   kr = kIOReturnNotReady;
    vm_address_t    address;
    vm_size_t       segSize;
    UInt32          attempts;
    UInt32          index;
    vm_offset_t     min;
    vm_map_t        map;

    if (alignment > page_size)
        return (0);
    if (size > kIOPageableMaxMapSize)
        return (0);

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while (attempts--) {
            kr = kmem_alloc_pageable( gIOKitPageableSpace.maps[index].map,
                                      &address, size );
            if (KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if (index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if (KERN_SUCCESS == kr)
            break;

        mutex_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if (index >= (kIOMaxPageableMaps - 1)) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if (size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,        /* pageable */
                           TRUE,        /* anywhere */
                           &map);
        if (KERN_SUCCESS != kr) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        mutex_unlock( gIOKitPageableSpace.lock );

    } while (true);

    if (KERN_SUCCESS != kr)
        address = 0;

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += round_page(size);
#endif

    return (void *) address;
}
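/*
 * Strategy above: start at the hinted map and walk backwards through the
 * existing pageable submaps; only when every map fails is
 * gIOKitPageableSpace.lock taken and a fresh submap suballocated from
 * kernel_map. The hint keeps the common case out of the mutex at the cost
 * of an occasional full scan.
 */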
vm_map_t
IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t    map = 0;
    UInt32      index;

    for (index = 0; index < gIOKitPageableSpace.count; index++) {
        if ((address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end)) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if (!map)
        IOPanic("IOPageableMapForAddress: null");

    return (map);
}
void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address );
    if (map)
        kmem_free( map, (vm_offset_t) address, size );

#if IOALLOCDEBUG
    debug_iomalloc_size -= round_page(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                vm_size_t length, unsigned int options);
IOReturn
IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                         IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn    ret = kIOReturnSuccess;
    vm_offset_t physAddr;

    if (task != kernel_task)
        return (kIOReturnUnsupported);

    length = round_page(address + length) - trunc_page( address );
    address = trunc_page( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while ((kIOReturnSuccess == ret) && (length > 0)) {

        physAddr = pmap_extract( kernel_pmap, address );
        if (physAddr)
            ret = IOMapPages( get_task_map(task), address, physAddr, page_size, cacheMode );
        else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return (ret);
}
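/*
 * The loop above rewrites the range one page at a time: each page's
 * physical address is looked up with pmap_extract() and re-entered through
 * IOMapPages() with the new cache bits, so an unmapped page fails the call
 * with kIOReturnVMError instead of remapping garbage.
 */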
IOReturn
IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                       IOByteCount length )
{
    if (task != kernel_task)
        return (kIOReturnUnsupported);

    flush_dcache( (vm_offset_t) address, (unsigned) length, false );

    return (kIOReturnSuccess);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
SInt32
OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return (stack);
}
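/*
 * This works because kernel stacks are KERNEL_STACK_SIZE-aligned: masking
 * the address of a local variable with (KERNEL_STACK_SIZE - 1) yields the
 * offset of the current stack pointer within the stack, which for a
 * downward-growing stack is roughly the number of bytes still unused.
 */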
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOSleep(unsigned milliseconds)
{
    int wait_result;

    assert_wait_timeout(milliseconds, THREAD_INTERRUPTIBLE);
    wait_result = thread_block((void (*)(void))0);
    if (wait_result != THREAD_TIMED_OUT)
        thread_cancel_timer();
}
/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    extern void delay(int usec);

    delay(microseconds);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup();

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}
void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // that's pretty lame to have to hard code the length

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for ( ; regValueArray->name; regValueArray++) {
        if (regValueArray->value == value)
            return (regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return ((const char *)noValue);
}
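/*
 * Caveat: the UNDEFINED case formats into the single static noValue[]
 * buffer, so IOFindNameForValue() is not reentrant for unknown values;
 * concurrent callers may see each other's strings.
 */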
IOReturn
IOFindValueForName(const char *string,
                   const IONamedValue *regValueArray,
                   int *value)
{
    for ( ; regValueArray->name; regValueArray++) {
        if (!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOAlignment
IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}
unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
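/*
 * The IOAlignment encoding used by these two helpers is a power-of-two
 * exponent: IOSizeToAlignment() returns log2 of the highest set bit of
 * size, and IOAlignmentToSize() inverts it. For example,
 * IOSizeToAlignment(4096) == 12 and IOAlignmentToSize(12) == 4096.
 */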