/*
 * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitDebug.h>
mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Global variables for use by iLogger.
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;
/*
 * Static variables for this module.
 */

static IOThreadFunc threadArgFcn;
static void *       threadArgArg;
static lock_t *     threadArgLock;

static queue_head_t gIOMallocContiguousEntries;
static mutex_t *    gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    mutex_t *   lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    threadArgLock = lock_alloc( true, NULL, NULL );

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        TRUE,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    gIOKitPageableSpace.lock        = mutex_alloc( 0 );
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = mutex_alloc( 0 );
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * We pass an argument to a new thread by saving fcn and arg in some
 * locked variables and starting the thread at ioThreadStart(). This
 * function retrieves fcn and arg and makes the appropriate call.
 */

static void ioThreadStart( void )
{
    IOThreadFunc fcn = threadArgFcn;
    void *       arg = threadArgArg;

    lock_done( threadArgLock );

    (*fcn)(arg);
}
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    IOThread thread;

    lock_write( threadArgLock );
    threadArgFcn = fcn;
    threadArgArg = arg;

    thread = kernel_thread( kernel_task, ioThreadStart );

    return thread;
}
volatile void IOExitThread()
{
    (void) thread_terminate(current_act());
}
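/*
 * Usage sketch (illustrative, not part of the original file): a client
 * starts a kernel thread with IOCreateThread and the thread exits itself
 * with IOExitThread. The worker function and context below are hypothetical.
 *
 *     static void myWorker( void * arg )
 *     {
 *         // ... do work with arg ...
 *         IOExitThread();
 *     }
 *
 *     IOThread t = IOCreateThread( &myWorker, (void *) myContext );
 */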
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
    if (address)
        debug_iomalloc_size += size;

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree((vm_offset_t)address, size);
        debug_iomalloc_size -= size;
    }
}
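/*
 * Usage sketch (illustrative, not part of the original file): IOFree must
 * be passed the same size that was given to IOMalloc, since kfree() needs
 * the allocation size. The size and names below are hypothetical.
 *
 *     void * buf = IOMalloc( 1024 );
 *     if( buf) {
 *         // ... use buf ...
 *         IOFree( buf, 1024 );
 *     }
 */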
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    alignMask    = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        /* Large requests go straight to the kernel map. */
        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {
            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;
        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            /* Round up past the header, then stash the allocation size and
               the original address just below the aligned block so that
               IOFreeAligned can recover them. */
            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                        & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

    if (address)
        debug_iomalloc_size += size;

    return (void *) address;
}
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if (!address)
        return;

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size );

    } else {
        /* Recover the allocation size and the original kalloc address that
           IOMallocAligned stored just below the aligned block. */
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, (vm_address_t) allocationAddress, adjustedSize );
        else
            kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

    debug_iomalloc_size -= size;
}
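/*
 * Usage sketch (illustrative, not part of the original file): an allocation
 * aligned to a 4096-byte boundary, released with the matching size. The
 * alignment must be a power of two; the values here are hypothetical.
 *
 *     void * buf = IOMallocAligned( 8192, 4096 );
 *     if( buf) {
 *         assert( 0 == ((vm_address_t) buf & 4095) );
 *         // ... use buf ...
 *         IOFreeAligned( buf, 8192 );
 *     }
 */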
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
struct _IOMallocContiguousEntry
{
    void *        virtual;
    ppnum_t       ioBase;
    queue_chain_t link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;
    ppnum_t       pagenum;

    alignMask    = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size)
    {
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &address, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS != kr)
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                        & (~alignMask);

            /* Keep the block within a single page so that it remains
               physically contiguous. */
            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    /* Do we want a physical address? */
    if (address && physicalAddress)
    {
        /* Get the physical page */
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
        if (pagenum)
        {
            IOByteCount offset;
            ppnum_t     base;

            base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
            if (base)
            {
                _IOMallocContiguousEntry *
                    entry = IONew(_IOMallocContiguousEntry, 1);
                if (!entry)
                {
                    IOFreeContiguous((void *) address, size);
                    address = 0;
                }
                else
                {
                    /* Remember the mapper allocation so IOFreeContiguous
                       can release it later. */
                    entry->virtual = (void *) address;
                    entry->ioBase  = base;
                    mutex_lock(gIOMallocContiguousEntriesLock);
                    queue_enter( &gIOMallocContiguousEntries, entry,
                                 _IOMallocContiguousEntry *, link );
                    mutex_unlock(gIOMallocContiguousEntriesLock);

                    *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
                    for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
                        IOMapperInsertPage( base, offset, pagenum );
                }
            }
            else
                *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
        }
        else
            /* Did not find, return 0 */
            *physicalAddress = (IOPhysicalAddress) 0;
    }

    assert(0 == (address & alignMask));

    if (address)
        debug_iomalloc_size += size;

    return (void *) address;
}
void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t               allocationAddress;
    vm_size_t                  adjustedSize;
    _IOMallocContiguousEntry * entry;
    ppnum_t                    base = 0;

    if (!address)
        return;

    /* If this buffer was registered with the system mapper, find and
       remove its bookkeeping entry. */
    mutex_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtual == address ) {
            base = entry->ioBase;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    mutex_unlock(gIOMallocContiguousEntriesLock);

    if (base)
    {
        IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size );

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((vm_offset_t) allocationAddress, adjustedSize);
    }

    debug_iomalloc_size -= size;
}
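/*
 * Usage sketch (illustrative, not part of the original file): a physically
 * contiguous buffer whose physical address is returned through the optional
 * physicalAddress argument. Names and sizes are hypothetical.
 *
 *     IOPhysicalAddress phys;
 *     void * buf = IOMallocContiguous( 4096, 4096, &phys );
 *     if( buf) {
 *         // ... program a device with phys, fill buf ...
 *         IOFreeContiguous( buf, 4096 );
 *     }
 */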
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        /* Try the existing maps first, starting from the last one that
           succeeded. */
        index    = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr ) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr )
            break;

        /* Every existing map failed; grow the pageable space by one submap
           and retry. */
        mutex_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize )
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           TRUE,
                           &map);
        if( KERN_SUCCESS != kr ) {
            mutex_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        mutex_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr )
        ref.address = 0;

    if( ref.address)
        debug_iomalloc_size += round_page_32(size);

    return( (void *) ref.address );
}
vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}
void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address );
    if( map)
        kmem_free( map, (vm_offset_t) address, size );

    debug_iomalloc_size -= round_page_32(size);
}
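/*
 * Usage sketch (illustrative, not part of the original file): pageable
 * allocations come from the gIOKitPageableSpace submaps and may fault, so
 * they must only be touched from thread context. Values are hypothetical.
 *
 *     void * table = IOMallocPageable( 64 * 1024, page_size );
 *     if( table) {
 *         // ... use table from thread context only ...
 *         IOFreePageable( table, 64 * 1024 );
 *     }
 */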
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                vm_size_t length, unsigned int options);
extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length  = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    /* Convert the cache mode into map options. */
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length  -= page_size;
    }

    return( ret );
}
IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}
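/*
 * Usage sketch (illustrative, not part of the original file): remap a
 * kernel buffer with caching inhibited, then flush the data cache for it.
 * Only kernel_task is supported; the buffer and length are hypothetical.
 *
 *     IOReturn r = IOSetProcessorCacheMode( kernel_task,
 *                      (IOVirtualAddress) buf, len, kIOMapInhibitCache );
 *     if( kIOReturnSuccess == r)
 *         IOFlushProcessorCache( kernel_task, (IOVirtualAddress) buf, len );
 */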
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Sleep the calling thread for a number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    wait_result_t wait_result;

    wait_result = assert_wait_timeout(milliseconds, THREAD_UNINT);
    assert(wait_result == THREAD_WAITING);

    wait_result = thread_block(THREAD_CONTINUE_NULL);
    assert(wait_result == THREAD_TIMED_OUT);
}
/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    extern void delay(int usec);

    delay(microseconds);
}
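/*
 * Usage sketch (illustrative, not part of the original file): IOSleep
 * blocks the calling thread and may only be used from thread context,
 * while IODelay spins and can be used where blocking is not allowed.
 *
 *     IOSleep( 10 );      // block for ~10 milliseconds
 *     IODelay( 50 );      // spin for ~50 microseconds
 */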
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup();

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}
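/*
 * Usage sketch (illustrative, not part of the original file): IOLog takes
 * printf-style arguments; the message text here is hypothetical.
 *
 *     IOLog("mydriver: start failed, status 0x%x\n", status);
 */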
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80]; // that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}
IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
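/*
 * Usage sketch (illustrative, not part of the original file): a lookup
 * table terminated by a NULL name, as IOFindNameForValue expects. The
 * table contents are hypothetical.
 *
 *     static const IONamedValue powerStates[] = {
 *         { 0, "off" },
 *         { 1, "on"  },
 *         { 0, NULL  }
 *     };
 *
 *     const char * name = IOFindNameForValue( 1, powerStates );   // "on"
 *     int          value;
 *     IOReturn     r    = IOFindValueForName( "off", powerStates, &value );
 */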
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOAlignment IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
IOReturn IONDRVLibrariesInitialize( void )
{
    return( kIOReturnUnsupported );
}