/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"
mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;
/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
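/*
 * Note: with kIOMaxPageableMaps submaps of kIOPageableMapSize bytes each,
 * the pageable space managed below tops out at 16 * 96 MB = 1.5 GB of
 * kernel virtual address space.
 */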
/* LP64todo - these need to expand */
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;
    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
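/*
 * Usage sketch (illustrative, not part of this file): the created thread
 * begins executing fcn(arg) immediately and should terminate itself with
 * IOExitThread() when done.
 *
 *     static void myWorker(void *arg)
 *     {
 *         // ... do work ...
 *         IOExitThread();
 *     }
 *
 *     IOThread t = IOCreateThread(&myWorker, myContext);
 */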
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    /* The caller gets the thread_t without an extra reference. */
    thread_deallocate(thread);

    return (thread);
}

volatile void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
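/*
 * For sub-page requests, IOMallocAligned over-allocates from kalloc and
 * hides two words immediately below the pointer it hands back: the
 * adjusted allocation size and the true start of the allocation.
 * IOFreeAligned reads them back to release the right block.
 */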
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
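/*
 * Each contiguous allocation that is also mapped through the system
 * IOMapper gets one of these tracking entries, so IOFreeContiguous can
 * find and release the IOVM pages that back it.
 */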
struct _IOMallocContiguousEntry
{
    void *        virtual;
    ppnum_t       ioBase;
    queue_chain_t link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;
    ppnum_t       pagenum;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size)
    {
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &address, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS != kr)
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    /* Do we want a physical address? */
    if (address && physicalAddress)
    {
        do
        {
            /* Get the physical page */
            pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
            if (pagenum)
            {
                IOByteCount offset;
                ppnum_t     base;

                base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
                if (base)
                {
                    _IOMallocContiguousEntry *
                    entry = IONew(_IOMallocContiguousEntry, 1);
                    if (!entry)
                    {
                        IOFreeContiguous((void *) address, size);
                        address = 0;
                        break;
                    }
                    entry->virtual = (void *) address;
                    entry->ioBase  = base;
                    lck_mtx_lock(gIOMallocContiguousEntriesLock);
                    queue_enter( &gIOMallocContiguousEntries, entry,
                                 _IOMallocContiguousEntry *, link );
                    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

                    *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
                    for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
                        IOMapperInsertPage( base, offset, pagenum );
                }
                else
                    *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
            }
            else
                /* Did not find, return 0 */
                *physicalAddress = (IOPhysicalAddress) 0;
        }
        while (false);
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
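/*
 * Usage sketch (illustrative): a driver needing a small DMA descriptor
 * block might write
 *
 *     IOPhysicalAddress phys;
 *     void *desc = IOMallocContiguous(512, 16, &phys);
 *     // ... program the hardware with phys ...
 *     IOFreeContiguous(desc, 512);
 *
 * The size passed to IOFreeContiguous must match the original request.
 */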
void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;
    _IOMallocContiguousEntry * entry;
    ppnum_t      base = 0;

    if (!address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if (entry->virtual == address) {
            base = entry->ioBase;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (base)
    {
        IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
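/*
 * IOIteratePageableMaps tries the submap that last satisfied a request
 * (the hint), then walks the remaining maps; only if every existing map
 * fails does it grow the space by carving a new submap out of kernel_map.
 */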
kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while (attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if (KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if (index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if (KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if (index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if (size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           VM_FLAGS_ANYWHERE,
                           &map);
        if (KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while (true);

    return kr;
}
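/*
 * Carries the in/out parameters for IOMallocPageableCallback: size in,
 * allocated address out.
 */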
struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};
static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if (kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if (ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}
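/*
 * Usage sketch (illustrative): pageable allocations may fault at access
 * time, so they are only suitable for code paths that can block.
 *
 *     void *buf = IOMallocPageable(len, page_size);
 *     // ... use buf from a blockable context ...
 *     IOFreePageable(buf, len);
 */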
vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for (index = 0; index < gIOKitPageableSpace.count; index++) {
        if ((address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if (!map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}
void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if (map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
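/*
 * IOSetProcessorCacheMode changes the cache attributes of an existing
 * kernel mapping one page at a time, by unmapping each page and remapping
 * it at the same address with the requested mode.
 */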
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if (task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // Convert the cache mode into map flags
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while ((kIOReturnSuccess == ret) && (length > 0)) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if (pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}
IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if (task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
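/*
 * Kernel stacks are KERNEL_STACK_SIZE-aligned, so masking a local
 * variable's address with (KERNEL_STACK_SIZE - 1) gives its offset within
 * the stack region; with a stack growing downward, that offset is roughly
 * the number of bytes still available.
 */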
SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}
void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for ( ; regValueArray->name; regValueArray++) {
        if (regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}
IOReturn IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for ( ; regValueArray->name; regValueArray++) {
        if (!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
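/*
 * IOSizeToAlignment scans from the most-significant bit down, returning
 * the bit position of the highest set bit, i.e. floor(log2(size)):
 * IOSizeToAlignment(4096) == 12, IOSizeToAlignment(6144) == 12.
 */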
IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}
unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
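/*
 * IOAlignmentToSize is the inverse for exact powers of two:
 * IOAlignmentToSize(12) == 4096, so for any power of two p,
 * IOAlignmentToSize(IOSizeToAlignment(p)) == p. For other sizes the pair
 * rounds down to the highest power of two not exceeding the size.
 */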