/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"
mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;
/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32     count;
    UInt32     hint;
    IOMapData  maps[ kIOMaxPageableMaps ];
    lck_mtx_t *lock;
} gIOKitPageableSpace;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                        &gIOKitPageableSpace.maps[0].address,
                        kIOPageableMapSize,
                        TRUE,
                        VM_FLAGS_ANYWHERE,
                        &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock            = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end     = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint            = 0;
    gIOKitPageableSpace.count           = 1;

    gIOMallocContiguousEntriesLock      = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}

volatile void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}
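
/*
 * Usage sketch (illustrative only, not part of this file's implementation):
 * a worker created with IOCreateThread() must end itself with IOExitThread().
 * The function and argument names below are hypothetical.
 */
#if 0
static void ExampleWorker(void *arg)
{
    /* Do some one-shot work with the caller-supplied argument. */
    IOLog("ExampleWorker: arg = %p\n", arg);

    /* A thread created by IOCreateThread() must terminate itself. */
    IOExitThread();
    /* NOTREACHED */
}

static void ExampleStartWorker(void)
{
    /* Fire-and-forget: the returned IOThread is not joinable. */
    (void) IOCreateThread(&ExampleWorker, NULL);
}
#endif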
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}
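
/*
 * Usage sketch (illustrative only): IOFree() must be passed the same size
 * that was given to IOMalloc(), since kalloc/kfree need the allocation size.
 * The variable names below are hypothetical.
 */
#if 0
static void ExampleMallocFree(void)
{
    vm_size_t len = 256;
    void *buf = IOMalloc(len);

    if (buf) {
        bzero(buf, len);        /* use the buffer */
        IOFree(buf, len);       /* size must match the IOMalloc() call */
    }
}
#endif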
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
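
/*
 * Usage sketch (illustrative only): IOMallocAligned() returns storage whose
 * address is a multiple of the requested power-of-two alignment; the matching
 * IOFreeAligned() call must pass the original size. Names are hypothetical.
 */
#if 0
static void ExampleAligned(void)
{
    vm_size_t len = 4096;
    void *buf = IOMallocAligned(len, 4096);    /* page-aligned block */

    if (buf) {
        assert(0 == ((vm_address_t) buf & 4095));
        IOFreeAligned(buf, len);
    }
}
#endif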
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    void *        virtual;
    ppnum_t       ioBase;
    queue_chain_t link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;
    ppnum_t       pagenum;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        if (adjustedSize > page_size) {
            kr = kmem_alloc_contig(kernel_map, &address, size,
                                   alignMask, 0);
        } else {
            kr = kernel_memory_allocate(kernel_map, &address,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;
        allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    /* Do we want a physical address? */
    if (address && physicalAddress) {

        do {
            /* Get the physical page */
            pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
            if (pagenum) {
                IOByteCount offset;
                ppnum_t     base;

                base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
                if (base) {
                    _IOMallocContiguousEntry *
                    entry = IONew(_IOMallocContiguousEntry, 1);
                    if (!entry) {
                        IOFreeContiguous((void *) address, size);
                        address = 0;
                        break;
                    }
                    entry->virtual = (void *) address;
                    entry->ioBase  = base;
                    lck_mtx_lock(gIOMallocContiguousEntriesLock);
                    queue_enter( &gIOMallocContiguousEntries, entry,
                                 _IOMallocContiguousEntry *, link );
                    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

                    *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
                    for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
                        IOMapperInsertPage( base, offset, pagenum );
                } else
                    *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
            } else
                /* Did not find, return 0 */
                *physicalAddress = (IOPhysicalAddress) 0;
        } while (false);
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;
    _IOMallocContiguousEntry * entry;
    ppnum_t      base = 0;

    if (!address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if (entry->virtual == address) {
            base = entry->ioBase;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (base) {
        IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
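
/*
 * Usage sketch (illustrative only): IOMallocContiguous() can optionally
 * report the physical (or mapper/IOVM) address of the block, which is what
 * a DMA engine would be programmed with. Names below are hypothetical.
 */
#if 0
static void ExampleContiguous(void)
{
    IOPhysicalAddress phys = 0;
    vm_size_t         len  = 8192;
    void             *buf  = IOMallocContiguous(len, page_size, &phys);

    if (buf) {
        IOLog("contiguous buffer virt %p phys 0x%lx\n", buf, (unsigned long) phys);
        IOFreeContiguous(buf, len);
    }
}
#endif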
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while (attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                           &min,
                           segSize,
                           TRUE,
                           VM_FLAGS_ANYWHERE,
                           &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while (true);

    return kr;
}
struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}
vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}
void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}
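
/*
 * Usage sketch (illustrative only): pageable allocations come from the
 * gIOKitPageableSpace submaps and may be paged out, so they must not be
 * touched at interrupt time or used for DMA without wiring. Names are
 * hypothetical.
 */
#if 0
static void ExamplePageable(void)
{
    vm_size_t len = 64 * 1024;
    void *buf = IOMallocPageable(len, page_size);

    if (buf) {
        /* IOFreePageable() looks up the owning submap via
           IOPageableMapForAddress() before freeing. */
        IOFreePageable(buf, len);
    }
}
#endif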
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // Make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}
IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}
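
/*
 * Usage sketch (illustrative only): both calls operate on kernel_task
 * mappings only. The example below assumes a valid kernel virtual range;
 * the variable names are hypothetical.
 */
#if 0
static void ExampleCacheMode(IOVirtualAddress va, IOByteCount len)
{
    /* Remap the range cache-inhibited, then flush any dirty lines. */
    (void) IOSetProcessorCacheMode(kernel_task, va, len, kIOInhibitCache);
    (void) IOFlushProcessorCache(kernel_task, va, len);
}
#endif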
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}
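
/*
 * Usage sketch (illustrative only): IOSleep() blocks the calling thread and
 * may only be used where blocking is legal; IODelay() spins and is intended
 * for short waits, e.g. around hardware register accesses.
 */
#if 0
static void ExampleDelays(void)
{
    IOSleep(10);        /* block for ~10 ms */
    IODelay(50);        /* busy-wait for ~50 us */
}
#endif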
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}
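
/*
 * Usage sketch (illustrative only): IOLog() takes printf-style arguments and
 * writes to the console/system log; IOPanic() simply wraps panic(). The
 * function name below is hypothetical.
 */
#if 0
static void ExampleLog(IOReturn status)
{
    if (status != kIOReturnSuccess)
        IOLog("ExampleLog: operation failed, status 0x%x\n", status);
}
#endif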
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty lame but it's only debugging

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}
IOReturn IOFindValueForName(const char *string,
                            const IONamedValue *regValueArray,
                            int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
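
/*
 * Usage sketch (illustrative only): an IONamedValue table is an array mapping
 * integer codes to strings, terminated by an entry with a NULL name. The
 * table and codes below are hypothetical.
 */
#if 0
static const IONamedValue gExampleStates[] = {
    { 0, "idle"    },
    { 1, "running" },
    { 2, "stopped" },
    { 0, NULL      }            /* terminator: NULL name ends the table */
};

static void ExampleNamedValue(void)
{
    int value;

    IOLog("state 1 is \"%s\"\n", IOFindNameForValue(1, gExampleStates));

    if (kIOReturnSuccess == IOFindValueForName("stopped", gExampleStates, &value))
        IOLog("\"stopped\" maps to %d\n", value);
}
#endif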
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}
unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
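
/*
 * Usage sketch (illustrative only): IOSizeToAlignment() and
 * IOAlignmentToSize() convert between a power-of-two byte count and the
 * IOAlignment shift that encodes it, so the round trip below holds for
 * power-of-two sizes.
 */
#if 0
static void ExampleAlignment(void)
{
    IOAlignment align = IOSizeToAlignment(4096);    /* 4096 == 1 << 12, so align == 12 */

    assert(IOAlignmentToSize(align) == 4096);
}
#endif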