/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * 17-Apr-91	Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"
mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;
/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal	= NULL;
void *_giDebugLogDataInternal	= NULL;
void *_giDebugReserved1		= NULL;
void *_giDebugReserved2		= NULL;
/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t	map;
    vm_offset_t	address;
    vm_offset_t	end;
} IOMapData;

static struct {
    UInt32	count;
    UInt32	hint;
    IOMapData	maps[ kIOMaxPageableMaps ];
    lck_mtx_t *	lock;
} gIOKitPageableSpace;
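/*
 * Note (added commentary): gIOKitPageableSpace tracks up to
 * kIOMaxPageableMaps submaps of pageable kernel memory, each at least
 * kIOPageableMapSize (96MB). Submaps are created on demand by
 * IOIteratePageableMaps() below, and "hint" caches the index of the map
 * that last satisfied an allocation.
 */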
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t	result;
    thread_t		thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}
volatile void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}
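/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller passes any function matching IOThreadFunc to IOCreateThread; the
 * thread runs until it calls IOExitThread. The function and argument
 * names here are hypothetical.
 *
 *	static void myThreadFunc(void * arg)
 *	{
 *	    IOLog("worker started, arg %p\n", arg);
 *	    IOExitThread();
 *	}
 *
 *	IOThread t = IOCreateThread(&myThreadFunc, NULL);
 *	if (!t)
 *	    IOLog("thread creation failed\n");
 */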
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
	debug_iomalloc_size += size;
#endif
    return address;
}
void IOFree(void * address, vm_size_t size)
{
    if (address) {
	kfree(address, size);
#if IOALLOCDEBUG
	debug_iomalloc_size -= size;
#endif
    }
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t	kr;
    vm_address_t	address;
    vm_address_t	allocationAddress;
    vm_size_t		adjustedSize;
    vm_offset_t		alignMask;

    if (size == 0)
	return 0;
    if (alignment == 0)
	alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

	kr = kernel_memory_allocate(kernel_map, &address,
					size, alignMask, 0);
	if (KERN_SUCCESS != kr)
	    address = 0;

    } else {

	adjustedSize += alignMask;

	if (adjustedSize >= page_size) {

	    kr = kernel_memory_allocate(kernel_map, &allocationAddress,
					    adjustedSize, 0, 0);
	    if (KERN_SUCCESS != kr)
		allocationAddress = 0;

	} else
	    allocationAddress = (vm_address_t) kalloc(adjustedSize);

	if (allocationAddress) {
	    address = (allocationAddress + alignMask
		    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
		    & (~alignMask);

	    *((vm_size_t *)(address - sizeof(vm_size_t)
			    - sizeof(vm_address_t))) = adjustedSize;
	    *((vm_address_t *)(address - sizeof(vm_address_t)))
			    = allocationAddress;
	} else
	    address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
	debug_iomalloc_size += size;
#endif

    return (void *) address;
}
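/*
 * Layout note (added commentary): for sub-page allocations, IOMallocAligned
 * over-allocates and stashes the adjusted size and the true allocation base
 * in the two words immediately below the aligned address it returns:
 *
 *	allocationAddress ... [adjustedSize][allocationAddress][aligned data]
 *	                                                        ^ returned
 *
 * IOFreeAligned reads these two hidden words back to find what to free.
 */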
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t	allocationAddress;
    vm_size_t		adjustedSize;

    if( !address)
	return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

	kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
	adjustedSize = *((vm_size_t *)( (vm_address_t) address
				- sizeof(vm_address_t) - sizeof(vm_size_t)));
	allocationAddress = *((vm_address_t *)( (vm_address_t) address
				- sizeof(vm_address_t) ));

	if (adjustedSize >= page_size)
	    kmem_free( kernel_map, allocationAddress, adjustedSize);
	else
	    kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    void *		virtual;
    ppnum_t		ioBase;
    queue_chain_t	link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
			   IOPhysicalAddress * physicalAddress)
{
    kern_return_t	kr;
    vm_address_t	address;
    vm_address_t	allocationAddress;
    vm_size_t		adjustedSize;
    vm_offset_t		alignMask;
    ppnum_t		pagenum;

    if (size == 0)
	return 0;
    if (alignment == 0)
	alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size)
    {
	if (adjustedSize > page_size)
	{
	    kr = kmem_alloc_contig(kernel_map, &address, size,
				    alignMask, 0);
	}
	else
	{
	    kr = kernel_memory_allocate(kernel_map, &address,
					size, alignMask, 0);
	}
	if (KERN_SUCCESS != kr)
	    address = 0;
    }
    else
    {
	adjustedSize += alignMask;
	allocationAddress = (vm_address_t) kalloc(adjustedSize);

	if (allocationAddress) {

	    address = (allocationAddress + alignMask
		    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
		    & (~alignMask);

	    if (atop_32(address) != atop_32(address + size - 1))
		address = round_page_32(address);

	    *((vm_size_t *)(address - sizeof(vm_size_t)
			    - sizeof(vm_address_t))) = adjustedSize;
	    *((vm_address_t *)(address - sizeof(vm_address_t)))
			    = allocationAddress;
	} else
	    address = 0;
    }

    /* Do we want a physical address? */
    if (address && physicalAddress)
    {
	do
	{
	    /* Get the physical page */
	    pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
	    if (pagenum)
	    {
		IOByteCount offset;
		ppnum_t	    base;

		base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
		if (base)
		{
		    _IOMallocContiguousEntry *
		    entry = IONew(_IOMallocContiguousEntry, 1);
		    if (!entry)
		    {
			IOFreeContiguous((void *) address, size);
			address = 0;
			break;
		    }
		    entry->virtual = (void *) address;
		    entry->ioBase  = base;
		    lck_mtx_lock(gIOMallocContiguousEntriesLock);
		    queue_enter( &gIOMallocContiguousEntries, entry,
				    _IOMallocContiguousEntry *, link );
		    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

		    *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
		    for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
			IOMapperInsertPage( base, offset, pagenum );
		}
		else
		    *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
	    }
	    else
		/* Did not find, return 0 */
		*physicalAddress = (IOPhysicalAddress) 0;
	}
	while (false);
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
	debug_iomalloc_size += size;
#endif

    return (void *) address;
}
void IOFreeContiguous(void * address, vm_size_t size)
{
    vm_address_t		allocationAddress;
    vm_size_t			adjustedSize;
    _IOMallocContiguousEntry *	entry;
    ppnum_t			base = 0;

    if( !address)
	return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
		    _IOMallocContiguousEntry *, link )
    {
	if( entry->virtual == address ) {
	    base = entry->ioBase;
	    queue_remove( &gIOMallocContiguousEntries, entry,
			    _IOMallocContiguousEntry *, link );
	    break;
	}
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (base)
    {
	IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
	IODelete(entry, _IOMallocContiguousEntry, 1);
    }

    adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

	kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
	adjustedSize = *((vm_size_t *)( (vm_address_t) address
				- sizeof(vm_address_t) - sizeof(vm_size_t)));
	allocationAddress = *((vm_address_t *)( (vm_address_t) address
				- sizeof(vm_address_t) ));

	kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t	kr = kIOReturnNotReady;
    vm_size_t		segSize;
    UInt32		attempts;
    UInt32		index;
    vm_offset_t		min;
    vm_map_t		map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t	 size;
};
static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t		 kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t	       kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}
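/*
 * Usage sketch (illustrative only): pageable memory may fault, so it must
 * not be touched at interrupt time or while DMA is in flight.
 *
 *	void * scratch = IOMallocPageable(64 * 1024, page_size);
 *	if (scratch) {
 *	    // ... large, non-wired scratch space ...
 *	    IOFreePageable(scratch, 64 * 1024);
 *	}
 */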
vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t	map = 0;
    UInt32	index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}
void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
				  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn	ret = kIOReturnSuccess;
    ppnum_t	pagenum;

    if( task != kernel_task)
	return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

	// Get the physical page number
	pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
	if( pagenum) {
	    ret = IOUnmapPages( get_task_map(task), address, page_size );
	    ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
	} else
	    ret = kIOReturnVMError;

	address += page_size;
	length -= page_size;
    }

    return( ret );
}
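/*
 * Usage sketch (illustrative only): mark one page of a kernel buffer
 * write-through; the buffer variable here is hypothetical.
 *
 *	IOReturn rc = IOSetProcessorCacheMode(kernel_task,
 *			(IOVirtualAddress) buf, page_size,
 *			kIOWriteThruCache);
 *	if (kIOReturnSuccess != rc)
 *	    IOLog("cache mode change failed: %x\n", rc);
 */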
IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
				  IOByteCount length )
{
    if( task != kernel_task)
	return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    /* Offset of a local variable within the KERNEL_STACK_SIZE-aligned
     * kernel stack, i.e. the bytes still unused below the current frame. */
    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
	for( ; regValueArray->name; regValueArray++) {
		if(regValueArray->value == value)
			return(regValueArray->name);
	}
	sprintf(noValue, "0x%x (UNDEFINED)", value);
	return((const char *)noValue);
}
IOReturn IOFindValueForName(const char *string,
	const IONamedValue *regValueArray,
	int *value)
{
	for( ; regValueArray->name; regValueArray++) {
		if(!strcmp(regValueArray->name, string)) {
			*value = regValueArray->value;
			return kIOReturnSuccess;
		}
	}
	return kIOReturnBadArgument;
}
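/*
 * Usage sketch (illustrative only): a NULL-name entry terminates the
 * table. The table contents here are hypothetical.
 *
 *	static const IONamedValue gStateNames[] = {
 *	    { kIOReturnSuccess, "success" },
 *	    { kIOReturnError,   "error"   },
 *	    { 0,                NULL      }
 *	};
 *
 *	const char * name = IOFindNameForValue(kIOReturnError, gStateNames);
 *	int          val;
 *	IOReturn     rc   = IOFindValueForName("success", gStateNames, &val);
 */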
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
	if (size & 0x80000000)
	    return (IOAlignment)(intsize - shift);
	size <<= 1;
    }
    return 0;
}
unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
	size <<= 1;
    }
    return size;
}
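/*
 * Worked example (added commentary): IOSizeToAlignment returns the bit
 * index of the highest set bit, so IOSizeToAlignment(4096) == 12 and
 * IOAlignmentToSize(12) == 4096; the two are inverses for powers of two.
 */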