/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"
mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
/* Parameter list reconstructed from the call sites below; the exact
 * prototype may differ slightly between kernel versions. */
extern kern_return_t kmem_suballoc(
                                vm_map_t        parent,
                                vm_offset_t     *addr,
                                vm_size_t       size,
                                boolean_t       pageable,
                                int             flags,
                                vm_map_t        *new_map);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t       *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal       = NULL;
void *_giDebugLogDataInternal   = NULL;
void *_giDebugReserved1         = NULL;
void *_giDebugReserved2         = NULL;
/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t     map;
    vm_offset_t  address;
    vm_offset_t  end;
} IOMapData;

static struct {
    UInt32       count;
    UInt32       hint;
    IOMapData    maps[ kIOMaxPageableMaps ];
    lck_mtx_t *  lock;
} gIOKitPageableSpace;
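/*
 * Note: gIOKitPageableSpace is a small registry of pageable kernel submaps.
 * IOLibInit() creates maps[0]; IOIteratePageableMaps() scans the existing
 * maps starting at 'hint' and, if none can satisfy a request, grows the
 * array (up to kIOMaxPageableMaps) with a fresh kmem_suballoc() submap.
 * The field layout above is reconstructed from those accesses.
 */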
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t  result;
    thread_t       thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}
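/*
 * Note: kernel_thread_start() hands back the new thread with an extra
 * reference; IOCreateThread() drops it immediately with thread_deallocate()
 * and returns the bare thread_t, so callers must not assume they hold a
 * reference on the returned IOThread.
 */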
void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t  kr;
    vm_address_t   address;
    vm_address_t   allocationAddress;
    vm_size_t      adjustedSize;
    vm_offset_t    alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {
        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;
    } else {
        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {
            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;
        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            // Align the returned pointer, then stash the bookkeeping words
            // immediately below it so IOFreeAligned() can recover them.
            address = (allocationAddress + alignMask
                        + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                        & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
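/*
 * Sub-page aligned allocations above use an over-allocation scheme: the
 * block obtained from kalloc()/kernel_memory_allocate() is larger than
 * requested, the pointer handed to the caller is rounded up to the
 * requested alignment, and the two words just below that pointer record
 * the adjusted size and the true allocation base. A sketch of the layout:
 *
 *   allocationAddress
 *   | ...padding... | adjustedSize | allocationAddress | caller data...
 *                                                      ^ returned address
 *
 * IOFreeAligned() reads those two words back to find what to free.
 */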
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {
        kmem_free( kernel_map, (vm_address_t) address, size);
    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void
IOKernelFreeContiguous(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t  allocationAddress;
    mach_vm_size_t     adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {
        kmem_free( kernel_map, (vm_address_t) address, size);
    } else {
        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
mach_vm_address_t
IOKernelAllocateContiguous(mach_vm_size_t size, mach_vm_size_t alignment)
{
    kern_return_t      kr;
    mach_vm_address_t  address;
    mach_vm_address_t  allocationAddress;
    mach_vm_size_t     adjustedSize;
    mach_vm_address_t  alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    if (adjustedSize >= page_size)
    {
        vm_offset_t virt;
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            /* trailing arguments reconstructed; verify against the running kernel's API */
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                        + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                        & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (address);
}
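/*
 * Note: for small requests the routine above over-allocates (2 * size plus
 * two bookkeeping words) out of the kalloc pool rather than grabbing whole
 * pages. Reserving twice the request leaves room to slide the returned
 * pointer: if the aligned candidate would straddle a page boundary
 * (atop_32 differs for the first and last byte), it is rounded up to the
 * next page so the buffer stays physically contiguous within one page.
 */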
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateContiguous(size, alignment);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = 0xFFFFFFFF ^ (alignMask & PAGE_MASK);
        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}
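/*
 * Typical usage (sketch, not from this file): a driver that needs a
 * physically contiguous DMA buffer and its physical address might do
 *
 *     IOPhysicalAddress phys;
 *     void *buf = IOMallocContiguous(4096, 4096, &phys);
 *     ...
 *     IOFreeContiguous(buf, 4096);
 *
 * When a physical address is requested, the allocation is backed by an
 * IOBufferMemoryDescriptor and remembered on gIOMallocContiguousEntries so
 * IOFreeContiguous() can find and release the descriptor later.
 */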
void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if (!address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                   _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                          _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreeContiguous((mach_vm_address_t) address, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t  kr = kIOReturnNotReady;
    vm_size_t      segSize;
    UInt32         attempts;
    UInt32         index;
    vm_offset_t    min;
    vm_map_t       map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        // Try the existing pageable maps first, starting at the hint.
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        // None of the existing maps worked; try to create a new submap.
        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}
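/*
 * IOMallocPageable() illustrates the IOIteratePageableMaps() pattern: the
 * caller packs its request into an IOMallocPageableRef, the iterator tries
 * kmem_alloc_pageable() (via IOMallocPageableCallback) against each
 * pageable submap in turn, and the allocation from the first map that
 * succeeds comes back through the ref structure.
 */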
vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t  map = 0;
    UInt32    index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}
void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // Convert the cache mode into a map flag.
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}
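/*
 * Note: the cache mode change above is applied page by page: each kernel
 * virtual page is translated to its physical page with pmap_find_phys(),
 * unmapped, and remapped with the requested cache bits encoded in the
 * kIOMapCacheMask field, so only whole pages can change cache mode.
 */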
IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}
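/*
 * Usage note: IOSleep() blocks the calling thread (millisecond scale) and
 * so must not be used in primary interrupt context, while IODelay() is the
 * microsecond-scale busy-wait intended for short hardware settle times,
 * e.g. IODelay(10) after poking a device register.
 */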
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}
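/*
 * Example (sketch): given a hypothetical table
 *
 *     static const IONamedValue powerStates[] = {
 *         { 0, "off" }, { 1, "idle" }, { 2, "on" }, { 0, NULL }
 *     };
 *
 * IOFindNameForValue(2, powerStates) returns "on", and an unknown value
 * falls through to the shared noValue buffer as "0x... (UNDEFINED)";
 * note that shared buffer makes the routine non-reentrant.
 */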
IOReturn IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOAlignment IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}