/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91    Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/OSDebug.h>
#include <sys/sysctl.h>
mach_timespec_t IOZeroTvalspec = { 0, 0 };
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern kern_return_t kmem_suballoc(
    vm_map_t    parent,
    vm_offset_t *addr,
    vm_size_t   size,
    boolean_t   pageable,
    int         flags,
    vm_map_t    *new_map);  /* parameter list assumed; elided in this excerpt */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t * IOLockGroup;
/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;
/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if (libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,     /* size/flags arguments assumed; elided in this excerpt */
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");
    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}

void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}
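/*
 * Usage sketch (illustrative, not part of the original source): typical use
 * of IOCreateThread/IOExitThread from driver code.  The worker function and
 * its argument are hypothetical.
 */
#if 0
static void ExampleWorker(void * arg)
{
    /* ... do work with arg ... */
    IOExitThread();                 /* terminate the calling kernel thread */
}

static void ExampleStartWorker(void * context)
{
    IOThread thread = IOCreateThread(&ExampleWorker, context);
    if (thread == NULL) {
        /* thread creation failed; handle the error */
    }
}
#endif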
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}
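/*
 * Usage sketch (illustrative, not part of the original source): IOMalloc is
 * the general-purpose allocator; the caller must pass the same size to
 * IOFree that it passed to IOMalloc.  The function name below is hypothetical.
 */
#if 0
static void ExampleGeneralAllocation(void)
{
    vm_size_t size = 1024;
    void * buf = IOMalloc(size);
    if (buf) {
        bzero(buf, size);
        /* ... use buf ... */
        IOFree(buf, size);          /* size must match the allocation */
    }
}
#endif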
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t kr;
    vm_address_t  address;
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;
    vm_offset_t   alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);    /* trailing arguments assumed; elided in this excerpt */
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);    /* trailing arguments assumed; elided in this excerpt */
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            /* Round up past the header to the requested alignment. */
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            /* Stash the adjusted size and the raw allocation address just
             * below the aligned pointer so IOFreeAligned can recover them. */
            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t allocationAddress;
    vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
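/*
 * Usage sketch (illustrative, not part of the original source): an allocation
 * whose start address must be cache-line aligned.  The alignment is given in
 * bytes; as shown above, sub-page requests stash the allocation size and raw
 * address immediately below the returned pointer.  The function name is
 * hypothetical.
 */
#if 0
static void ExampleAlignedAllocation(void)
{
    vm_size_t size = 512;
    void * buf = IOMallocAligned(size, 64);         /* 64-byte aligned */
    if (buf) {
        assert(0 == ((vm_address_t) buf & 63));
        /* ... use buf ... */
        IOFreeAligned(buf, size);                   /* same size as allocated */
    }
}
#endif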
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOKernelFreeContiguous(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
mach_vm_address_t
IOKernelAllocateContiguous(mach_vm_size_t size, mach_vm_size_t alignment)
{
    kern_return_t     kr;
    mach_vm_address_t address;
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;
    mach_vm_address_t alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    if (adjustedSize >= page_size)
    {
        vm_offset_t virt;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, 0);   /* trailing arguments assumed; elided in this excerpt */
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, 0);    /* trailing arguments assumed; elided in this excerpt */
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            /* Round up past the header to the requested alignment. */
            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);

            /* Keep the buffer from straddling a physical page boundary. */
            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (address);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateContiguous(size, alignment);
    }
    else
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = 0xFFFFFFFF ^ (alignMask & PAGE_MASK);
        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);

        if (bmd)
        {
            _IOMallocContiguousEntry *
            entry = IONew(_IOMallocContiguousEntry, 1);
            if (!entry)
                bmd->release();
            else
            {
                entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
                entry->md          = bmd;
                lck_mtx_lock(gIOMallocContiguousEntriesLock);
                queue_enter( &gIOMallocContiguousEntries, entry,
                            _IOMallocContiguousEntry *, link );
                lck_mtx_unlock(gIOMallocContiguousEntriesLock);

                address          = (mach_vm_address_t) entry->virtualAddr;
                *physicalAddress = bmd->getPhysicalAddress();
            }
        }
    }

    return (void *) address;
}
void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if (!address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                        _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreeContiguous((mach_vm_address_t) address, size);
    }
}
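/*
 * Usage sketch (illustrative, not part of the original source): requesting a
 * physically contiguous buffer together with its physical address, e.g. for a
 * simple DMA engine.  When a physicalAddress pointer is supplied, the buffer
 * comes from an IOBufferMemoryDescriptor tracked on gIOMallocContiguousEntries
 * and must be released with IOFreeContiguous.  The function name is hypothetical.
 */
#if 0
static void ExampleContiguousAllocation(void)
{
    IOPhysicalAddress phys = 0;
    vm_size_t         size = 4096;

    void * buf = IOMallocContiguous(size, PAGE_SIZE, &phys);
    if (buf) {
        /* ... program a (hypothetical) DMA engine with phys ... */
        IOFreeContiguous(buf, size);
    }
}
#endif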
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t kr = kIOReturnNotReady;
    vm_size_t     segSize;
    UInt32        attempts;
    UInt32        index;
    vm_offset_t   min;
    vm_map_t      map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,        /* remaining arguments assumed; elided in this excerpt */
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}
struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t
IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}
void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}
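/*
 * Usage sketch (illustrative, not part of the original source): pageable
 * memory may fault, so it is suitable only for data that is never touched at
 * interrupt time or while paging is blocked.  The function name is hypothetical.
 */
#if 0
static void ExamplePageableAllocation(void)
{
    vm_size_t size = 16 * 4096;
    void * buf = IOMallocPageable(size, page_size);
    if (buf) {
        /* ... use buf from thread context only ... */
        IOFreePageable(buf, size);
    }
}
#endif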
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn
IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                         IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // Convert the cache mode into a map option.
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length  -= page_size;
    }

    return( ret );
}
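/*
 * Usage sketch (illustrative, not part of the original source): the routine
 * shifts the supplied mode into the kIOMapCacheMask field itself, so the
 * caller passes an unshifted cache mode; deriving it from the shifted
 * kIOMap*Cache constants keeps the example tied to values used above.  The
 * function name is hypothetical.
 */
#if 0
static IOReturn ExampleSetWriteThru(IOVirtualAddress vaddr, IOByteCount len)
{
    return IOSetProcessorCacheMode(kernel_task, vaddr, len,
                                   kIOMapWriteThruCache >> kIOMapCacheShift);
}
#endif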
IOReturn
IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                       IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

    flush_dcache64( (addr64_t) address, (unsigned) length, false );

    return( kIOReturnSuccess );
}
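/*
 * Usage sketch (illustrative, not part of the original source): push
 * CPU-written data out of the data cache before a bus-master device reads
 * the buffer.  The function name is hypothetical.
 */
#if 0
static void ExampleFlushBeforeDeviceRead(void * buf, IOByteCount len)
{
    /* ... CPU fills buf ... */
    (void) IOFlushProcessorCache(kernel_task, (IOVirtualAddress) buf, len);
    /* ... then start the (hypothetical) device transfer ... */
}
#endif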
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep for indicated number of milliseconds.
 */
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/*
 * Spin for indicated number of nanoseconds.
 */
void IOPause(unsigned nanoseconds)
{
    delay_for_interval(nanoseconds, kNanosecondScale);
}
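/*
 * Usage sketch (illustrative, not part of the original source): bounded
 * polling of a device status register with IODelay.  ExampleReadStatus()
 * stands in for a hypothetical register read.
 */
#if 0
extern UInt32 ExampleReadStatus(void);      /* hypothetical device register read */

static bool ExampleWaitForReady(void)
{
    for (unsigned i = 0; i < 1000; i++) {   /* wait up to ~10 ms */
        if (ExampleReadStatus() & 0x01)
            return true;
        IODelay(10);                        /* spin for 10 microseconds */
    }
    return false;
}
#endif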
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}
void IOPanic(const char *reason)
{
    panic("%s", reason);
}
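/*
 * Usage sketch (illustrative, not part of the original source): IOLog takes a
 * printf-style format string and arguments.  The function name is hypothetical.
 */
#if 0
static void ExampleLogStatus(int unit, IOReturn status)
{
    IOLog("ExampleDriver: unit %d returned 0x%x\n", unit, status);
}
#endif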
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];    // that's pretty lame but it should work.

const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}
IOReturn IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}
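/*
 * Usage sketch (illustrative, not part of the original source): a name/value
 * table for IOFindNameForValue()/IOFindValueForName().  The table must end
 * with an entry whose name is NULL.  The table and function names are hypothetical.
 */
#if 0
static const IONamedValue gExampleStates[] = {
    { 0, "idle"    },
    { 1, "running" },
    { 2, "stopped" },
    { 0, NULL      }    /* terminator */
};

static void ExampleLookups(void)
{
    const char * name = IOFindNameForValue(1, gExampleStates);     /* "running" */
    int value;
    if (kIOReturnSuccess == IOFindValueForName("stopped", gExampleStates, &value)) {
        /* value == 2 */
    }
    (void) name;
}
#endif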
OSString * IOCopyLogNameForPID(int pid)
{
    char   buf[128];
    size_t len;

    snprintf(buf, sizeof(buf), "pid %d, ", pid);
    len = strlen(buf);
    proc_name(pid, buf + len, sizeof(buf) - len);
    return (OSString::withCString(buf));
}
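/*
 * Usage sketch (illustrative, not part of the original source): the returned
 * OSString ("pid NNN, procname") is owned by the caller and must be released.
 * The function name is hypothetical.
 */
#if 0
static void ExampleLogPID(int pid)
{
    OSString * who = IOCopyLogNameForPID(pid);
    if (who) {
        IOLog("request from %s\n", who->getCStringNoCopy());
        who->release();
    }
}
#endif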
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOAlignment IOSizeToAlignment(unsigned int size)
{
    int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}
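/*
 * Usage sketch (illustrative, not part of the original source):
 * IOSizeToAlignment returns the log2 alignment implied by a size (the
 * position of its highest set bit), and IOAlignmentToSize converts back to a
 * byte count.  The function name is hypothetical.
 */
#if 0
static void ExampleAlignmentConversion(void)
{
    IOAlignment  align = IOSizeToAlignment(4096);   /* == 12 */
    unsigned int bytes = IOAlignmentToSize(align);  /* == 4096 */
    (void) bytes;
}
#endif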