// apple/xnu: iokit/Kernel/IOLib.cpp
/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 * 17-Nov-98   cpp
 *
 */

#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

extern "C"
{

mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern kern_return_t kmem_suballoc(
    vm_map_t    parent,
    vm_offset_t *addr,
    vm_size_t   size,
    boolean_t   pageable,
    boolean_t   anywhere,
    vm_map_t    *new_map);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

lck_grp_t *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */

void *_giDebugLogInternal     = NULL;
void *_giDebugLogDataInternal = NULL;
void *_giDebugReserved1       = NULL;
void *_giDebugReserved2       = NULL;


/*
 * Static variables for this module.
 */

static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };

/* LP64todo - these need to expand */
typedef struct {
    vm_map_t    map;
    vm_offset_t address;
    vm_offset_t end;
} IOMapData;

static struct {
    UInt32      count;
    UInt32      hint;
    IOMapData   maps[ kIOMaxPageableMaps ];
    lck_mtx_t * lock;
} gIOKitPageableSpace;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLibInit(void)
{
    kern_return_t ret;

    static bool libInitialized;

    if(libInitialized)
        return;

    gIOKitPageableSpace.maps[0].address = 0;
    ret = kmem_suballoc(kernel_map,
                    &gIOKitPageableSpace.maps[0].address,
                    kIOPageableMapSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &gIOKitPageableSpace.maps[0].map);
    if (ret != KERN_SUCCESS)
        panic("failed to allocate iokit pageable map\n");

    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
    gIOKitPageableSpace.hint        = 0;
    gIOKitPageableSpace.count       = 1;

    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue_init( &gIOMallocContiguousEntries );

    libInitialized = true;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

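/*
 * IOCreateThread wraps kernel_thread_start() and immediately drops the
 * reference it received with thread_deallocate(), so the returned IOThread is
 * only an identifier; the new thread runs fcn(arg) until it calls
 * IOExitThread(), which is thread_terminate() on the current thread.
 */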
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
    kern_return_t result;
    thread_t      thread;

    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
    if (result != KERN_SUCCESS)
        return (NULL);

    thread_deallocate(thread);

    return (thread);
}


void IOExitThread(void)
{
    (void) thread_terminate(current_thread());
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

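/*
 * IOMalloc/IOFree are thin wrappers around the kalloc/kfree zone allocators,
 * plus optional IOALLOCDEBUG accounting. The caller must pass the original
 * allocation size to IOFree, since kfree() needs it to locate the right zone.
 *
 * Illustrative usage only (not part of this file):
 *
 *   void * buf = IOMalloc(128);
 *   if (buf) {
 *       // ... use buf ...
 *       IOFree(buf, 128);
 *   }
 */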
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif
    return address;
}

void IOFree(void * address, vm_size_t size)
{
    if (address) {
        kfree(address, size);
#if IOALLOCDEBUG
        debug_iomalloc_size -= size;
#endif
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

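/*
 * IOMallocAligned: requests whose adjusted size reaches a page go straight to
 * kernel_memory_allocate() with the alignment mask. Smaller requests are
 * over-allocated (kalloc or kernel_memory_allocate), the result is rounded up
 * to the requested alignment, and two hidden words are stored immediately
 * below the returned pointer: the adjusted size and the original allocation
 * address. IOFreeAligned() reads those words back to release the block, which
 * is why it must be passed the same size the caller originally requested.
 */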
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t  kr;
    vm_address_t   address;
    vm_address_t   allocationAddress;
    vm_size_t      adjustedSize;
    vm_offset_t    alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
                                    size, alignMask, 0);
        if (KERN_SUCCESS != kr)
            address = 0;

    } else {

        adjustedSize += alignMask;

        if (adjustedSize >= page_size) {

            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                                        adjustedSize, 0, 0);
            if (KERN_SUCCESS != kr)
                allocationAddress = 0;

        } else
            allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t)
                            - sizeof(vm_address_t))) = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    assert(0 == (address & alignMask));

#if IOALLOCDEBUG
    if( address)
        debug_iomalloc_size += size;
#endif

    return (void *) address;
}

void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t  allocationAddress;
    vm_size_t     adjustedSize;

    if( !address)
        return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {
        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));

        if (adjustedSize >= page_size)
            kmem_free( kernel_map, allocationAddress, adjustedSize);
        else
            kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

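/*
 * IOKernelAllocateContiguous/IOKernelFreeContiguous share a size convention:
 * both compute adjustedSize = (2 * size) + sizeof(mach_vm_size_t) +
 * sizeof(mach_vm_address_t) to decide which path was used. Page-sized and
 * larger requests come from kmem_alloc_contig()/kernel_memory_allocate() and
 * are returned with kmem_free(); smaller requests come from kalloc(), with
 * the adjusted size and the original allocation address stashed just below
 * the returned, aligned address, as in IOMallocAligned() above.
 */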
void
IOKernelFreeContiguous(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
        return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_address_t) address, size);

    } else {

        adjustedSize = *((mach_vm_size_t *)
                        (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
        allocationAddress = *((mach_vm_address_t *)
                        (address - sizeof(mach_vm_address_t) ));
        kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}

mach_vm_address_t
IOKernelAllocateContiguous(mach_vm_size_t size, mach_vm_size_t alignment)
{
    kern_return_t      kr;
    mach_vm_address_t  address;
    mach_vm_address_t  allocationAddress;
    mach_vm_size_t     adjustedSize;
    mach_vm_address_t  alignMask;

    if (size == 0)
        return (0);
    if (alignment == 0)
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    if (adjustedSize >= page_size)
    {
        vm_offset_t virt;
        adjustedSize = size;
        if (adjustedSize > page_size)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
                                   alignMask, 0);
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
                                        size, alignMask, 0);
        }
        if (KERN_SUCCESS == kr)
            address = virt;
        else
            address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page_32(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

#if IOALLOCDEBUG
    if (address)
        debug_iomalloc_size += size;
#endif

    return (address);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

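/*
 * When IOMallocContiguous() is asked for the physical address as well, the
 * memory is backed by an IOBufferMemoryDescriptor, and each such allocation
 * is recorded on gIOMallocContiguousEntries. IOFreeContiguous() searches that
 * queue: a match means the descriptor is released; no match means the block
 * came from IOKernelAllocateContiguous() and is freed through
 * IOKernelFreeContiguous().
 */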
struct _IOMallocContiguousEntry
{
    mach_vm_address_t          virtualAddr;
    IOBufferMemoryDescriptor * md;
    queue_chain_t              link;
};
typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                          IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t address = 0;

    if (size == 0)
        return 0;
    if (alignment == 0)
        alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
        address = IOKernelAllocateContiguous(size, alignment);
    }
    else do
    {
        IOBufferMemoryDescriptor * bmd;
        mach_vm_address_t          physicalMask;
        vm_offset_t                alignMask;

        alignMask = alignment - 1;
        physicalMask = 0xFFFFFFFF ^ (alignMask & PAGE_MASK);
        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
        if (!bmd)
            break;

        _IOMallocContiguousEntry *
        entry = IONew(_IOMallocContiguousEntry, 1);
        if (!entry)
        {
            bmd->release();
            break;
        }
        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
        entry->md          = bmd;
        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_enter( &gIOMallocContiguousEntries, entry,
                     _IOMallocContiguousEntry *, link );
        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

        address          = (mach_vm_address_t) entry->virtualAddr;
        *physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    return (void *) address;
}

void IOFreeContiguous(void * _address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    mach_vm_address_t address = (mach_vm_address_t) _address;

    if( !address)
        return;

    assert(size);

    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link )
    {
        if( entry->virtualAddr == address ) {
            md = entry->md;
            queue_remove( &gIOMallocContiguousEntries, entry,
                            _IOMallocContiguousEntry *, link );
            break;
        }
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md)
    {
        md->release();
        IODelete(entry, _IOMallocContiguousEntry, 1);
    }
    else
    {
        IOKernelFreeContiguous((mach_vm_address_t) address, size);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

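/*
 * IOIteratePageableMaps() tries the callback against each existing pageable
 * submap, starting from the last successful one (gIOKitPageableSpace.hint).
 * If every map fails, it grows the pool under gIOKitPageableSpace.lock by
 * kmem_suballoc()ing another submap of at least kIOPageableMapSize, bounded
 * by kIOMaxPageableMaps, and then retries.
 */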
kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref)
{
    kern_return_t  kr = kIOReturnNotReady;
    vm_size_t      segSize;
    UInt32         attempts;
    UInt32         index;
    vm_offset_t    min;
    vm_map_t       map;

    if (size > kIOPageableMaxMapSize)
        return( kIOReturnBadArgument );

    do {
        index = gIOKitPageableSpace.hint;
        attempts = gIOKitPageableSpace.count;
        while( attempts--) {
            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
            if( KERN_SUCCESS == kr) {
                gIOKitPageableSpace.hint = index;
                break;
            }
            if( index)
                index--;
            else
                index = gIOKitPageableSpace.count - 1;
        }
        if( KERN_SUCCESS == kr)
            break;

        lck_mtx_lock( gIOKitPageableSpace.lock );

        index = gIOKitPageableSpace.count;
        if( index >= (kIOMaxPageableMaps - 1)) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        if( size < kIOPageableMapSize)
            segSize = kIOPageableMapSize;
        else
            segSize = size;

        min = 0;
        kr = kmem_suballoc(kernel_map,
                    &min,
                    segSize,
                    TRUE,
                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }

        gIOKitPageableSpace.maps[index].map     = map;
        gIOKitPageableSpace.maps[index].address = min;
        gIOKitPageableSpace.maps[index].end     = min + segSize;
        gIOKitPageableSpace.hint                = index;
        gIOKitPageableSpace.count               = index + 1;

        lck_mtx_unlock( gIOKitPageableSpace.lock );

    } while( true );

    return kr;
}

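/*
 * IOMallocPageable() feeds IOMallocPageableCallback(), a kmem_alloc_pageable
 * wrapper, through IOIteratePageableMaps(); the result therefore always lives
 * in one of the gIOKitPageableSpace submaps, which is how IOFreePageable()
 * can recover the owning map with IOPageableMapForAddress() before calling
 * kmem_free().
 */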
struct IOMallocPageableRef
{
    vm_address_t address;
    vm_size_t    size;
};

static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
{
    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
    kern_return_t                kr;

    kr = kmem_alloc_pageable( map, &ref->address, ref->size );

    return( kr );
}

void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
{
    kern_return_t              kr = kIOReturnNotReady;
    struct IOMallocPageableRef ref;

    if (alignment > page_size)
        return( 0 );
    if (size > kIOPageableMaxMapSize)
        return( 0 );

    ref.size = size;
    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
    if( kIOReturnSuccess != kr)
        ref.address = 0;

#if IOALLOCDEBUG
    if( ref.address)
        debug_iomallocpageable_size += round_page_32(size);
#endif

    return( (void *) ref.address );
}

vm_map_t IOPageableMapForAddress( vm_address_t address )
{
    vm_map_t map = 0;
    UInt32   index;

    for( index = 0; index < gIOKitPageableSpace.count; index++) {
        if( (address >= gIOKitPageableSpace.maps[index].address)
         && (address < gIOKitPageableSpace.maps[index].end) ) {
            map = gIOKitPageableSpace.maps[index].map;
            break;
        }
    }
    if( !map)
        IOPanic("IOPageableMapForAddress: null");

    return( map );
}

void IOFreePageable(void * address, vm_size_t size)
{
    vm_map_t map;

    map = IOPageableMapForAddress( (vm_address_t) address);
    if( map)
        kmem_free( map, (vm_offset_t) address, size);

#if IOALLOCDEBUG
    debug_iomallocpageable_size -= round_page_32(size);
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

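/*
 * IOSetProcessorCacheMode() is only supported for kernel_task mappings. It
 * rounds the range out to whole pages and then walks it one page at a time:
 * pmap_find_phys() looks up the physical page backing each virtual page, the
 * existing mapping is removed with IOUnmapPages(), and the page is remapped
 * by IOMapPages() with the requested cache mode encoded in the kIOMapCache
 * bits.
 */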
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
{
    IOReturn ret = kIOReturnSuccess;
    ppnum_t  pagenum;

    if( task != kernel_task)
        return( kIOReturnUnsupported );

    length = round_page_32(address + length) - trunc_page_32( address );
    address = trunc_page_32( address );

    // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

    while( (kIOReturnSuccess == ret) && (length > 0) ) {

        // Get the physical page number
        pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
        if( pagenum) {
            ret = IOUnmapPages( get_task_map(task), address, page_size );
            ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
        } else
            ret = kIOReturnVMError;

        address += page_size;
        length -= page_size;
    }

    return( ret );
}


IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
                                IOByteCount length )
{
    if( task != kernel_task)
        return( kIOReturnUnsupported );

#if __ppc__
    flush_dcache64( (addr64_t) address, (unsigned) length, false );
#endif

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

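/*
 * OSKernelStackRemaining() estimates the unused portion of the current kernel
 * stack by masking the address of a local variable with (KERNEL_STACK_SIZE - 1);
 * this assumes kernel stacks are KERNEL_STACK_SIZE-aligned and grow downward,
 * so the offset within the stack is the space still available below the
 * current frame.
 */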
SInt32 OSKernelStackRemaining( void )
{
    SInt32 stack;

    stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));

    return( stack );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

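/*
 * IOSleep() and IODelay() both end up in delay_for_interval(), at millisecond
 * and microsecond scale respectively; IOSleep blocks the calling thread,
 * while IODelay is intended for short, spin-style waits.
 */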
void IOSleep(unsigned milliseconds)
{
    delay_for_interval(milliseconds, kMillisecondScale);
}

/*
 * Spin for indicated number of microseconds.
 */
void IODelay(unsigned microseconds)
{
    delay_for_interval(microseconds, kMicrosecondScale);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOLog(const char *format, ...)
{
    va_list ap;
    extern void conslog_putc(char);
    extern void logwakeup(void);

    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
    va_end(ap);
}

void IOPanic(const char *reason)
{
    panic(reason);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];	// that's pretty

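/*
 * Note: undefined values are formatted into the static noValue buffer above,
 * so IOFindNameForValue() is not reentrant when the value is not found.
 */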
const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
{
    for( ; regValueArray->name; regValueArray++) {
        if(regValueArray->value == value)
            return(regValueArray->name);
    }
    sprintf(noValue, "0x%x (UNDEFINED)", value);
    return((const char *)noValue);
}

IOReturn IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
    int *value)
{
    for( ; regValueArray->name; regValueArray++) {
        if(!strcmp(regValueArray->name, string)) {
            *value = regValueArray->value;
            return kIOReturnSuccess;
        }
    }
    return kIOReturnBadArgument;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

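/*
 * IOSizeToAlignment(size) returns the bit position of the highest set bit,
 * i.e. floor(log2(size)) for nonzero size, and IOAlignmentToSize(align)
 * returns 1 << align. For example, IOSizeToAlignment(4096) == 12 and
 * IOAlignmentToSize(12) == 4096; IOSizeToAlignment(0) falls through the loop
 * and returns 0.
 */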
IOAlignment IOSizeToAlignment(unsigned int size)
{
    register int shift;
    const int intsize = sizeof(unsigned int) * 8;

    for (shift = 1; shift < intsize; shift++) {
        if (size & 0x80000000)
            return (IOAlignment)(intsize - shift);
        size <<= 1;
    }
    return 0;
}

unsigned int IOAlignmentToSize(IOAlignment align)
{
    unsigned int size;

    for (size = 1; align; align--) {
        size <<= 1;
    }
    return size;
}

} /* extern "C" */